repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
bitprophet/releases
|
releases/__init__.py
|
handle_first_release_line
|
python
|
def handle_first_release_line(entries, manager):
    """
    Set up initial line-manager entry for first encountered release line.

    Scans ``entries`` for the first `Release` object and primes ``manager``
    with that release's major family; falls back to family ``0`` when the
    changelog is non-empty but has no releases yet.

    :param entries: iterable of changelog entry objects (may be empty).
    :param manager: a line-manager object exposing ``add_family``.
    """
    # It's remotely possible the changelog is totally empty...
    if not entries:
        return
    # Obtain (short-circuiting) first Release obj.
    first_release = None
    for obj in entries:
        if isinstance(obj, Release):
            first_release = obj
            break
    # It's also possible it's non-empty but has no releases yet.
    if first_release:
        # Use the explicitly captured release, not the leaked loop variable
        # 'obj' - same value today, but fragile under refactoring.
        manager.add_family(first_release.family)
    # If God did not exist, man would be forced to invent him.
    else:
        manager.add_family(0)
|
Set up initial line-manager entry for first encountered release line.
To be called at start of overall process; afterwards, subsequent major
lines are generated by `handle_upcoming_major_release`.
|
train
|
https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/__init__.py#L434-L455
|
[
"def add_family(self, major_number):\n \"\"\"\n Expand to a new release line with given ``major_number``.\n\n This will flesh out mandatory buckets like ``unreleased_bugfix`` and do\n other necessary bookkeeping.\n \"\"\"\n # Normally, we have separate buckets for bugfixes vs features\n keys = ['unreleased_bugfix', 'unreleased_feature']\n # But unstable prehistorical releases roll all up into just\n # 'unreleased'\n if major_number == 0 and self.config.releases_unstable_prehistory:\n keys = ['unreleased']\n # Either way, the buckets default to an empty list\n self[major_number] = {key: [] for key in keys}\n"
] |
import itertools
import re
import sys
from functools import partial
from docutils import nodes, utils
from docutils.parsers.rst import roles
import six
from .models import Issue, ISSUE_TYPES, Release, Version, Spec
from .line_manager import LineManager
from ._version import __version__
def _log(txt, config):
"""
Log debug output if debug setting is on.
Intended to be partial'd w/ config at top of functions. Meh.
"""
if config.releases_debug:
sys.stderr.write(str(txt) + "\n")
sys.stderr.flush()
def issue_nodelist(name, identifier=None):
    """
    Build the docutils node list that prefixes an issue entry.

    Renders a colored ``[Type]`` HTML badge, optionally followed by the given
    ``identifier`` node (e.g. a ticket-number hyperlink), then a colon; when
    no identifier is present, a trailing space follows the colon instead.
    """
    badge = '[<span style="color: #%s;">%s</span>]' % (
        ISSUE_TYPES[name], name.capitalize()
    )
    result = [nodes.raw(text=badge, format='html')]
    if identifier:
        result.append(nodes.inline(text=" "))
        result.append(identifier)
        result.append(nodes.inline(text=":"))
    else:
        result.append(nodes.inline(text=":"))
        result.append(nodes.inline(text=" "))
    return result
# Matches an intermediate "release line" spec such as '1.2+', capturing the
# 'N.N' portion for use in a '>=' version spec.
release_line_re = re.compile(r'^(\d+\.\d+)\+$') # e.g. '1.2+'
def scan_for_spec(keyword):
    """
    Attempt to derive a ``Spec`` from the given keyword value.

    Handles both the shorthand '1.2+' release-line form and anything the
    ``Spec`` constructor itself accepts; returns ``None`` when neither works.
    """
    # Both 'spec' formats are wrapped in parens - strip them off first.
    stripped = keyword.lstrip('(').rstrip(')')
    # The intermediate '1.2+' style maps onto a '>=' spec.
    found = release_line_re.findall(stripped)
    if found:
        return Spec(">={}".format(found[0]))
    # Otherwise, defer to Spec itself; I've only ever seen it fail with
    # ValueError.
    try:
        return Spec(stripped)
    except ValueError:
        return None
def issues_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """
    Use: :issue|bug|feature|support:`ticket_number`

    When invoked as :issue:, turns into just a "#NN" hyperlink to
    `releases_issue_uri`.

    When invoked otherwise, turns into "[Type] <#NN hyperlink>: ".

    Spaces present in the "ticket number" are used as fields for keywords
    (major, backported) and/or specs (e.g. '>=1.0'). This data is removed &
    used when constructing the object.

    May give a 'ticket number' of ``-`` or ``0`` to generate no hyperlink.

    :returns: standard docutils role two-tuple of (nodes, system messages).
    :raises ValueError: when a field is neither a spec nor a known keyword.
    """
    parts = utils.unescape(text).split()
    issue_no = parts.pop(0)
    # Lol @ access back to Sphinx
    config = inliner.document.settings.env.app.config
    if issue_no not in ('-', '0'):
        ref = None
        if config.releases_issue_uri:
            # TODO: deal with % vs .format()
            ref = config.releases_issue_uri % issue_no
        elif config.releases_github_path:
            ref = "https://github.com/{}/issues/{}".format(
                config.releases_github_path, issue_no)
        # Only generate a reference/link if we were able to make a URI
        if ref:
            identifier = nodes.reference(
                rawtext, '#' + issue_no, refuri=ref, **options
            )
        # Otherwise, just make it regular text
        else:
            identifier = nodes.raw(
                rawtext=rawtext, text='#' + issue_no, format='html',
                **options
            )
    else:
        identifier = None
        issue_no = None  # So it doesn't gum up dupe detection later
    # Additional 'new-style changelog' stuff
    if name in ISSUE_TYPES:
        nodelist = issue_nodelist(name, identifier)
        spec = None
        keyword = None
        # TODO: sanity checks re: e.g. >2 parts, >1 instance of keywords, >1
        # instance of specs, etc.
        for part in parts:
            maybe_spec = scan_for_spec(part)
            if maybe_spec:
                spec = maybe_spec
            else:
                if part in ('backported', 'major'):
                    keyword = part
                else:
                    err = "Gave unknown keyword {!r} for issue no. {}"
                    # BUGFIX: report the offending token ('part'), not
                    # 'keyword' - which at this point is either None or a
                    # previously-seen valid keyword, making the error useless.
                    raise ValueError(err.format(part, issue_no))
        # Create temporary node w/ data & final nodes to publish
        node = Issue(
            number=issue_no,
            type_=name,
            nodelist=nodelist,
            backported=(keyword == 'backported'),
            major=(keyword == 'major'),
            spec=spec,
        )
        return [node], []
    # Return old style info for 'issue' for older changelog entries
    else:
        return [identifier], []
def release_nodes(text, slug, date, config):
    """
    Generate the docutils section + raw-HTML header nodes for a release.

    :param text: visible title text (also used as the section id).
    :param slug: URI fragment (tag or branch name) for the release link.
    :param date: optional date string, rendered in smaller type when given.
    :param config: Sphinx app config holding the releases_* URI settings.
    :returns: a ``nodes.section`` wrapping a raw HTML ``<h2>`` header.
    """
    # Doesn't seem possible to do this "cleanly" (i.e. just say "make me a
    # title and give it these HTML attributes during render time) so...fuckit.
    # We were already doing fully raw elements elsewhere anyway. And who cares
    # about a PDF of a changelog? :x
    uri = None
    if config.releases_release_uri:
        # TODO: % vs .format()
        uri = config.releases_release_uri % slug
    elif config.releases_github_path:
        uri = "https://github.com/{}/tree/{}".format(
            config.releases_github_path, slug)
    # Only construct link tag if user actually configured release URIs somehow
    if uri:
        link = '<a class="reference external" href="{}">{}</a>'.format(
            uri, text,
        )
    else:
        link = text
    datespan = ''
    if date:
        datespan = ' <span style="font-size: 75%;">{}</span>'.format(date)
    header = '<h2 style="margin-bottom: 0.3em;">{}{}</h2>'.format(
        link, datespan)
    return nodes.section('',
        nodes.raw(rawtext='', text=header, format='html'),
        ids=[text]
    )
# Matches ":release:" role text of the form "N.N.N <YYYY-MM-DD>": group 1 is
# the version, group 2 the angle-bracketed date. The (?<!\x00) lookbehind
# presumably skips docutils-escaped '<' characters - TODO confirm.
year_arg_re = re.compile(r'^(.+?)\s*(?<!\x00)<(.*?)>$', re.DOTALL)
def release_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """
    Invoked as :release:`N.N.N <YYYY-MM-DD>`.

    Turns into useful release header + link to GH tree for the tag.

    :returns: standard docutils role two-tuple of (nodes, system messages);
        an error/problematic pair when the date is missing.
    """
    # Make sure year has been specified
    match = year_arg_re.match(text)
    if not match:
        msg = inliner.reporter.error("Must specify release date!")
        return [inliner.problematic(rawtext, rawtext, msg)], [msg]
    number, date = match.group(1), match.group(2)
    # Lol @ access back to Sphinx
    config = inliner.document.settings.env.app.config
    nodelist = [release_nodes(number, number, date, config)]
    # Return intermediate node
    node = Release(number=number, date=date, nodelist=nodelist)
    return [node], []
def generate_unreleased_entry(header, line, issues, manager, app):
    """
    Build a single faux-release dict for not-yet-released issues.

    :returns: mapping with 'obj' (a dateless `Release` numbered ``line``)
        and 'entries' (the given ``issues``) keys.
    """
    log = partial(_log, config=app.config)
    # TODO: should link to master for newest family and...what
    # exactly, for the others? Expectation isn't necessarily to
    # have a branch per family? Or is there? Maybe there must be..
    header_node = release_nodes(header, 'master', None, app.config)
    log("Creating {!r} faux-release with {!r}".format(line, issues))
    faux_release = Release(number=line, date=None, nodelist=[header_node])
    return {'obj': faux_release, 'entries': issues}
def append_unreleased_entries(app, manager, releases):
    """
    Generate new abstract 'releases' for unreleased issues.

    One entry is created per combination of bug-vs-feature bucket & major
    release line; the "N.x" prefix is omitted when only one major line exists.
    """
    multiple_lines = len(manager) > 1
    for family, lines in six.iteritems(manager):
        for type_ in ('bugfix', 'feature'):
            bucket = 'unreleased_{}'.format(type_)
            # Missing bucket implies unstable prehistory + 0.x family.
            if bucket not in lines:
                continue
            fam_prefix = "{}.x ".format(family) if multiple_lines else ""
            entry = generate_unreleased_entry(
                header="Next {}{} release".format(fam_prefix, type_),
                line="unreleased_{}.x_{}".format(family, type_),
                issues=lines[bucket],
                manager=manager,
                app=app,
            )
            releases.append(entry)
def reorder_release_entries(releases):
    """
    Mutate ``releases`` so the entrylist in each is ordered by feature/bug/etc.
    """
    precedence = {'feature': 0, 'bug': 1, 'support': 2}
    for release in releases:
        release['entries'] = sorted(
            list(release['entries']),
            key=lambda entry: precedence[entry.type],
        )
def construct_entry_with_release(focus, issues, manager, log, releases, rest):
    """
    Releases 'eat' the entries in their line's list and get added to the
    final data structure. They also inform new release-line 'buffers'.

    Release lines, once the release obj is removed, should be empty or a
    comma-separated list of issue numbers.

    :param focus: the `Release` object for this changelog entry.
    :param issues: global dict mapping issue number -> list of issue objects.
    :param manager: line-manager mapping family -> bucket-name -> issue list.
    :param log: logging callable (already partial'd with config).
    :param releases: list of release dicts being built; mutated in place.
    :param rest: remaining node contents of the original list item.
    """
    log("release for line %r" % focus.minor)
    # Check for explicitly listed issues first
    explicit = None
    if rest[0].children:
        explicit = [x.strip() for x in rest[0][0].split(',')]
    # Do those by themselves since they override all other logic
    if explicit:
        log("Explicit issues requested: %r" % (explicit,))
        # First scan global issue dict, dying if not found
        missing = [i for i in explicit if i not in issues]
        if missing:
            raise ValueError(
                "Couldn't find issue(s) #{} in the changelog!".format(
                    ', '.join(missing)))
        # Obtain the explicitly named issues from global list
        entries = []
        for i in explicit:
            for flattened_issue_item in itertools.chain(issues[i]):
                entries.append(flattened_issue_item)
        # Create release
        log("entries in this release: %r" % (entries,))
        releases.append({
            'obj': focus,
            'entries': entries,
        })
        # Introspect these entries to determine which buckets they should get
        # removed from (it's not "all of them"!)
        for obj in entries:
            if obj.type == 'bug':
                # Major bugfix: remove from unreleased_feature
                if obj.major:
                    log("Removing #%s from unreleased" % obj.number)
                    # TODO: consider making a LineManager method somehow
                    manager[focus.family]['unreleased_feature'].remove(obj)
                # Regular bugfix: remove from bucket for this release's
                # line + unreleased_bugfix
                else:
                    if obj in manager[focus.family]['unreleased_bugfix']:
                        log("Removing #%s from unreleased" % obj.number)
                        manager[focus.family]['unreleased_bugfix'].remove(obj)
                    if obj in manager[focus.family][focus.minor]:
                        log("Removing #%s from %s" % (obj.number, focus.minor))
                        manager[focus.family][focus.minor].remove(obj)
            # Regular feature/support: remove from unreleased_feature
            # Backported feature/support: remove from bucket for this
            # release's line (if applicable) + unreleased_feature
            else:
                log("Removing #%s from unreleased" % obj.number)
                manager[focus.family]['unreleased_feature'].remove(obj)
                if obj in manager[focus.family].get(focus.minor, []):
                    manager[focus.family][focus.minor].remove(obj)
    # Implicit behavior otherwise
    else:
        # Unstable prehistory -> just dump 'unreleased' and continue
        if manager.unstable_prehistory:
            # TODO: need to continue making LineManager actually OO, i.e. do
            # away with the subdicts + keys, move to sub-objects with methods
            # answering questions like "what should I give you for a release"
            # or whatever
            log("in unstable prehistory, dumping 'unreleased'")
            releases.append({
                'obj': focus,
                # NOTE: explicitly dumping 0, not focus.family, since this
                # might be the last pre-historical release and thus not 0.x
                'entries': manager[0]['unreleased'][:],
            })
            manager[0]['unreleased'] = []
            # If this isn't a 0.x release, it signals end of prehistory, make a
            # new release bucket (as is also done below in regular behavior).
            # Also acts like a sentinel that prehistory is over.
            if focus.family != 0:
                manager[focus.family][focus.minor] = []
        # Regular behavior from here
        else:
            # New release line/branch detected. Create it & dump unreleased
            # features.
            if focus.minor not in manager[focus.family]:
                log("not seen prior, making feature release & bugfix bucket")
                manager[focus.family][focus.minor] = []
                # TODO: this used to explicitly say "go over everything in
                # unreleased_feature and dump if it's feature, support or major
                # bug". But what the hell else would BE in unreleased_feature?
                # Why not just dump the whole thing??
                #
                # Dump only the items in the bucket whose family this release
                # object belongs to, i.e. 1.5.0 should only nab the 1.0
                # family's unreleased feature items.
                releases.append({
                    'obj': focus,
                    'entries': manager[focus.family]['unreleased_feature'][:],
                })
                manager[focus.family]['unreleased_feature'] = []
            # Existing line -> empty out its bucket into new release.
            # Skip 'major' bugs as those "belong" to the next release (and will
            # also be in 'unreleased_feature' - so safe to nuke the entire
            # line)
            else:
                log("pre-existing, making bugfix release")
                # TODO: as in other branch, I don't get why this wasn't just
                # dumping the whole thing - why would major bugs be in the
                # regular bugfix buckets?
                entries = manager[focus.family][focus.minor][:]
                releases.append({'obj': focus, 'entries': entries})
                manager[focus.family][focus.minor] = []
                # Clean out the items we just released from
                # 'unreleased_bugfix'. (Can't nuke it because there might
                # be some unreleased bugs for other release lines.)
                for x in entries:
                    if x in manager[focus.family]['unreleased_bugfix']:
                        manager[focus.family]['unreleased_bugfix'].remove(x)
def construct_entry_without_release(focus, issues, manager, log, rest):
    """
    File a non-release changelog entry (an issue) into ``manager`` buckets.

    Non-Issue line items (plain text bullets) are coerced into 'bug' issues
    whose description is their own text; real issues get ``rest`` attached as
    their description, are recorded in the global ``issues`` dict, then added
    to the appropriate release-line buckets via ``Issue.add_to_manager``.
    """
    # Handle rare-but-valid non-issue-attached line items, which are
    # always bugs. (They are their own description.)
    if not isinstance(focus, Issue):
        # First, sanity check for potential mistakes resulting in an issue node
        # being buried within something else.
        buried = focus.traverse(Issue)
        if buried:
            msg = """
Found issue node ({!r}) buried inside another node:
{}
Please double-check your ReST syntax! There is probably text in the above
output that will show you which part of your changelog to look at.
For example, indentation problems can accidentally generate nested definition
lists.
"""
            raise ValueError(msg.format(buried[0], str(buried[0].parent)))
        # OK, it looks legit - make it a bug.
        log("Found line item w/ no real issue object, creating bug")
        nodelist = issue_nodelist('bug')
        # Skip nodelist entirely if we're in unstable prehistory -
        # classification doesn't matter there.
        if manager.unstable_prehistory:
            nodelist = []
        # Undo the 'pop' from outer scope. TODO: rework things so we don't have
        # to do this dumb shit uggggh
        rest[0].insert(0, focus)
        focus = Issue(
            type_='bug',
            nodelist=nodelist,
            description=rest,
        )
    else:
        focus.attributes['description'] = rest
    # Add to global list (for use by explicit releases) or die trying
    issues[focus.number] = issues.get(focus.number, []) + [focus]
    # Add to per-release bugfix lines and/or unreleased bug/feature buckets, as
    # necessary.
    # TODO: suspect all of add_to_manager can now live in the manager; most of
    # Release's methods should probably go that way
    if manager.unstable_prehistory:
        log("Unstable prehistory -> adding to 0.x unreleased bucket")
        manager[0]['unreleased'].append(focus)
    else:
        log("Adding to release line manager")
        focus.add_to_manager(manager)
def handle_upcoming_major_release(entries, manager):
    """
    Look ahead at ``entries`` and prime ``manager`` for any upcoming major.

    Collects the next contiguous run of `Release` objects and, for each
    X.0.0 release found there, registers its family with the line manager.
    """
    # Short-circuit if the future holds nothing for us
    if not entries:
        return
    # Short-circuit if we're in the middle of a block of releases; only the
    # last release before a bunch of issues should be taking any action.
    if isinstance(entries[0], Release):
        return
    # Gather the next run of Release objects.
    upcoming = []
    for candidate in entries:
        if isinstance(candidate, Release):
            upcoming.append(candidate)
        elif upcoming:
            # Non-empty run + encountered a non-release = done w/ the block.
            break
    # Is a major (X.0.0) release present? If so, add its major number to the
    # line manager!
    for rel in upcoming:
        # TODO: update when Release gets tied closer w/ Version
        version = Version(rel.number)
        if version.minor == 0 and version.patch == 0:
            manager.add_family(rel.family)
def construct_releases(entries, app):
    """
    Walk changelog ``entries`` (given newest-first) into release structures.

    :param entries: the children of the changelog's top-level bullet list.
    :param app: the Sphinx application (for config access).
    :returns: two-tuple of (``releases``, ``manager``) - ``releases`` being a
        list of dicts with 'obj' (Release) / 'entries' (issues) keys, and
        ``manager`` the populated `LineManager`.
    """
    log = partial(_log, config=app.config)
    # Walk from back to front, consuming entries & copying them into
    # per-release buckets as releases are encountered. Store releases in order.
    releases = []
    # Release lines, to be organized by major releases, then by major+minor,
    # alongside per-major-release 'unreleased' bugfix/feature buckets.
    # NOTE: With exception of unstable_prehistory=True, which triggers use of a
    # separate, undifferentiated 'unreleased' bucket (albeit still within the
    # '0' major line family).
    manager = LineManager(app)
    # Also keep a master hash of issues by number to detect duplicates & assist
    # in explicitly defined release lists.
    issues = {}
    reversed_entries = list(reversed(entries))
    # For the lookahead, so we're not doing this stripping O(n) times.
    # TODO: probs just merge the two into e.g. a list of 2-tuples of "actual
    # entry obj + rest"?
    stripped_entries = [x[0][0] for x in reversed_entries]
    # Perform an initial lookahead to prime manager with the 1st major release
    handle_first_release_line(stripped_entries, manager)
    # Start crawling...
    for index, obj in enumerate(reversed_entries):
        # Issue object is always found in obj (LI) index 0 (first, often only
        # P) and is the 1st item within that (index 0 again).
        # Preserve all other contents of 'obj'.
        focus = obj[0].pop(0)
        rest = obj
        log(repr(focus))
        # Releases 'eat' the entries in their line's list and get added to the
        # final data structure. They also inform new release-line 'buffers'.
        # Release lines, once the release obj is removed, should be empty or a
        # comma-separated list of issue numbers.
        if isinstance(focus, Release):
            construct_entry_with_release(
                focus, issues, manager, log, releases, rest
            )
            # After each release is handled, look ahead to see if we're
            # entering "last stretch before a major release". If so,
            # pre-emptively update the line-manager so upcoming features are
            # correctly sorted into that major release by default (re: logic in
            # Release.add_to_manager)
            handle_upcoming_major_release(
                stripped_entries[index + 1:], manager
            )
        # Entries get copied into release line buckets as follows:
        # * Features and support go into 'unreleased_feature' for use in new
        # feature releases.
        # * Bugfixes go into all release lines (so they can be printed in >1
        # bugfix release as appropriate) as well as 'unreleased_bugfix' (so
        # they can be displayed prior to release'). Caveats include bugs marked
        # 'major' (they go into unreleased_feature instead) or with 'N.N+'
        # (meaning they only go into release line buckets for that release and
        # up.)
        # * Support/feature entries marked as 'backported' go into all
        # release lines as well, on the assumption that they were released to
        # all active branches.
        # * The 'rest' variable (which here is the bug description, vitally
        # important!) is preserved by stuffing it into the focus (issue)
        # object - it will get unpacked by construct_nodes() later.
        else:
            construct_entry_without_release(focus, issues, manager, log, rest)
    if manager.unstable_prehistory:
        releases.append(generate_unreleased_entry(
            header="Next release",
            line="unreleased",
            issues=manager[0]['unreleased'],
            manager=manager,
            app=app,
        ))
    else:
        append_unreleased_entries(app, manager, releases)
    reorder_release_entries(releases)
    return releases, manager
def construct_nodes(releases):
    """
    Render the parsed ``releases`` structure into a flat list of doctree nodes.

    Releases with no entries are skipped; the list is reversed so the newest
    release appears first in the final document.
    """
    result = []
    # Reverse the list again so the final display is newest on top
    for d in reversed(releases):
        if not d['entries']:
            continue
        obj = d['obj']
        entries = []
        for entry in d['entries']:
            # Use nodes.Node.deepcopy to deepcopy the description
            # node. If this is not done, multiple references to the same
            # object (e.g. a reference object in the description of #649, which
            # is then copied into 2 different release lists) will end up in the
            # doctree, which makes subsequent parse steps very angry (index()
            # errors).
            desc = entry['description'].deepcopy()
            # Additionally, expand any other issue roles found in the
            # description - sometimes we refer to related issues inline. (They
            # can't be left as issue() objects at render time since that's
            # undefined.)
            # Use [:] slicing to avoid mutation during the loops.
            for index, node in enumerate(desc[:]):
                for subindex, subnode in enumerate(node[:]):
                    if isinstance(subnode, Issue):
                        lst = subnode['nodelist']
                        desc[index][subindex:subindex + 1] = lst
            # Rework this entry to insert the now-rendered issue nodes in front
            # of the 1st paragraph of the 'description' nodes (which should be
            # the preserved LI + nested paragraph-or-more from original
            # markup.)
            # FIXME: why is there no "prepend a list" method?
            for node in reversed(entry['nodelist']):
                desc[0].insert(0, node)
            entries.append(desc)
        # Entry list
        list_ = nodes.bullet_list('', *entries)
        # Insert list into release nodelist (as it's a section)
        obj['nodelist'][0].append(list_)
        # Release header
        header = nodes.paragraph('', '', *obj['nodelist'])
        result.extend(header)
    return result
class BulletListVisitor(nodes.NodeVisitor):
    """
    Docutils visitor treating the first top-level bullet list as the
    changelog and replacing it with organized release nodes.
    """
    def __init__(self, document, app):
        nodes.NodeVisitor.__init__(self, document)
        # Only the first bullet list encountered counts as the changelog.
        self.found_changelog = False
        self.app = app

    def visit_bullet_list(self, node):
        if self.found_changelog:
            return
        self.found_changelog = True
        # Walk + parse into release mapping
        parsed, _ = construct_releases(node.children, self.app)
        # Construct new set of nodes to replace the old, and we're done
        node.replace_self(construct_nodes(parsed))

    def unknown_visit(self, node):
        # Ignore every other node type.
        pass
def generate_changelog(app, doctree):
    """
    ``doctree-read`` hook: transmute the changelog document's bullet list.

    Documents whose name isn't in ``releases_document_name`` (default:
    ``['changelog']``) are left untouched.
    """
    if app.env.docname not in app.config.releases_document_name:
        return
    # Find the first bullet-list node & replace it with our organized/parsed
    # elements.
    doctree.walk(BulletListVisitor(doctree, app))
def setup(app):
    """
    Sphinx extension entry point: register config values, roles & hooks.

    :returns: extension metadata dict identifying our version.
    """
    for key, default in (
        # Issue base URI setting: releases_issue_uri
        # E.g. 'https://github.com/fabric/fabric/issues/'
        ('issue_uri', None),
        # Release-tag base URI setting: releases_release_uri
        # E.g. 'https://github.com/fabric/fabric/tree/'
        ('release_uri', None),
        # Convenience Github version of above
        ('github_path', None),
        # Which document to use as the changelog
        ('document_name', ['changelog']),
        # Debug output
        ('debug', False),
        # Whether to enable linear history during 0.x release timeline
        # TODO: flip this to True by default in our 2.0 release
        ('unstable_prehistory', False),
    ):
        app.add_config_value(
            name='releases_{}'.format(key), default=default, rebuild='html'
        )
    # If a string is given for `document_name`, convert it to a list, to
    # maintain backwards compatibility.
    # NOTE: use six.string_types (six is already imported at module level)
    # instead of hand-rolling a PY2 basestring check - same semantics on both
    # Python 2 and 3, consistent with the rest of the file.
    if isinstance(app.config.releases_document_name, six.string_types):
        app.config.releases_document_name = [app.config.releases_document_name]
    # Register intermediate roles
    for x in list(ISSUE_TYPES) + ['issue']:
        add_role(app, x, issues_role)
    add_role(app, 'release', release_role)
    # Hook in our changelog transmutation at appropriate step
    app.connect('doctree-read', generate_changelog)
    # identifies the version of our extension
    return {'version': __version__}
def add_role(app, name, role_obj):
    """
    Register ``role_obj`` under ``name``, skipping names already registered.

    Introspecting ``docutils.parsers.rst.roles._roles`` is the same trick
    Sphinx uses to emit warnings about double-registering; it's a PITA to try
    and configure the app early on so it doesn't emit those warnings, so we
    instead just...don't double-register. Meh.
    """
    already_present = name in roles._roles
    if not already_present:
        app.add_role(name, role_obj)
|
bitprophet/releases
|
releases/models.py
|
Issue.minor_releases
|
python
|
def minor_releases(self, manager):
    """
    Return all minor release line labels found in ``manager``.
    """
    # TODO: yea deffo need a real object for 'manager', heh. E.g. we do a
    # very similar test for "do you have any actual releases yet?"
    # elsewhere. (This may be fodder for changing how we roll up
    # pre-major-release features though...?)
    released = []
    for key, value in six.iteritems(manager):
        # A family counts once it holds any non-'unreleased' bucket label.
        if any(x for x in value if not x.startswith('unreleased')):
            released.append(key)
    return released
|
Return all minor release line labels found in ``manager``.
|
train
|
https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/models.py#L69-L80
| null |
class Issue(nodes.Element):
    """
    Docutils node subclass representing a single changelog issue entry.
    """
    # Technically, we just need number, but heck, you never know...
    _cmp_keys = ('type', 'number', 'backported', 'major')

    @property
    def type(self):
        # One of the ISSUE_TYPES keys, e.g. 'bug' / 'feature' / 'support'.
        return self['type_']

    @property
    def is_featurelike(self):
        # Major bugs are treated like features; non-backported
        # features/support entries are feature-like.
        if self.type == 'bug':
            return self.major
        else:
            return not self.backported

    @property
    def is_buglike(self):
        return not self.is_featurelike

    @property
    def backported(self):
        return self.get('backported', False)

    @property
    def major(self):
        return self.get('major', False)

    @property
    def number(self):
        return self.get('number', None)

    @property
    def spec(self):
        return self.get('spec', None)

    def __eq__(self, other):
        # Equality considers only _cmp_keys - not e.g. nodelist/description.
        for attr in self._cmp_keys:
            if getattr(self, attr, None) != getattr(other, attr, None):
                return False
        return True

    def __hash__(self):
        # NOTE(review): 'reduce' and 'xor' are presumably module-level imports
        # (functools.reduce / operator.xor) not visible in this excerpt -
        # confirm against the full models.py.
        return reduce(xor, [hash(getattr(self, x)) for x in self._cmp_keys])

    def default_spec(self, manager):
        """
        Given the current release-lines structure, return a default Spec.

        Specifics:

        * For feature-like issues, only the highest major release is used, so
          given a ``manager`` with top level keys of ``[1, 2]``, this would
          return ``Spec(">=2")``.

            * When ``releases_always_forwardport_features`` is ``True``, that
              behavior is nullified, and this function always returns the empty
              ``Spec`` (which matches any and all versions/lines).

        * For bugfix-like issues, we only consider major release families which
          have actual releases already.

            * Thus the core difference here is that features are 'consumed' by
              upcoming major releases, and bugfixes are not.

        * When the ``unstable_prehistory`` setting is ``True``, the default
          spec starts at the oldest non-zero release line. (Otherwise, issues
          posted after prehistory ends would try being added to the 0.x part of
          the tree, which makes no sense in unstable-prehistory mode.)
        """
        # TODO: I feel like this + the surrounding bits in add_to_manager()
        # could be consolidated & simplified...
        specstr = ""
        # Make sure truly-default spec skips 0.x if prehistory was unstable.
        stable_families = manager.stable_families
        if manager.config.releases_unstable_prehistory and stable_families:
            specstr = ">={}".format(min(stable_families))
        if self.is_featurelike:
            # TODO: if app->config-><releases_always_forwardport_features or
            # w/e
            # NOTE(review): 'if True' is a placeholder pending the config flag
            # above - features currently always target the newest family.
            if True:
                specstr = ">={}".format(max(manager.keys()))
        else:
            # Can only meaningfully limit to minor release buckets if they
            # actually exist yet.
            buckets = self.minor_releases(manager)
            if buckets:
                specstr = ">={}".format(max(buckets))
        return Spec(specstr) if specstr else Spec()

    def add_to_manager(self, manager):
        """
        Given a 'manager' structure, add self to one or more of its 'buckets'.
        """
        # Derive version spec allowing us to filter against major/minor buckets
        spec = self.spec or self.default_spec(manager)
        # Only look in appropriate major version/family; if self is an issue
        # declared as living in e.g. >=2, this means we don't even bother
        # looking in the 1.x family.
        families = [Version(str(x)) for x in manager]
        versions = list(spec.filter(families))
        for version in versions:
            family = version.major
            # Within each family, we further limit which bugfix lines match up
            # to what self cares about (ignoring 'unreleased' until later)
            candidates = [
                Version(x)
                for x in manager[family]
                if not x.startswith('unreleased')
            ]
            # Select matching release lines (& stringify)
            buckets = []
            bugfix_buckets = [str(x) for x in spec.filter(candidates)]
            # Add back in unreleased_* as appropriate
            # TODO: probably leverage Issue subclasses for this eventually?
            if self.is_buglike:
                buckets.extend(bugfix_buckets)
                # Don't put into JUST unreleased_bugfix; it implies that this
                # major release/family hasn't actually seen any releases yet
                # and only exists for features to go into.
                if bugfix_buckets:
                    buckets.append('unreleased_bugfix')
            # Obtain list of minor releases to check for "haven't had ANY
            # releases yet" corner case, in which case ALL issues get thrown in
            # unreleased_feature for the first release to consume.
            # NOTE: assumes first release is a minor or major one,
            # but...really? why would your first release be a bugfix one??
            no_releases = not self.minor_releases(manager)
            if self.is_featurelike or self.backported or no_releases:
                buckets.append('unreleased_feature')
            # Now that we know which buckets are appropriate, add ourself to
            # all of them. TODO: or just...do it above...instead...
            for bucket in buckets:
                manager[family][bucket].append(self)

    def __repr__(self):
        # Compact debugging form, e.g. "<bug #12 (backported)>".
        flag = ''
        if self.backported:
            flag = 'backported'
        elif self.major:
            flag = 'major'
        elif self.spec:
            flag = self.spec
        if flag:
            flag = ' ({})'.format(flag)
        return '<{issue.type} #{issue.number}{flag}>'.format(issue=self,
                                                             flag=flag)
|
bitprophet/releases
|
releases/models.py
|
Issue.default_spec
|
python
|
def default_spec(self, manager):
    """
    Given the current release-lines structure, return a default Spec.

    Feature-like issues target the highest major family in ``manager``;
    bugfix-like ones target the highest minor release line that actually has
    releases. Under unstable prehistory with stable families present, the
    baseline skips the 0.x line.
    """
    # TODO: I feel like this + the surrounding bits in add_to_manager()
    # could be consolidated & simplified...
    specstr = ""
    # Make sure truly-default spec skips 0.x if prehistory was unstable.
    stable_families = manager.stable_families
    if manager.config.releases_unstable_prehistory and stable_families:
        specstr = ">={}".format(min(stable_families))
    if self.is_featurelike:
        # TODO: if app->config-><releases_always_forwardport_features or
        # w/e
        # NOTE(review): 'if True' is a placeholder pending the config flag
        # above - features currently always target the newest family.
        if True:
            specstr = ">={}".format(max(manager.keys()))
    else:
        # Can only meaningfully limit to minor release buckets if they
        # actually exist yet.
        buckets = self.minor_releases(manager)
        if buckets:
            specstr = ">={}".format(max(buckets))
    return Spec(specstr) if specstr else Spec()
|
Given the current release-lines structure, return a default Spec.
Specifics:
* For feature-like issues, only the highest major release is used, so
given a ``manager`` with top level keys of ``[1, 2]``, this would
return ``Spec(">=2")``.
* When ``releases_always_forwardport_features`` is ``True``, that
behavior is nullified, and this function always returns the empty
``Spec`` (which matches any and all versions/lines).
* For bugfix-like issues, we only consider major release families which
have actual releases already.
* Thus the core difference here is that features are 'consumed' by
upcoming major releases, and bugfixes are not.
* When the ``unstable_prehistory`` setting is ``True``, the default
spec starts at the oldest non-zero release line. (Otherwise, issues
posted after prehistory ends would try being added to the 0.x part of
the tree, which makes no sense in unstable-prehistory mode.)
|
train
|
https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/models.py#L82-L125
|
[
"def minor_releases(self, manager):\n \"\"\"\n Return all minor release line labels found in ``manager``.\n \"\"\"\n # TODO: yea deffo need a real object for 'manager', heh. E.g. we do a\n # very similar test for \"do you have any actual releases yet?\"\n # elsewhere. (This may be fodder for changing how we roll up\n # pre-major-release features though...?)\n return [\n key for key, value in six.iteritems(manager)\n if any(x for x in value if not x.startswith('unreleased'))\n ]\n"
] |
class Issue(nodes.Element):
# Technically, we just need number, but heck, you never know...
_cmp_keys = ('type', 'number', 'backported', 'major')
@property
def type(self):
return self['type_']
@property
def is_featurelike(self):
if self.type == 'bug':
return self.major
else:
return not self.backported
@property
def is_buglike(self):
return not self.is_featurelike
@property
def backported(self):
return self.get('backported', False)
@property
def major(self):
return self.get('major', False)
@property
def number(self):
return self.get('number', None)
@property
def spec(self):
return self.get('spec', None)
def __eq__(self, other):
for attr in self._cmp_keys:
if getattr(self, attr, None) != getattr(other, attr, None):
return False
return True
def __hash__(self):
return reduce(xor, [hash(getattr(self, x)) for x in self._cmp_keys])
def minor_releases(self, manager):
"""
Return all minor release line labels found in ``manager``.
"""
# TODO: yea deffo need a real object for 'manager', heh. E.g. we do a
# very similar test for "do you have any actual releases yet?"
# elsewhere. (This may be fodder for changing how we roll up
# pre-major-release features though...?)
return [
key for key, value in six.iteritems(manager)
if any(x for x in value if not x.startswith('unreleased'))
]
def add_to_manager(self, manager):
"""
Given a 'manager' structure, add self to one or more of its 'buckets'.
"""
# Derive version spec allowing us to filter against major/minor buckets
spec = self.spec or self.default_spec(manager)
# Only look in appropriate major version/family; if self is an issue
# declared as living in e.g. >=2, this means we don't even bother
# looking in the 1.x family.
families = [Version(str(x)) for x in manager]
versions = list(spec.filter(families))
for version in versions:
family = version.major
# Within each family, we further limit which bugfix lines match up
# to what self cares about (ignoring 'unreleased' until later)
candidates = [
Version(x)
for x in manager[family]
if not x.startswith('unreleased')
]
# Select matching release lines (& stringify)
buckets = []
bugfix_buckets = [str(x) for x in spec.filter(candidates)]
# Add back in unreleased_* as appropriate
# TODO: probably leverage Issue subclasses for this eventually?
if self.is_buglike:
buckets.extend(bugfix_buckets)
# Don't put into JUST unreleased_bugfix; it implies that this
# major release/family hasn't actually seen any releases yet
# and only exists for features to go into.
if bugfix_buckets:
buckets.append('unreleased_bugfix')
# Obtain list of minor releases to check for "haven't had ANY
# releases yet" corner case, in which case ALL issues get thrown in
# unreleased_feature for the first release to consume.
# NOTE: assumes first release is a minor or major one,
# but...really? why would your first release be a bugfix one??
no_releases = not self.minor_releases(manager)
if self.is_featurelike or self.backported or no_releases:
buckets.append('unreleased_feature')
# Now that we know which buckets are appropriate, add ourself to
# all of them. TODO: or just...do it above...instead...
for bucket in buckets:
manager[family][bucket].append(self)
def __repr__(self):
flag = ''
if self.backported:
flag = 'backported'
elif self.major:
flag = 'major'
elif self.spec:
flag = self.spec
if flag:
flag = ' ({})'.format(flag)
return '<{issue.type} #{issue.number}{flag}>'.format(issue=self,
flag=flag)
|
bitprophet/releases
|
releases/models.py
|
Issue.add_to_manager
|
python
|
def add_to_manager(self, manager):
# Derive version spec allowing us to filter against major/minor buckets
spec = self.spec or self.default_spec(manager)
# Only look in appropriate major version/family; if self is an issue
# declared as living in e.g. >=2, this means we don't even bother
# looking in the 1.x family.
families = [Version(str(x)) for x in manager]
versions = list(spec.filter(families))
for version in versions:
family = version.major
# Within each family, we further limit which bugfix lines match up
# to what self cares about (ignoring 'unreleased' until later)
candidates = [
Version(x)
for x in manager[family]
if not x.startswith('unreleased')
]
# Select matching release lines (& stringify)
buckets = []
bugfix_buckets = [str(x) for x in spec.filter(candidates)]
# Add back in unreleased_* as appropriate
# TODO: probably leverage Issue subclasses for this eventually?
if self.is_buglike:
buckets.extend(bugfix_buckets)
# Don't put into JUST unreleased_bugfix; it implies that this
# major release/family hasn't actually seen any releases yet
# and only exists for features to go into.
if bugfix_buckets:
buckets.append('unreleased_bugfix')
# Obtain list of minor releases to check for "haven't had ANY
# releases yet" corner case, in which case ALL issues get thrown in
# unreleased_feature for the first release to consume.
# NOTE: assumes first release is a minor or major one,
# but...really? why would your first release be a bugfix one??
no_releases = not self.minor_releases(manager)
if self.is_featurelike or self.backported or no_releases:
buckets.append('unreleased_feature')
# Now that we know which buckets are appropriate, add ourself to
# all of them. TODO: or just...do it above...instead...
for bucket in buckets:
manager[family][bucket].append(self)
|
Given a 'manager' structure, add self to one or more of its 'buckets'.
|
train
|
https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/models.py#L127-L170
|
[
"def minor_releases(self, manager):\n \"\"\"\n Return all minor release line labels found in ``manager``.\n \"\"\"\n # TODO: yea deffo need a real object for 'manager', heh. E.g. we do a\n # very similar test for \"do you have any actual releases yet?\"\n # elsewhere. (This may be fodder for changing how we roll up\n # pre-major-release features though...?)\n return [\n key for key, value in six.iteritems(manager)\n if any(x for x in value if not x.startswith('unreleased'))\n ]\n",
"def default_spec(self, manager):\n \"\"\"\n Given the current release-lines structure, return a default Spec.\n\n Specifics:\n\n * For feature-like issues, only the highest major release is used, so\n given a ``manager`` with top level keys of ``[1, 2]``, this would\n return ``Spec(\">=2\")``.\n\n * When ``releases_always_forwardport_features`` is ``True``, that\n behavior is nullified, and this function always returns the empty\n ``Spec`` (which matches any and all versions/lines).\n\n * For bugfix-like issues, we only consider major release families which\n have actual releases already.\n\n * Thus the core difference here is that features are 'consumed' by\n upcoming major releases, and bugfixes are not.\n\n * When the ``unstable_prehistory`` setting is ``True``, the default\n spec starts at the oldest non-zero release line. (Otherwise, issues\n posted after prehistory ends would try being added to the 0.x part of\n the tree, which makes no sense in unstable-prehistory mode.)\n \"\"\"\n # TODO: I feel like this + the surrounding bits in add_to_manager()\n # could be consolidated & simplified...\n specstr = \"\"\n # Make sure truly-default spec skips 0.x if prehistory was unstable.\n stable_families = manager.stable_families\n if manager.config.releases_unstable_prehistory and stable_families:\n specstr = \">={}\".format(min(stable_families))\n if self.is_featurelike:\n # TODO: if app->config-><releases_always_forwardport_features or\n # w/e\n if True:\n specstr = \">={}\".format(max(manager.keys()))\n else:\n # Can only meaningfully limit to minor release buckets if they\n # actually exist yet.\n buckets = self.minor_releases(manager)\n if buckets:\n specstr = \">={}\".format(max(buckets))\n return Spec(specstr) if specstr else Spec()\n"
] |
class Issue(nodes.Element):
# Technically, we just need number, but heck, you never know...
_cmp_keys = ('type', 'number', 'backported', 'major')
@property
def type(self):
return self['type_']
@property
def is_featurelike(self):
if self.type == 'bug':
return self.major
else:
return not self.backported
@property
def is_buglike(self):
return not self.is_featurelike
@property
def backported(self):
return self.get('backported', False)
@property
def major(self):
return self.get('major', False)
@property
def number(self):
return self.get('number', None)
@property
def spec(self):
return self.get('spec', None)
def __eq__(self, other):
for attr in self._cmp_keys:
if getattr(self, attr, None) != getattr(other, attr, None):
return False
return True
def __hash__(self):
return reduce(xor, [hash(getattr(self, x)) for x in self._cmp_keys])
def minor_releases(self, manager):
"""
Return all minor release line labels found in ``manager``.
"""
# TODO: yea deffo need a real object for 'manager', heh. E.g. we do a
# very similar test for "do you have any actual releases yet?"
# elsewhere. (This may be fodder for changing how we roll up
# pre-major-release features though...?)
return [
key for key, value in six.iteritems(manager)
if any(x for x in value if not x.startswith('unreleased'))
]
def default_spec(self, manager):
"""
Given the current release-lines structure, return a default Spec.
Specifics:
* For feature-like issues, only the highest major release is used, so
given a ``manager`` with top level keys of ``[1, 2]``, this would
return ``Spec(">=2")``.
* When ``releases_always_forwardport_features`` is ``True``, that
behavior is nullified, and this function always returns the empty
``Spec`` (which matches any and all versions/lines).
* For bugfix-like issues, we only consider major release families which
have actual releases already.
* Thus the core difference here is that features are 'consumed' by
upcoming major releases, and bugfixes are not.
* When the ``unstable_prehistory`` setting is ``True``, the default
spec starts at the oldest non-zero release line. (Otherwise, issues
posted after prehistory ends would try being added to the 0.x part of
the tree, which makes no sense in unstable-prehistory mode.)
"""
# TODO: I feel like this + the surrounding bits in add_to_manager()
# could be consolidated & simplified...
specstr = ""
# Make sure truly-default spec skips 0.x if prehistory was unstable.
stable_families = manager.stable_families
if manager.config.releases_unstable_prehistory and stable_families:
specstr = ">={}".format(min(stable_families))
if self.is_featurelike:
# TODO: if app->config-><releases_always_forwardport_features or
# w/e
if True:
specstr = ">={}".format(max(manager.keys()))
else:
# Can only meaningfully limit to minor release buckets if they
# actually exist yet.
buckets = self.minor_releases(manager)
if buckets:
specstr = ">={}".format(max(buckets))
return Spec(specstr) if specstr else Spec()
def __repr__(self):
flag = ''
if self.backported:
flag = 'backported'
elif self.major:
flag = 'major'
elif self.spec:
flag = self.spec
if flag:
flag = ' ({})'.format(flag)
return '<{issue.type} #{issue.number}{flag}>'.format(issue=self,
flag=flag)
|
tompollard/tableone
|
tableone.py
|
TableOne._generate_remark_str
|
python
|
def _generate_remark_str(self, end_of_line = '\n'):
warnings = {}
msg = '{}'.format(end_of_line)
# generate warnings for continuous variables
if self._continuous:
# highlight far outliers
outlier_mask = self.cont_describe.far_outliers > 1
outlier_vars = list(self.cont_describe.far_outliers[outlier_mask].dropna(how='all').index)
if outlier_vars:
warnings["Warning, Tukey test indicates far outliers in"] = outlier_vars
# highlight possible multimodal distributions using hartigan's dip test
# -1 values indicate NaN
modal_mask = (self.cont_describe.diptest >= 0) & (self.cont_describe.diptest <= 0.05)
modal_vars = list(self.cont_describe.diptest[modal_mask].dropna(how='all').index)
if modal_vars:
warnings["Warning, Hartigan's Dip Test reports possible multimodal distributions for"] = modal_vars
# highlight non normal distributions
# -1 values indicate NaN
modal_mask = (self.cont_describe.normaltest >= 0) & (self.cont_describe.normaltest <= 0.001)
modal_vars = list(self.cont_describe.normaltest[modal_mask].dropna(how='all').index)
if modal_vars:
warnings["Warning, test for normality reports non-normal distributions for"] = modal_vars
# create the warning string
for n,k in enumerate(sorted(warnings)):
msg += '[{}] {}: {}.{}'.format(n+1,k,', '.join(warnings[k]), end_of_line)
return msg
|
Generate a series of remarks that the user should consider
when interpreting the summary statistics.
|
train
|
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L200-L235
| null |
class TableOne(object):
"""
If you use the tableone package, please cite:
Pollard TJ, Johnson AEW, Raffa JD, Mark RG (2018). tableone: An open source Python
package for producing summary statistics for research papers. JAMIA Open, Volume 1,
Issue 1, 1 July 2018, Pages 26-31. https://doi.org/10.1093/jamiaopen/ooy012
Create an instance of the tableone summary table.
Parameters
----------
data : pandas DataFrame
The dataset to be summarised. Rows are observations, columns are variables.
columns : list, optional
List of columns in the dataset to be included in the final table.
categorical : list, optional
List of columns that contain categorical variables.
groupby : str, optional
Optional column for stratifying the final table (default: None).
nonnormal : list, optional
List of columns that contain non-normal variables (default: None).
pval : bool, optional
Display computed p-values (default: False).
pval_adjust : str, optional
Method used to adjust p-values for multiple testing.
For a complete list, see documentation for statsmodels multipletests.
Available methods include ::
`None` : no correction applied.
`bonferroni` : one-step correction
`sidak` : one-step correction
`holm-sidak` : step down method using Sidak adjustments
`simes-hochberg` : step-up method (independent)
`hommel` : closed method based on Simes tests (non-negative)
isnull : bool, optional
Display a count of null values (default: True).
ddof : int, optional
Degrees of freedom for standard deviation calculations (default: 1).
labels : dict, optional
Dictionary of alternative labels for variables.
e.g. `labels = {'sex':'gender', 'trt':'treatment'}`
sort : bool, optional
Sort the rows alphabetically. Default (False) retains the input order
of columns.
limit : int, optional
Limit to the top N most frequent categories.
remarks : bool, optional
Add remarks on the appropriateness of the summary measures and the
statistical tests (default: True).
label_suffix : bool, optional
Append summary type (e.g. "mean (SD); median [Q1,Q3], n (%); ") to the
row label (default: False).
decimals : int or dict, optional
Number of decimal places to display. An integer applies the rule to all
variables (default: 1). A dictionary (e.g. `decimals = {'age': 0)`) applies
the rule per variable, defaulting to 1 place for unspecified variables.
For continuous variables, applies to all summary statistics (e.g. mean and
standard deviation). For categorical variables, applies to percentage only.
Attributes
----------
tableone : dataframe
Summary of the data (i.e., the "Table 1").
"""
def __init__(self, data, columns=None, categorical=None, groupby=None,
nonnormal=None, pval=False, pval_adjust=None, isnull=True,
ddof=1, labels=None, sort=False, limit=None, remarks=True,
label_suffix=False, decimals=1):
# check input arguments
if not groupby:
groupby = ''
elif groupby and type(groupby) == list:
groupby = groupby[0]
if not nonnormal:
nonnormal=[]
elif nonnormal and type(nonnormal) == str:
nonnormal = [nonnormal]
# if columns not specified, use all columns
if not columns:
columns = data.columns.get_values()
# check that the columns exist in the dataframe
if not set(columns).issubset(data.columns):
notfound = list(set(columns) - set(data.columns))
raise InputError('Columns not found in dataset: {}'.format(notfound))
# check for duplicate columns
dups = data[columns].columns[data[columns].columns.duplicated()].unique()
if not dups.empty:
raise InputError('Input contains duplicate columns: {}'.format(dups))
# if categorical not specified, try to identify categorical
if not categorical and type(categorical) != list:
categorical = self._detect_categorical_columns(data[columns])
if pval and not groupby:
raise InputError("If pval=True then the groupby must be specified.")
self._columns = list(columns)
self._isnull = isnull
self._continuous = [c for c in columns if c not in categorical + [groupby]]
self._categorical = categorical
self._nonnormal = nonnormal
self._pval = pval
self._pval_adjust = pval_adjust
self._sort = sort
self._groupby = groupby
self._ddof = ddof # degrees of freedom for standard deviation
self._alt_labels = labels
self._limit = limit
self._remarks = remarks
self._label_suffix = label_suffix
self._decimals = decimals
# output column names that cannot be contained in a groupby
self._reserved_columns = ['isnull', 'pval', 'ptest', 'pval (adjusted)']
if self._groupby:
self._groupbylvls = sorted(data.groupby(groupby).groups.keys())
# check that the group levels do not include reserved words
for level in self._groupbylvls:
if level in self._reserved_columns:
raise InputError('Group level contained "{}", a reserved keyword for tableone.'.format(level))
else:
self._groupbylvls = ['overall']
# forgive me jraffa
if self._pval:
self._significance_table = self._create_significance_table(data)
# correct for multiple testing
if self._pval and self._pval_adjust:
alpha=0.05
adjusted = multitest.multipletests(self._significance_table['pval'],
alpha=alpha, method=self._pval_adjust)
self._significance_table['pval (adjusted)'] = adjusted[1]
self._significance_table['adjust method'] = self._pval_adjust
# create descriptive tables
if self._categorical:
self.cat_describe = self._create_cat_describe(data)
self.cat_table = self._create_cat_table(data)
# create tables of continuous and categorical variables
if self._continuous:
self.cont_describe = self._create_cont_describe(data)
self.cont_table = self._create_cont_table(data)
# combine continuous variables and categorical variables into table 1
self.tableone = self._create_tableone(data)
# self._remarks_str = self._generate_remark_str()
# wrap dataframe methods
self.head = self.tableone.head
self.tail = self.tableone.tail
self.to_csv = self.tableone.to_csv
self.to_excel = self.tableone.to_excel
self.to_html = self.tableone.to_html
self.to_json = self.tableone.to_json
self.to_latex = self.tableone.to_latex
def __str__(self):
return self.tableone.to_string() + self._generate_remark_str('\n')
def __repr__(self):
return self.tableone.to_string() + self._generate_remark_str('\n')
def _repr_html_(self):
return self.tableone._repr_html_() + self._generate_remark_str('<br />')
def _detect_categorical_columns(self,data):
"""
Detect categorical columns if they are not specified.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
likely_cat : list
List of variables that appear to be categorical.
"""
# assume all non-numerical and date columns are categorical
numeric_cols = set(data._get_numeric_data().columns.values)
date_cols = set(data.select_dtypes(include=[np.datetime64]).columns)
likely_cat = set(data.columns) - numeric_cols
likely_cat = list(likely_cat - date_cols)
# check proportion of unique values if numerical
for var in data._get_numeric_data().columns:
likely_flag = 1.0 * data[var].nunique()/data[var].count() < 0.05
if likely_flag:
likely_cat.append(var)
return likely_cat
def _q25(self,x):
"""
Compute percentile (25th)
"""
return np.nanpercentile(x.values,25)
def _q75(self,x):
"""
Compute percentile (75th)
"""
return np.nanpercentile(x.values,75)
def _std(self,x):
"""
Compute standard deviation with ddof degrees of freedom
"""
return np.nanstd(x.values,ddof=self._ddof)
def _diptest(self,x):
"""
Compute Hartigan Dip Test for modality.
p < 0.05 suggests possible multimodality.
"""
p = modality.hartigan_diptest(x.values)
# dropna=False argument in pivot_table does not function as expected
# return -1 instead of None
if pd.isnull(p):
return -1
return p
def _normaltest(self,x):
"""
Compute test for normal distribution.
Null hypothesis: x comes from a normal distribution
p < alpha suggests the null hypothesis can be rejected.
"""
if len(x.values[~np.isnan(x.values)]) > 10:
stat,p = stats.normaltest(x.values, nan_policy='omit')
else:
p = None
# dropna=False argument in pivot_table does not function as expected
# return -1 instead of None
if pd.isnull(p):
return -1
return p
def _tukey(self,x,threshold):
"""
Count outliers according to Tukey's rule.
Where Q1 is the lower quartile and Q3 is the upper quartile,
an outlier is an observation outside of the range:
[Q1 - k(Q3 - Q1), Q3 + k(Q3 - Q1)]
k = 1.5 indicates an outlier
k = 3.0 indicates an outlier that is "far out"
"""
vals = x.values[~np.isnan(x.values)]
try:
q1, q3 = np.percentile(vals, [25, 75])
iqr = q3 - q1
low_bound = q1 - (iqr * threshold)
high_bound = q3 + (iqr * threshold)
outliers = np.where((vals > high_bound) | (vals < low_bound))
except:
outliers = []
return outliers
def _outliers(self,x):
"""
Compute number of outliers
"""
outliers = self._tukey(x, threshold = 1.5)
return np.size(outliers)
def _far_outliers(self,x):
"""
Compute number of "far out" outliers
"""
outliers = self._tukey(x, threshold = 3.0)
return np.size(outliers)
def _t1_summary(self,x):
"""
Compute median [IQR] or mean (Std) for the input series.
Parameters
----------
x : pandas Series
Series of values to be summarised.
"""
# set decimal places
if isinstance(self._decimals,int):
n = self._decimals
elif isinstance(self._decimals,dict):
try:
n = self._decimals[x.name]
except:
n = 1
else:
n = 1
warnings.warn('The decimals arg must be an int or dict. Defaulting to {} d.p.'.format(n))
if x.name in self._nonnormal:
f = '{{:.{}f}} [{{:.{}f}},{{:.{}f}}]'.format(n,n,n)
return f.format(np.nanmedian(x.values),
np.nanpercentile(x.values,25), np.nanpercentile(x.values,75))
else:
f = '{{:.{}f}} ({{:.{}f}})'.format(n,n)
return f.format(np.nanmean(x.values),
np.nanstd(x.values,ddof=self._ddof))
def _create_cont_describe(self,data):
"""
Describe the continuous data.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
df_cont : pandas DataFrame
Summarise the continuous variables.
"""
aggfuncs = [pd.Series.count,np.mean,np.median,self._std,
self._q25,self._q75,min,max,self._t1_summary,self._diptest,
self._outliers,self._far_outliers,self._normaltest]
# coerce continuous data to numeric
cont_data = data[self._continuous].apply(pd.to_numeric, errors='coerce')
# check all data in each continuous column is numeric
bad_cols = cont_data.count() != data[self._continuous].count()
bad_cols = cont_data.columns[bad_cols]
if len(bad_cols)>0:
raise InputError("""The following continuous column(s) have non-numeric values: {}.
Either specify the column(s) as categorical or remove the non-numeric values.""".format(bad_cols.values))
# check for coerced column containing all NaN to warn user
for column in cont_data.columns[cont_data.count() == 0]:
self._non_continuous_warning(column)
if self._groupby:
# add the groupby column back
cont_data = cont_data.merge(data[[self._groupby]],
left_index=True, right_index=True)
# group and aggregate data
df_cont = pd.pivot_table(cont_data,
columns=[self._groupby],
aggfunc=aggfuncs)
else:
# if no groupby, just add single group column
df_cont = cont_data.apply(aggfuncs).T
df_cont.columns.name = 'overall'
df_cont.columns = pd.MultiIndex.from_product([df_cont.columns,
['overall']])
df_cont.index.rename('variable',inplace=True)
# remove prefix underscore from column names (e.g. _std -> std)
agg_rename = df_cont.columns.levels[0]
agg_rename = [x[1:] if x[0]=='_' else x for x in agg_rename]
df_cont.columns.set_levels(agg_rename, level=0, inplace=True)
return df_cont
def _format_cat(self,row):
var = row.name[0]
if var in self._decimals:
n = self._decimals[var]
else:
n = 1
f = '{{:.{}f}}'.format(n)
return f.format(row.percent)
def _create_cat_describe(self,data):
"""
Describe the categorical data.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
df_cat : pandas DataFrame
Summarise the categorical variables.
"""
group_dict = {}
for g in self._groupbylvls:
if self._groupby:
d_slice = data.loc[data[self._groupby] == g, self._categorical]
else:
d_slice = data[self._categorical].copy()
# create a dataframe with freq, proportion
df = d_slice.copy()
# convert type to string to avoid int converted to boolean, avoiding nans
for column in df.columns:
df[column] = [str(row) if not pd.isnull(row) else None for row in df[column].values]
df = df.melt().groupby(['variable','value']).size().to_frame(name='freq')
df.index.set_names('level', level=1, inplace=True)
df['percent'] = df['freq'].div(df.freq.sum(level=0),level=0).astype(float)* 100
# set number of decimal places for percent
if isinstance(self._decimals,int):
n = self._decimals
f = '{{:.{}f}}'.format(n)
df['percent'] = df['percent'].astype(float).map(f.format)
elif isinstance(self._decimals,dict):
df.loc[:,'percent'] = df.apply(self._format_cat, axis=1)
else:
n = 1
f = '{{:.{}f}}'.format(n)
df['percent'] = df['percent'].astype(float).map(f.format)
# add n column, listing total non-null values for each variable
ct = d_slice.count().to_frame(name='n')
ct.index.name = 'variable'
df = df.join(ct)
# add null count
nulls = d_slice.isnull().sum().to_frame(name='isnull')
nulls.index.name = 'variable'
# only save null count to the first category for each variable
# do this by extracting the first category from the df row index
levels = df.reset_index()[['variable','level']].groupby('variable').first()
# add this category to the nulls table
nulls = nulls.join(levels)
nulls.set_index('level', append=True, inplace=True)
# join nulls to categorical
df = df.join(nulls)
# add summary column
df['t1_summary'] = df.freq.map(str) + ' (' + df.percent.map(str) + ')'
# add to dictionary
group_dict[g] = df
df_cat = pd.concat(group_dict,axis=1)
# ensure the groups are the 2nd level of the column index
if df_cat.columns.nlevels>1:
df_cat = df_cat.swaplevel(0, 1, axis=1).sort_index(axis=1,level=0)
return df_cat
def _create_significance_table(self,data):
"""
Create a table containing p-values for significance tests. Add features of
the distributions and the p-values to the dataframe.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
df : pandas DataFrame
A table containing the p-values, test name, etc.
"""
# list features of the variable e.g. matched, paired, n_expected
df=pd.DataFrame(index=self._continuous+self._categorical,
columns=['continuous','nonnormal','min_observed','pval','ptest'])
df.index.rename('variable', inplace=True)
df['continuous'] = np.where(df.index.isin(self._continuous),True,False)
df['nonnormal'] = np.where(df.index.isin(self._nonnormal),True,False)
# list values for each variable, grouped by groupby levels
for v in df.index:
is_continuous = df.loc[v]['continuous']
is_categorical = ~df.loc[v]['continuous']
is_normal = ~df.loc[v]['nonnormal']
# if continuous, group data into list of lists
if is_continuous:
catlevels = None
grouped_data = []
for s in self._groupbylvls:
lvl_data = data.loc[data[self._groupby]==s, v]
# coerce to numeric and drop non-numeric data
lvl_data = lvl_data.apply(pd.to_numeric, errors='coerce').dropna()
# append to overall group data
grouped_data.append(lvl_data.values)
min_observed = len(min(grouped_data,key=len))
# if categorical, create contingency table
elif is_categorical:
catlevels = sorted(data[v].astype('category').cat.categories)
grouped_data = pd.crosstab(data[self._groupby].rename('_groupby_var_'),data[v])
min_observed = grouped_data.sum(axis=1).min()
# minimum number of observations across all levels
df.loc[v,'min_observed'] = min_observed
# compute pvalues
df.loc[v,'pval'],df.loc[v,'ptest'] = self._p_test(v,
grouped_data,is_continuous,is_categorical,
is_normal,min_observed,catlevels)
return df
def _p_test(self,v,grouped_data,is_continuous,is_categorical,
is_normal,min_observed,catlevels):
"""
Compute p-values.
Parameters
----------
v : str
Name of the variable to be tested.
grouped_data : list
List of lists of values to be tested.
is_continuous : bool
True if the variable is continuous.
is_categorical : bool
True if the variable is categorical.
is_normal : bool
True if the variable is normally distributed.
min_observed : int
Minimum number of values across groups for the variable.
catlevels : list
Sorted list of levels for categorical variables.
Returns
----------
pval : float
The computed p-value.
ptest : str
The name of the test used to compute the p-value.
"""
# no test by default
pval=np.nan
ptest='Not tested'
# do not test if the variable has no observations in a level
if min_observed == 0:
warnings.warn('No p-value was computed for {} due to the low number of observations.'.format(v))
return pval,ptest
# continuous
if is_continuous and is_normal and len(grouped_data)==2 :
ptest = 'Two Sample T-test'
test_stat, pval = stats.ttest_ind(*grouped_data,equal_var=False)
elif is_continuous and is_normal:
# normally distributed
ptest = 'One-way ANOVA'
test_stat, pval = stats.f_oneway(*grouped_data)
elif is_continuous and not is_normal:
# non-normally distributed
ptest = 'Kruskal-Wallis'
test_stat, pval = stats.kruskal(*grouped_data)
# categorical
elif is_categorical:
# default to chi-squared
ptest = 'Chi-squared'
chi2, pval, dof, expected = stats.chi2_contingency(grouped_data)
# if any expected cell counts are < 5, chi2 may not be valid
# if this is a 2x2, switch to fisher exact
if expected.min() < 5:
if grouped_data.shape == (2,2):
ptest = "Fisher's exact"
oddsratio, pval = stats.fisher_exact(grouped_data)
else:
ptest = 'Chi-squared (warning: expected count < 5)'
warnings.warn('No p-value was computed for {} due to the low number of observations.'.format(v))
return pval,ptest
def _create_cont_table(self,data):
"""
Create tableone for continuous data.
Returns
----------
table : pandas DataFrame
A table summarising the continuous variables.
"""
# remove the t1_summary level
table = self.cont_describe[['t1_summary']].copy()
table.columns = table.columns.droplevel(level=0)
# add a column of null counts as 1-count() from previous function
nulltable = data[self._continuous].isnull().sum().to_frame(name='isnull')
try:
table = table.join(nulltable)
except TypeError: # if columns form a CategoricalIndex, need to convert to string first
table.columns = table.columns.astype(str)
table = table.join(nulltable)
# add an empty level column, for joining with cat table
table['level'] = ''
table.set_index([table.index,'level'],inplace=True)
# add pval column
if self._pval and self._pval_adjust:
table = table.join(self._significance_table[['pval (adjusted)','ptest']])
elif self._pval:
table = table.join(self._significance_table[['pval','ptest']])
return table
def _create_cat_table(self,data):
"""
Create table one for categorical data.
Returns
----------
table : pandas DataFrame
A table summarising the categorical variables.
"""
table = self.cat_describe['t1_summary'].copy()
# add the total count of null values across all levels
isnull = data[self._categorical].isnull().sum().to_frame(name='isnull')
isnull.index.rename('variable', inplace=True)
try:
table = table.join(isnull)
except TypeError: # if columns form a CategoricalIndex, need to convert to string first
table.columns = table.columns.astype(str)
table = table.join(isnull)
# add pval column
if self._pval and self._pval_adjust:
table = table.join(self._significance_table[['pval (adjusted)','ptest']])
elif self._pval:
table = table.join(self._significance_table[['pval','ptest']])
return table
def _create_tableone(self,data):
"""
Create table 1 by combining the continuous and categorical tables.
Returns
----------
table : pandas DataFrame
The complete table one.
"""
if self._continuous and self._categorical:
# support pandas<=0.22
try:
table = pd.concat([self.cont_table,self.cat_table],sort=False)
except:
table = pd.concat([self.cont_table,self.cat_table])
elif self._continuous:
table = self.cont_table
elif self._categorical:
table = self.cat_table
# round pval column and convert to string
if self._pval and self._pval_adjust:
table['pval (adjusted)'] = table['pval (adjusted)'].apply('{:.3f}'.format).astype(str)
table.loc[table['pval (adjusted)'] == '0.000', 'pval (adjusted)'] = '<0.001'
elif self._pval:
table['pval'] = table['pval'].apply('{:.3f}'.format).astype(str)
table.loc[table['pval'] == '0.000', 'pval'] = '<0.001'
# sort the table rows
table.reset_index().set_index(['variable','level'], inplace=True)
if self._sort:
# alphabetical
new_index = sorted(table.index.values)
else:
# sort by the columns argument
new_index = sorted(table.index.values,key=lambda x: self._columns.index(x[0]))
table = table.reindex(new_index)
# if a limit has been set on the number of categorical variables
# then re-order the variables by frequency
if self._limit:
levelcounts = data[self._categorical].nunique()
levelcounts = levelcounts[levelcounts >= self._limit]
for v,_ in levelcounts.iteritems():
count = data[v].value_counts().sort_values(ascending=False)
new_index = [(v, i) for i in count.index]
# restructure to match orig_index
new_index_array=np.empty((len(new_index),), dtype=object)
new_index_array[:]=[tuple(i) for i in new_index]
orig_index = table.index.values.copy()
orig_index[table.index.get_loc(v)] = new_index_array
table = table.reindex(orig_index)
# inserts n row
n_row = pd.DataFrame(columns = ['variable','level','isnull'])
n_row.set_index(['variable','level'], inplace=True)
n_row.loc['n', ''] = None
# support pandas<=0.22
try:
table = pd.concat([n_row,table],sort=False)
except:
table = pd.concat([n_row,table])
if self._groupbylvls == ['overall']:
table.loc['n','overall'] = len(data.index)
else:
for g in self._groupbylvls:
ct = data[self._groupby][data[self._groupby]==g].count()
table.loc['n',g] = ct
# only display data in first level row
dupe_mask = table.groupby(level=[0]).cumcount().ne(0)
dupe_columns = ['isnull']
optional_columns = ['pval','pval (adjusted)','ptest']
for col in optional_columns:
if col in table.columns.values:
dupe_columns.append(col)
table[dupe_columns] = table[dupe_columns].mask(dupe_mask).fillna('')
# remove empty column added above
table.drop([''], axis=1, inplace=True)
# remove isnull column if not needed
if not self._isnull:
table.drop('isnull',axis=1,inplace=True)
# replace nans with empty strings
table.fillna('',inplace=True)
# add column index
if not self._groupbylvls == ['overall']:
# rename groupby variable if requested
c = self._groupby
if self._alt_labels:
if self._groupby in self._alt_labels:
c = self._alt_labels[self._groupby]
c = 'Grouped by {}'.format(c)
table.columns = pd.MultiIndex.from_product([[c], table.columns])
# display alternative labels if assigned
table.rename(index=self._create_row_labels(), inplace=True, level=0)
# if a limit has been set on the number of categorical variables
# limit the number of categorical variables that are displayed
if self._limit:
table = table.groupby('variable').head(self._limit)
# re-order the columns in a consistent fashion
if self._groupby:
cols = table.columns.levels[1].values
else:
cols = table.columns.values
if 'isnull' in cols:
cols = ['isnull'] + [x for x in cols if x != 'isnull']
# iterate through each optional column
# if they exist, put them at the end of the dataframe
# ensures the last 3 columns will be in the same order as optional_columns
for col in optional_columns:
if col in cols:
cols = [x for x in cols if x != col] + [col]
if self._groupby:
table = table.reindex(cols, axis=1, level=1)
else:
table = table.reindex(cols, axis=1)
return table
def _create_row_labels(self):
"""
Take the original labels for rows. Rename if alternative labels are
provided. Append label suffix if label_suffix is True.
Returns
----------
labels : dictionary
Dictionary, keys are original column name, values are final label.
"""
# start with the original column names
labels = {}
for c in self._columns:
labels[c] = c
# replace column names with alternative names if provided
if self._alt_labels:
for k in self._alt_labels.keys():
labels[k] = self._alt_labels[k]
# append the label suffix
if self._label_suffix:
for k in labels.keys():
if k in self._nonnormal:
labels[k] = "{}, {}".format(labels[k],"median [Q1,Q3]")
elif k in self._categorical:
labels[k] = "{}, {}".format(labels[k],"n (%)")
else:
labels[k] = "{}, {}".format(labels[k],"mean (SD)")
return labels
# warnings
def _non_continuous_warning(self, c):
warnings.warn('''"{}" has all non-numeric values. Consider including it in the
list of categorical variables.'''.format(c), RuntimeWarning, stacklevel=2)
|
tompollard/tableone
|
tableone.py
|
TableOne._detect_categorical_columns
|
python
|
def _detect_categorical_columns(self,data):
# assume all non-numerical and date columns are categorical
numeric_cols = set(data._get_numeric_data().columns.values)
date_cols = set(data.select_dtypes(include=[np.datetime64]).columns)
likely_cat = set(data.columns) - numeric_cols
likely_cat = list(likely_cat - date_cols)
# check proportion of unique values if numerical
for var in data._get_numeric_data().columns:
likely_flag = 1.0 * data[var].nunique()/data[var].count() < 0.05
if likely_flag:
likely_cat.append(var)
return likely_cat
|
Detect categorical columns if they are not specified.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
likely_cat : list
List of variables that appear to be categorical.
|
train
|
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L237-L261
| null |
class TableOne(object):
"""
If you use the tableone package, please cite:
Pollard TJ, Johnson AEW, Raffa JD, Mark RG (2018). tableone: An open source Python
package for producing summary statistics for research papers. JAMIA Open, Volume 1,
Issue 1, 1 July 2018, Pages 26-31. https://doi.org/10.1093/jamiaopen/ooy012
Create an instance of the tableone summary table.
Parameters
----------
data : pandas DataFrame
The dataset to be summarised. Rows are observations, columns are variables.
columns : list, optional
List of columns in the dataset to be included in the final table.
categorical : list, optional
List of columns that contain categorical variables.
groupby : str, optional
Optional column for stratifying the final table (default: None).
nonnormal : list, optional
List of columns that contain non-normal variables (default: None).
pval : bool, optional
Display computed p-values (default: False).
pval_adjust : str, optional
Method used to adjust p-values for multiple testing.
For a complete list, see documentation for statsmodels multipletests.
Available methods include ::
`None` : no correction applied.
`bonferroni` : one-step correction
`sidak` : one-step correction
`holm-sidak` : step down method using Sidak adjustments
`simes-hochberg` : step-up method (independent)
`hommel` : closed method based on Simes tests (non-negative)
isnull : bool, optional
Display a count of null values (default: True).
ddof : int, optional
Degrees of freedom for standard deviation calculations (default: 1).
labels : dict, optional
Dictionary of alternative labels for variables.
e.g. `labels = {'sex':'gender', 'trt':'treatment'}`
sort : bool, optional
Sort the rows alphabetically. Default (False) retains the input order
of columns.
limit : int, optional
Limit to the top N most frequent categories.
remarks : bool, optional
Add remarks on the appropriateness of the summary measures and the
statistical tests (default: True).
label_suffix : bool, optional
Append summary type (e.g. "mean (SD); median [Q1,Q3], n (%); ") to the
row label (default: False).
decimals : int or dict, optional
Number of decimal places to display. An integer applies the rule to all
variables (default: 1). A dictionary (e.g. `decimals = {'age': 0)`) applies
the rule per variable, defaulting to 1 place for unspecified variables.
For continuous variables, applies to all summary statistics (e.g. mean and
standard deviation). For categorical variables, applies to percentage only.
Attributes
----------
tableone : dataframe
Summary of the data (i.e., the "Table 1").
"""
def __init__(self, data, columns=None, categorical=None, groupby=None,
nonnormal=None, pval=False, pval_adjust=None, isnull=True,
ddof=1, labels=None, sort=False, limit=None, remarks=True,
label_suffix=False, decimals=1):
# check input arguments
if not groupby:
groupby = ''
elif groupby and type(groupby) == list:
groupby = groupby[0]
if not nonnormal:
nonnormal=[]
elif nonnormal and type(nonnormal) == str:
nonnormal = [nonnormal]
# if columns not specified, use all columns
if not columns:
columns = data.columns.get_values()
# check that the columns exist in the dataframe
if not set(columns).issubset(data.columns):
notfound = list(set(columns) - set(data.columns))
raise InputError('Columns not found in dataset: {}'.format(notfound))
# check for duplicate columns
dups = data[columns].columns[data[columns].columns.duplicated()].unique()
if not dups.empty:
raise InputError('Input contains duplicate columns: {}'.format(dups))
# if categorical not specified, try to identify categorical
if not categorical and type(categorical) != list:
categorical = self._detect_categorical_columns(data[columns])
if pval and not groupby:
raise InputError("If pval=True then the groupby must be specified.")
self._columns = list(columns)
self._isnull = isnull
self._continuous = [c for c in columns if c not in categorical + [groupby]]
self._categorical = categorical
self._nonnormal = nonnormal
self._pval = pval
self._pval_adjust = pval_adjust
self._sort = sort
self._groupby = groupby
self._ddof = ddof # degrees of freedom for standard deviation
self._alt_labels = labels
self._limit = limit
self._remarks = remarks
self._label_suffix = label_suffix
self._decimals = decimals
# output column names that cannot be contained in a groupby
self._reserved_columns = ['isnull', 'pval', 'ptest', 'pval (adjusted)']
if self._groupby:
self._groupbylvls = sorted(data.groupby(groupby).groups.keys())
# check that the group levels do not include reserved words
for level in self._groupbylvls:
if level in self._reserved_columns:
raise InputError('Group level contained "{}", a reserved keyword for tableone.'.format(level))
else:
self._groupbylvls = ['overall']
# forgive me jraffa
if self._pval:
self._significance_table = self._create_significance_table(data)
# correct for multiple testing
if self._pval and self._pval_adjust:
alpha=0.05
adjusted = multitest.multipletests(self._significance_table['pval'],
alpha=alpha, method=self._pval_adjust)
self._significance_table['pval (adjusted)'] = adjusted[1]
self._significance_table['adjust method'] = self._pval_adjust
# create descriptive tables
if self._categorical:
self.cat_describe = self._create_cat_describe(data)
self.cat_table = self._create_cat_table(data)
# create tables of continuous and categorical variables
if self._continuous:
self.cont_describe = self._create_cont_describe(data)
self.cont_table = self._create_cont_table(data)
# combine continuous variables and categorical variables into table 1
self.tableone = self._create_tableone(data)
# self._remarks_str = self._generate_remark_str()
# wrap dataframe methods
self.head = self.tableone.head
self.tail = self.tableone.tail
self.to_csv = self.tableone.to_csv
self.to_excel = self.tableone.to_excel
self.to_html = self.tableone.to_html
self.to_json = self.tableone.to_json
self.to_latex = self.tableone.to_latex
def __str__(self):
return self.tableone.to_string() + self._generate_remark_str('\n')
def __repr__(self):
return self.tableone.to_string() + self._generate_remark_str('\n')
def _repr_html_(self):
return self.tableone._repr_html_() + self._generate_remark_str('<br />')
def _generate_remark_str(self, end_of_line = '\n'):
"""
Generate a series of remarks that the user should consider
when interpreting the summary statistics.
"""
warnings = {}
msg = '{}'.format(end_of_line)
# generate warnings for continuous variables
if self._continuous:
# highlight far outliers
outlier_mask = self.cont_describe.far_outliers > 1
outlier_vars = list(self.cont_describe.far_outliers[outlier_mask].dropna(how='all').index)
if outlier_vars:
warnings["Warning, Tukey test indicates far outliers in"] = outlier_vars
# highlight possible multimodal distributions using hartigan's dip test
# -1 values indicate NaN
modal_mask = (self.cont_describe.diptest >= 0) & (self.cont_describe.diptest <= 0.05)
modal_vars = list(self.cont_describe.diptest[modal_mask].dropna(how='all').index)
if modal_vars:
warnings["Warning, Hartigan's Dip Test reports possible multimodal distributions for"] = modal_vars
# highlight non normal distributions
# -1 values indicate NaN
modal_mask = (self.cont_describe.normaltest >= 0) & (self.cont_describe.normaltest <= 0.001)
modal_vars = list(self.cont_describe.normaltest[modal_mask].dropna(how='all').index)
if modal_vars:
warnings["Warning, test for normality reports non-normal distributions for"] = modal_vars
# create the warning string
for n,k in enumerate(sorted(warnings)):
msg += '[{}] {}: {}.{}'.format(n+1,k,', '.join(warnings[k]), end_of_line)
return msg
def _q25(self,x):
"""
Compute percentile (25th)
"""
return np.nanpercentile(x.values,25)
def _q75(self,x):
"""
Compute percentile (75th)
"""
return np.nanpercentile(x.values,75)
def _std(self,x):
"""
Compute standard deviation with ddof degrees of freedom
"""
return np.nanstd(x.values,ddof=self._ddof)
def _diptest(self,x):
"""
Compute Hartigan Dip Test for modality.
p < 0.05 suggests possible multimodality.
"""
p = modality.hartigan_diptest(x.values)
# dropna=False argument in pivot_table does not function as expected
# return -1 instead of None
if pd.isnull(p):
return -1
return p
def _normaltest(self,x):
"""
Compute test for normal distribution.
Null hypothesis: x comes from a normal distribution
p < alpha suggests the null hypothesis can be rejected.
"""
if len(x.values[~np.isnan(x.values)]) > 10:
stat,p = stats.normaltest(x.values, nan_policy='omit')
else:
p = None
# dropna=False argument in pivot_table does not function as expected
# return -1 instead of None
if pd.isnull(p):
return -1
return p
def _tukey(self,x,threshold):
"""
Count outliers according to Tukey's rule.
Where Q1 is the lower quartile and Q3 is the upper quartile,
an outlier is an observation outside of the range:
[Q1 - k(Q3 - Q1), Q3 + k(Q3 - Q1)]
k = 1.5 indicates an outlier
k = 3.0 indicates an outlier that is "far out"
"""
vals = x.values[~np.isnan(x.values)]
try:
q1, q3 = np.percentile(vals, [25, 75])
iqr = q3 - q1
low_bound = q1 - (iqr * threshold)
high_bound = q3 + (iqr * threshold)
outliers = np.where((vals > high_bound) | (vals < low_bound))
except:
outliers = []
return outliers
def _outliers(self,x):
"""
Compute number of outliers
"""
outliers = self._tukey(x, threshold = 1.5)
return np.size(outliers)
def _far_outliers(self,x):
"""
Compute number of "far out" outliers
"""
outliers = self._tukey(x, threshold = 3.0)
return np.size(outliers)
def _t1_summary(self,x):
"""
Compute median [IQR] or mean (Std) for the input series.
Parameters
----------
x : pandas Series
Series of values to be summarised.
"""
# set decimal places
if isinstance(self._decimals,int):
n = self._decimals
elif isinstance(self._decimals,dict):
try:
n = self._decimals[x.name]
except:
n = 1
else:
n = 1
warnings.warn('The decimals arg must be an int or dict. Defaulting to {} d.p.'.format(n))
if x.name in self._nonnormal:
f = '{{:.{}f}} [{{:.{}f}},{{:.{}f}}]'.format(n,n,n)
return f.format(np.nanmedian(x.values),
np.nanpercentile(x.values,25), np.nanpercentile(x.values,75))
else:
f = '{{:.{}f}} ({{:.{}f}})'.format(n,n)
return f.format(np.nanmean(x.values),
np.nanstd(x.values,ddof=self._ddof))
def _create_cont_describe(self,data):
"""
Describe the continuous data.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
df_cont : pandas DataFrame
Summarise the continuous variables.
"""
aggfuncs = [pd.Series.count,np.mean,np.median,self._std,
self._q25,self._q75,min,max,self._t1_summary,self._diptest,
self._outliers,self._far_outliers,self._normaltest]
# coerce continuous data to numeric
cont_data = data[self._continuous].apply(pd.to_numeric, errors='coerce')
# check all data in each continuous column is numeric
bad_cols = cont_data.count() != data[self._continuous].count()
bad_cols = cont_data.columns[bad_cols]
if len(bad_cols)>0:
raise InputError("""The following continuous column(s) have non-numeric values: {}.
Either specify the column(s) as categorical or remove the non-numeric values.""".format(bad_cols.values))
# check for coerced column containing all NaN to warn user
for column in cont_data.columns[cont_data.count() == 0]:
self._non_continuous_warning(column)
if self._groupby:
# add the groupby column back
cont_data = cont_data.merge(data[[self._groupby]],
left_index=True, right_index=True)
# group and aggregate data
df_cont = pd.pivot_table(cont_data,
columns=[self._groupby],
aggfunc=aggfuncs)
else:
# if no groupby, just add single group column
df_cont = cont_data.apply(aggfuncs).T
df_cont.columns.name = 'overall'
df_cont.columns = pd.MultiIndex.from_product([df_cont.columns,
['overall']])
df_cont.index.rename('variable',inplace=True)
# remove prefix underscore from column names (e.g. _std -> std)
agg_rename = df_cont.columns.levels[0]
agg_rename = [x[1:] if x[0]=='_' else x for x in agg_rename]
df_cont.columns.set_levels(agg_rename, level=0, inplace=True)
return df_cont
def _format_cat(self,row):
var = row.name[0]
if var in self._decimals:
n = self._decimals[var]
else:
n = 1
f = '{{:.{}f}}'.format(n)
return f.format(row.percent)
def _create_cat_describe(self,data):
"""
Describe the categorical data.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
df_cat : pandas DataFrame
Summarise the categorical variables.
"""
group_dict = {}
for g in self._groupbylvls:
if self._groupby:
d_slice = data.loc[data[self._groupby] == g, self._categorical]
else:
d_slice = data[self._categorical].copy()
# create a dataframe with freq, proportion
df = d_slice.copy()
# convert type to string to avoid int converted to boolean, avoiding nans
for column in df.columns:
df[column] = [str(row) if not pd.isnull(row) else None for row in df[column].values]
df = df.melt().groupby(['variable','value']).size().to_frame(name='freq')
df.index.set_names('level', level=1, inplace=True)
df['percent'] = df['freq'].div(df.freq.sum(level=0),level=0).astype(float)* 100
# set number of decimal places for percent
if isinstance(self._decimals,int):
n = self._decimals
f = '{{:.{}f}}'.format(n)
df['percent'] = df['percent'].astype(float).map(f.format)
elif isinstance(self._decimals,dict):
df.loc[:,'percent'] = df.apply(self._format_cat, axis=1)
else:
n = 1
f = '{{:.{}f}}'.format(n)
df['percent'] = df['percent'].astype(float).map(f.format)
# add n column, listing total non-null values for each variable
ct = d_slice.count().to_frame(name='n')
ct.index.name = 'variable'
df = df.join(ct)
# add null count
nulls = d_slice.isnull().sum().to_frame(name='isnull')
nulls.index.name = 'variable'
# only save null count to the first category for each variable
# do this by extracting the first category from the df row index
levels = df.reset_index()[['variable','level']].groupby('variable').first()
# add this category to the nulls table
nulls = nulls.join(levels)
nulls.set_index('level', append=True, inplace=True)
# join nulls to categorical
df = df.join(nulls)
# add summary column
df['t1_summary'] = df.freq.map(str) + ' (' + df.percent.map(str) + ')'
# add to dictionary
group_dict[g] = df
df_cat = pd.concat(group_dict,axis=1)
# ensure the groups are the 2nd level of the column index
if df_cat.columns.nlevels>1:
df_cat = df_cat.swaplevel(0, 1, axis=1).sort_index(axis=1,level=0)
return df_cat
def _create_significance_table(self,data):
"""
Create a table containing p-values for significance tests. Add features of
the distributions and the p-values to the dataframe.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
df : pandas DataFrame
A table containing the p-values, test name, etc.
"""
# list features of the variable e.g. matched, paired, n_expected
df=pd.DataFrame(index=self._continuous+self._categorical,
columns=['continuous','nonnormal','min_observed','pval','ptest'])
df.index.rename('variable', inplace=True)
df['continuous'] = np.where(df.index.isin(self._continuous),True,False)
df['nonnormal'] = np.where(df.index.isin(self._nonnormal),True,False)
# list values for each variable, grouped by groupby levels
for v in df.index:
is_continuous = df.loc[v]['continuous']
is_categorical = ~df.loc[v]['continuous']
is_normal = ~df.loc[v]['nonnormal']
# if continuous, group data into list of lists
if is_continuous:
catlevels = None
grouped_data = []
for s in self._groupbylvls:
lvl_data = data.loc[data[self._groupby]==s, v]
# coerce to numeric and drop non-numeric data
lvl_data = lvl_data.apply(pd.to_numeric, errors='coerce').dropna()
# append to overall group data
grouped_data.append(lvl_data.values)
min_observed = len(min(grouped_data,key=len))
# if categorical, create contingency table
elif is_categorical:
catlevels = sorted(data[v].astype('category').cat.categories)
grouped_data = pd.crosstab(data[self._groupby].rename('_groupby_var_'),data[v])
min_observed = grouped_data.sum(axis=1).min()
# minimum number of observations across all levels
df.loc[v,'min_observed'] = min_observed
# compute pvalues
df.loc[v,'pval'],df.loc[v,'ptest'] = self._p_test(v,
grouped_data,is_continuous,is_categorical,
is_normal,min_observed,catlevels)
return df
def _p_test(self,v,grouped_data,is_continuous,is_categorical,
is_normal,min_observed,catlevels):
"""
Compute p-values.
Parameters
----------
v : str
Name of the variable to be tested.
grouped_data : list
List of lists of values to be tested.
is_continuous : bool
True if the variable is continuous.
is_categorical : bool
True if the variable is categorical.
is_normal : bool
True if the variable is normally distributed.
min_observed : int
Minimum number of values across groups for the variable.
catlevels : list
Sorted list of levels for categorical variables.
Returns
----------
pval : float
The computed p-value.
ptest : str
The name of the test used to compute the p-value.
"""
# no test by default
pval=np.nan
ptest='Not tested'
# do not test if the variable has no observations in a level
if min_observed == 0:
warnings.warn('No p-value was computed for {} due to the low number of observations.'.format(v))
return pval,ptest
# continuous
if is_continuous and is_normal and len(grouped_data)==2 :
ptest = 'Two Sample T-test'
test_stat, pval = stats.ttest_ind(*grouped_data,equal_var=False)
elif is_continuous and is_normal:
# normally distributed
ptest = 'One-way ANOVA'
test_stat, pval = stats.f_oneway(*grouped_data)
elif is_continuous and not is_normal:
# non-normally distributed
ptest = 'Kruskal-Wallis'
test_stat, pval = stats.kruskal(*grouped_data)
# categorical
elif is_categorical:
# default to chi-squared
ptest = 'Chi-squared'
chi2, pval, dof, expected = stats.chi2_contingency(grouped_data)
# if any expected cell counts are < 5, chi2 may not be valid
# if this is a 2x2, switch to fisher exact
if expected.min() < 5:
if grouped_data.shape == (2,2):
ptest = "Fisher's exact"
oddsratio, pval = stats.fisher_exact(grouped_data)
else:
ptest = 'Chi-squared (warning: expected count < 5)'
warnings.warn('No p-value was computed for {} due to the low number of observations.'.format(v))
return pval,ptest
def _create_cont_table(self,data):
"""
Create tableone for continuous data.
Returns
----------
table : pandas DataFrame
A table summarising the continuous variables.
"""
# remove the t1_summary level
table = self.cont_describe[['t1_summary']].copy()
table.columns = table.columns.droplevel(level=0)
# add a column of null counts as 1-count() from previous function
nulltable = data[self._continuous].isnull().sum().to_frame(name='isnull')
try:
table = table.join(nulltable)
except TypeError: # if columns form a CategoricalIndex, need to convert to string first
table.columns = table.columns.astype(str)
table = table.join(nulltable)
# add an empty level column, for joining with cat table
table['level'] = ''
table.set_index([table.index,'level'],inplace=True)
# add pval column
if self._pval and self._pval_adjust:
table = table.join(self._significance_table[['pval (adjusted)','ptest']])
elif self._pval:
table = table.join(self._significance_table[['pval','ptest']])
return table
def _create_cat_table(self,data):
"""
Create table one for categorical data.
Returns
----------
table : pandas DataFrame
A table summarising the categorical variables.
"""
table = self.cat_describe['t1_summary'].copy()
# add the total count of null values across all levels
isnull = data[self._categorical].isnull().sum().to_frame(name='isnull')
isnull.index.rename('variable', inplace=True)
try:
table = table.join(isnull)
except TypeError: # if columns form a CategoricalIndex, need to convert to string first
table.columns = table.columns.astype(str)
table = table.join(isnull)
# add pval column
if self._pval and self._pval_adjust:
table = table.join(self._significance_table[['pval (adjusted)','ptest']])
elif self._pval:
table = table.join(self._significance_table[['pval','ptest']])
return table
def _create_tableone(self,data):
"""
Create table 1 by combining the continuous and categorical tables.
Returns
----------
table : pandas DataFrame
The complete table one.
"""
if self._continuous and self._categorical:
# support pandas<=0.22
try:
table = pd.concat([self.cont_table,self.cat_table],sort=False)
except:
table = pd.concat([self.cont_table,self.cat_table])
elif self._continuous:
table = self.cont_table
elif self._categorical:
table = self.cat_table
# round pval column and convert to string
if self._pval and self._pval_adjust:
table['pval (adjusted)'] = table['pval (adjusted)'].apply('{:.3f}'.format).astype(str)
table.loc[table['pval (adjusted)'] == '0.000', 'pval (adjusted)'] = '<0.001'
elif self._pval:
table['pval'] = table['pval'].apply('{:.3f}'.format).astype(str)
table.loc[table['pval'] == '0.000', 'pval'] = '<0.001'
# sort the table rows
table.reset_index().set_index(['variable','level'], inplace=True)
if self._sort:
# alphabetical
new_index = sorted(table.index.values)
else:
# sort by the columns argument
new_index = sorted(table.index.values,key=lambda x: self._columns.index(x[0]))
table = table.reindex(new_index)
# if a limit has been set on the number of categorical variables
# then re-order the variables by frequency
if self._limit:
levelcounts = data[self._categorical].nunique()
levelcounts = levelcounts[levelcounts >= self._limit]
for v,_ in levelcounts.iteritems():
count = data[v].value_counts().sort_values(ascending=False)
new_index = [(v, i) for i in count.index]
# restructure to match orig_index
new_index_array=np.empty((len(new_index),), dtype=object)
new_index_array[:]=[tuple(i) for i in new_index]
orig_index = table.index.values.copy()
orig_index[table.index.get_loc(v)] = new_index_array
table = table.reindex(orig_index)
# inserts n row
n_row = pd.DataFrame(columns = ['variable','level','isnull'])
n_row.set_index(['variable','level'], inplace=True)
n_row.loc['n', ''] = None
# support pandas<=0.22
try:
table = pd.concat([n_row,table],sort=False)
except:
table = pd.concat([n_row,table])
if self._groupbylvls == ['overall']:
table.loc['n','overall'] = len(data.index)
else:
for g in self._groupbylvls:
ct = data[self._groupby][data[self._groupby]==g].count()
table.loc['n',g] = ct
# only display data in first level row
dupe_mask = table.groupby(level=[0]).cumcount().ne(0)
dupe_columns = ['isnull']
optional_columns = ['pval','pval (adjusted)','ptest']
for col in optional_columns:
if col in table.columns.values:
dupe_columns.append(col)
table[dupe_columns] = table[dupe_columns].mask(dupe_mask).fillna('')
# remove empty column added above
table.drop([''], axis=1, inplace=True)
# remove isnull column if not needed
if not self._isnull:
table.drop('isnull',axis=1,inplace=True)
# replace nans with empty strings
table.fillna('',inplace=True)
# add column index
if not self._groupbylvls == ['overall']:
# rename groupby variable if requested
c = self._groupby
if self._alt_labels:
if self._groupby in self._alt_labels:
c = self._alt_labels[self._groupby]
c = 'Grouped by {}'.format(c)
table.columns = pd.MultiIndex.from_product([[c], table.columns])
# display alternative labels if assigned
table.rename(index=self._create_row_labels(), inplace=True, level=0)
# if a limit has been set on the number of categorical variables
# limit the number of categorical variables that are displayed
if self._limit:
table = table.groupby('variable').head(self._limit)
# re-order the columns in a consistent fashion
if self._groupby:
cols = table.columns.levels[1].values
else:
cols = table.columns.values
if 'isnull' in cols:
cols = ['isnull'] + [x for x in cols if x != 'isnull']
# iterate through each optional column
# if they exist, put them at the end of the dataframe
# ensures the last 3 columns will be in the same order as optional_columns
for col in optional_columns:
if col in cols:
cols = [x for x in cols if x != col] + [col]
if self._groupby:
table = table.reindex(cols, axis=1, level=1)
else:
table = table.reindex(cols, axis=1)
return table
def _create_row_labels(self):
"""
Take the original labels for rows. Rename if alternative labels are
provided. Append label suffix if label_suffix is True.
Returns
----------
labels : dictionary
Dictionary, keys are original column name, values are final label.
"""
# start with the original column names
labels = {}
for c in self._columns:
labels[c] = c
# replace column names with alternative names if provided
if self._alt_labels:
for k in self._alt_labels.keys():
labels[k] = self._alt_labels[k]
# append the label suffix
if self._label_suffix:
for k in labels.keys():
if k in self._nonnormal:
labels[k] = "{}, {}".format(labels[k],"median [Q1,Q3]")
elif k in self._categorical:
labels[k] = "{}, {}".format(labels[k],"n (%)")
else:
labels[k] = "{}, {}".format(labels[k],"mean (SD)")
return labels
# warnings
def _non_continuous_warning(self, c):
warnings.warn('''"{}" has all non-numeric values. Consider including it in the
list of categorical variables.'''.format(c), RuntimeWarning, stacklevel=2)
|
tompollard/tableone
|
tableone.py
|
TableOne._std
|
python
|
def _std(self,x):
return np.nanstd(x.values,ddof=self._ddof)
|
Compute standard deviation with ddof degrees of freedom
|
train
|
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L275-L279
| null |
class TableOne(object):
    """
    If you use the tableone package, please cite:

    Pollard TJ, Johnson AEW, Raffa JD, Mark RG (2018). tableone: An open source Python
    package for producing summary statistics for research papers. JAMIA Open, Volume 1,
    Issue 1, 1 July 2018, Pages 26-31. https://doi.org/10.1093/jamiaopen/ooy012

    Create an instance of the tableone summary table.

    Parameters
    ----------
    data : pandas DataFrame
        The dataset to be summarised. Rows are observations, columns are variables.
    columns : list, optional
        List of columns in the dataset to be included in the final table.
    categorical : list, optional
        List of columns that contain categorical variables.
    groupby : str, optional
        Optional column for stratifying the final table (default: None).
    nonnormal : list, optional
        List of columns that contain non-normal variables (default: None).
    pval : bool, optional
        Display computed p-values (default: False).
    pval_adjust : str, optional
        Method used to adjust p-values for multiple testing.
        For a complete list, see documentation for statsmodels multipletests.
        Available methods include ::

        `None` : no correction applied.
        `bonferroni` : one-step correction
        `sidak` : one-step correction
        `holm-sidak` : step down method using Sidak adjustments
        `simes-hochberg` : step-up method (independent)
        `hommel` : closed method based on Simes tests (non-negative)

    isnull : bool, optional
        Display a count of null values (default: True).
    ddof : int, optional
        Degrees of freedom for standard deviation calculations (default: 1).
    labels : dict, optional
        Dictionary of alternative labels for variables.
        e.g. `labels = {'sex':'gender', 'trt':'treatment'}`
    sort : bool, optional
        Sort the rows alphabetically. Default (False) retains the input order
        of columns.
    limit : int, optional
        Limit to the top N most frequent categories.
    remarks : bool, optional
        Add remarks on the appropriateness of the summary measures and the
        statistical tests (default: True).
    label_suffix : bool, optional
        Append summary type (e.g. "mean (SD); median [Q1,Q3], n (%); ") to the
        row label (default: False).
    decimals : int or dict, optional
        Number of decimal places to display. An integer applies the rule to all
        variables (default: 1). A dictionary (e.g. `decimals = {'age': 0}`) applies
        the rule per variable, defaulting to 1 place for unspecified variables.
        For continuous variables, applies to all summary statistics (e.g. mean and
        standard deviation). For categorical variables, applies to percentage only.

    Attributes
    ----------
    tableone : dataframe
        Summary of the data (i.e., the "Table 1").
    """
def __init__(self, data, columns=None, categorical=None, groupby=None,
             nonnormal=None, pval=False, pval_adjust=None, isnull=True,
             ddof=1, labels=None, sort=False, limit=None, remarks=True,
             label_suffix=False, decimals=1):
    """
    Validate the inputs, detect variable types, compute the descriptive
    tables and assemble them into ``self.tableone``.

    See the class docstring for the meaning of each parameter.
    """
    # check input arguments
    # normalise groupby to a single column name string ('' means no grouping)
    if not groupby:
        groupby = ''
    elif groupby and type(groupby) == list:
        groupby = groupby[0]
    # normalise nonnormal to a list of column names
    if not nonnormal:
        nonnormal = []
    elif nonnormal and type(nonnormal) == str:
        nonnormal = [nonnormal]
    # if columns not specified, use all columns
    if not columns:
        columns = data.columns.get_values()
    # check that the columns exist in the dataframe
    if not set(columns).issubset(data.columns):
        notfound = list(set(columns) - set(data.columns))
        raise InputError('Columns not found in dataset: {}'.format(notfound))
    # check for duplicate columns
    dups = data[columns].columns[data[columns].columns.duplicated()].unique()
    if not dups.empty:
        raise InputError('Input contains duplicate columns: {}'.format(dups))
    # if categorical not specified, try to identify categorical
    # (an explicit empty list suppresses auto-detection)
    if not categorical and type(categorical) != list:
        categorical = self._detect_categorical_columns(data[columns])
    if pval and not groupby:
        raise InputError("If pval=True then the groupby must be specified.")
    self._columns = list(columns)
    self._isnull = isnull
    # continuous = everything that is neither categorical nor the groupby
    self._continuous = [c for c in columns if c not in categorical + [groupby]]
    self._categorical = categorical
    self._nonnormal = nonnormal
    self._pval = pval
    self._pval_adjust = pval_adjust
    self._sort = sort
    self._groupby = groupby
    self._ddof = ddof  # degrees of freedom for standard deviation
    self._alt_labels = labels
    self._limit = limit
    self._remarks = remarks
    self._label_suffix = label_suffix
    self._decimals = decimals
    # output column names that cannot be contained in a groupby
    self._reserved_columns = ['isnull', 'pval', 'ptest', 'pval (adjusted)']
    if self._groupby:
        self._groupbylvls = sorted(data.groupby(groupby).groups.keys())
        # check that the group levels do not include reserved words
        for level in self._groupbylvls:
            if level in self._reserved_columns:
                raise InputError('Group level contained "{}", a reserved keyword for tableone.'.format(level))
    else:
        self._groupbylvls = ['overall']
    # forgive me jraffa
    if self._pval:
        self._significance_table = self._create_significance_table(data)
    # correct for multiple testing
    if self._pval and self._pval_adjust:
        alpha = 0.05
        adjusted = multitest.multipletests(self._significance_table['pval'],
                                           alpha=alpha, method=self._pval_adjust)
        self._significance_table['pval (adjusted)'] = adjusted[1]
        self._significance_table['adjust method'] = self._pval_adjust
    # create descriptive tables for the categorical variables
    if self._categorical:
        self.cat_describe = self._create_cat_describe(data)
        self.cat_table = self._create_cat_table(data)
    # create descriptive tables for the continuous variables
    if self._continuous:
        self.cont_describe = self._create_cont_describe(data)
        self.cont_table = self._create_cont_table(data)
    # combine continuous variables and categorical variables into table 1
    self.tableone = self._create_tableone(data)
    # self._remarks_str = self._generate_remark_str()
    # expose common DataFrame methods directly on the instance
    self.head = self.tableone.head
    self.tail = self.tableone.tail
    self.to_csv = self.tableone.to_csv
    self.to_excel = self.tableone.to_excel
    self.to_html = self.tableone.to_html
    self.to_json = self.tableone.to_json
    self.to_latex = self.tableone.to_latex
def __str__(self):
return self.tableone.to_string() + self._generate_remark_str('\n')
def __repr__(self):
return self.tableone.to_string() + self._generate_remark_str('\n')
def _repr_html_(self):
return self.tableone._repr_html_() + self._generate_remark_str('<br />')
def _generate_remark_str(self, end_of_line = '\n'):
"""
Generate a series of remarks that the user should consider
when interpreting the summary statistics.
"""
warnings = {}
msg = '{}'.format(end_of_line)
# generate warnings for continuous variables
if self._continuous:
# highlight far outliers
outlier_mask = self.cont_describe.far_outliers > 1
outlier_vars = list(self.cont_describe.far_outliers[outlier_mask].dropna(how='all').index)
if outlier_vars:
warnings["Warning, Tukey test indicates far outliers in"] = outlier_vars
# highlight possible multimodal distributions using hartigan's dip test
# -1 values indicate NaN
modal_mask = (self.cont_describe.diptest >= 0) & (self.cont_describe.diptest <= 0.05)
modal_vars = list(self.cont_describe.diptest[modal_mask].dropna(how='all').index)
if modal_vars:
warnings["Warning, Hartigan's Dip Test reports possible multimodal distributions for"] = modal_vars
# highlight non normal distributions
# -1 values indicate NaN
modal_mask = (self.cont_describe.normaltest >= 0) & (self.cont_describe.normaltest <= 0.001)
modal_vars = list(self.cont_describe.normaltest[modal_mask].dropna(how='all').index)
if modal_vars:
warnings["Warning, test for normality reports non-normal distributions for"] = modal_vars
# create the warning string
for n,k in enumerate(sorted(warnings)):
msg += '[{}] {}: {}.{}'.format(n+1,k,', '.join(warnings[k]), end_of_line)
return msg
def _detect_categorical_columns(self,data):
"""
Detect categorical columns if they are not specified.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
likely_cat : list
List of variables that appear to be categorical.
"""
# assume all non-numerical and date columns are categorical
numeric_cols = set(data._get_numeric_data().columns.values)
date_cols = set(data.select_dtypes(include=[np.datetime64]).columns)
likely_cat = set(data.columns) - numeric_cols
likely_cat = list(likely_cat - date_cols)
# check proportion of unique values if numerical
for var in data._get_numeric_data().columns:
likely_flag = 1.0 * data[var].nunique()/data[var].count() < 0.05
if likely_flag:
likely_cat.append(var)
return likely_cat
def _q25(self,x):
"""
Compute percentile (25th)
"""
return np.nanpercentile(x.values,25)
def _q75(self,x):
"""
Compute percentile (75th)
"""
return np.nanpercentile(x.values,75)
def _diptest(self, x):
    """
    Compute Hartigan Dip Test for modality.

    p < 0.05 suggests possible multimodality.
    """
    p = modality.hartigan_diptest(x.values)
    # pivot_table's dropna=False argument does not behave as expected, so a
    # missing p-value is encoded as -1 instead of None
    return -1 if pd.isnull(p) else p
def _normaltest(self,x):
"""
Compute test for normal distribution.
Null hypothesis: x comes from a normal distribution
p < alpha suggests the null hypothesis can be rejected.
"""
if len(x.values[~np.isnan(x.values)]) > 10:
stat,p = stats.normaltest(x.values, nan_policy='omit')
else:
p = None
# dropna=False argument in pivot_table does not function as expected
# return -1 instead of None
if pd.isnull(p):
return -1
return p
def _tukey(self,x,threshold):
"""
Count outliers according to Tukey's rule.
Where Q1 is the lower quartile and Q3 is the upper quartile,
an outlier is an observation outside of the range:
[Q1 - k(Q3 - Q1), Q3 + k(Q3 - Q1)]
k = 1.5 indicates an outlier
k = 3.0 indicates an outlier that is "far out"
"""
vals = x.values[~np.isnan(x.values)]
try:
q1, q3 = np.percentile(vals, [25, 75])
iqr = q3 - q1
low_bound = q1 - (iqr * threshold)
high_bound = q3 + (iqr * threshold)
outliers = np.where((vals > high_bound) | (vals < low_bound))
except:
outliers = []
return outliers
def _outliers(self,x):
"""
Compute number of outliers
"""
outliers = self._tukey(x, threshold = 1.5)
return np.size(outliers)
def _far_outliers(self,x):
"""
Compute number of "far out" outliers
"""
outliers = self._tukey(x, threshold = 3.0)
return np.size(outliers)
def _t1_summary(self,x):
"""
Compute median [IQR] or mean (Std) for the input series.
Parameters
----------
x : pandas Series
Series of values to be summarised.
"""
# set decimal places
if isinstance(self._decimals,int):
n = self._decimals
elif isinstance(self._decimals,dict):
try:
n = self._decimals[x.name]
except:
n = 1
else:
n = 1
warnings.warn('The decimals arg must be an int or dict. Defaulting to {} d.p.'.format(n))
if x.name in self._nonnormal:
f = '{{:.{}f}} [{{:.{}f}},{{:.{}f}}]'.format(n,n,n)
return f.format(np.nanmedian(x.values),
np.nanpercentile(x.values,25), np.nanpercentile(x.values,75))
else:
f = '{{:.{}f}} ({{:.{}f}})'.format(n,n)
return f.format(np.nanmean(x.values),
np.nanstd(x.values,ddof=self._ddof))
def _create_cont_describe(self, data):
    """
    Describe the continuous data.

    Parameters
    ----------
    data : pandas DataFrame
        The input dataset.

    Returns
    ----------
    df_cont : pandas DataFrame
        Summarise the continuous variables.
    """
    # aggregation functions applied to every continuous column
    # (self._std, defined elsewhere in this file, is np.nanstd with
    # ddof=self._ddof)
    aggfuncs = [pd.Series.count, np.mean, np.median, self._std,
                self._q25, self._q75, min, max, self._t1_summary, self._diptest,
                self._outliers, self._far_outliers, self._normaltest]
    # coerce continuous data to numeric
    cont_data = data[self._continuous].apply(pd.to_numeric, errors='coerce')
    # check all data in each continuous column is numeric: coercion would
    # have turned any non-numeric entry into NaN, reducing the count
    bad_cols = cont_data.count() != data[self._continuous].count()
    bad_cols = cont_data.columns[bad_cols]
    if len(bad_cols) > 0:
        raise InputError("""The following continuous column(s) have non-numeric values: {}.
        Either specify the column(s) as categorical or remove the non-numeric values.""".format(bad_cols.values))
    # check for coerced column containing all NaN to warn user
    for column in cont_data.columns[cont_data.count() == 0]:
        self._non_continuous_warning(column)
    if self._groupby:
        # add the groupby column back
        cont_data = cont_data.merge(data[[self._groupby]],
                                    left_index=True, right_index=True)
        # group and aggregate data
        df_cont = pd.pivot_table(cont_data,
                                 columns=[self._groupby],
                                 aggfunc=aggfuncs)
    else:
        # if no groupby, just add single group column
        df_cont = cont_data.apply(aggfuncs).T
        df_cont.columns.name = 'overall'
        df_cont.columns = pd.MultiIndex.from_product([df_cont.columns,
                                                      ['overall']])
    df_cont.index.rename('variable', inplace=True)
    # remove prefix underscore from column names (e.g. _std -> std)
    agg_rename = df_cont.columns.levels[0]
    agg_rename = [x[1:] if x[0] == '_' else x for x in agg_rename]
    # NOTE(review): MultiIndex.set_levels(..., inplace=True) is deprecated in
    # newer pandas — confirm the supported pandas range before upgrading.
    df_cont.columns.set_levels(agg_rename, level=0, inplace=True)
    return df_cont
def _format_cat(self,row):
var = row.name[0]
if var in self._decimals:
n = self._decimals[var]
else:
n = 1
f = '{{:.{}f}}'.format(n)
return f.format(row.percent)
def _create_cat_describe(self, data):
    """
    Describe the categorical data.

    Parameters
    ----------
    data : pandas DataFrame
        The input dataset.

    Returns
    ----------
    df_cat : pandas DataFrame
        Summarise the categorical variables.
    """
    # one describe frame per group level, concatenated at the end
    group_dict = {}
    for g in self._groupbylvls:
        if self._groupby:
            d_slice = data.loc[data[self._groupby] == g, self._categorical]
        else:
            d_slice = data[self._categorical].copy()
        # create a dataframe with freq, proportion
        df = d_slice.copy()
        # convert type to string to avoid int converted to boolean, avoiding nans
        for column in df.columns:
            df[column] = [str(row) if not pd.isnull(row) else None for row in df[column].values]
        # long format: one row per (variable, level) with its frequency
        df = df.melt().groupby(['variable','value']).size().to_frame(name='freq')
        df.index.set_names('level', level=1, inplace=True)
        # percentage within each variable (level=0 of the index)
        df['percent'] = df['freq'].div(df.freq.sum(level=0),level=0).astype(float)* 100
        # set number of decimal places for percent
        if isinstance(self._decimals,int):
            n = self._decimals
            f = '{{:.{}f}}'.format(n)
            df['percent'] = df['percent'].astype(float).map(f.format)
        elif isinstance(self._decimals,dict):
            # per-variable decimal places
            df.loc[:,'percent'] = df.apply(self._format_cat, axis=1)
        else:
            n = 1
            f = '{{:.{}f}}'.format(n)
            df['percent'] = df['percent'].astype(float).map(f.format)
        # add n column, listing total non-null values for each variable
        ct = d_slice.count().to_frame(name='n')
        ct.index.name = 'variable'
        df = df.join(ct)
        # add null count
        nulls = d_slice.isnull().sum().to_frame(name='isnull')
        nulls.index.name = 'variable'
        # only save null count to the first category for each variable
        # do this by extracting the first category from the df row index
        levels = df.reset_index()[['variable','level']].groupby('variable').first()
        # add this category to the nulls table
        nulls = nulls.join(levels)
        nulls.set_index('level', append=True, inplace=True)
        # join nulls to categorical
        df = df.join(nulls)
        # add summary column, e.g. "12 (34.5)"
        df['t1_summary'] = df.freq.map(str) + ' (' + df.percent.map(str) + ')'
        # add to dictionary
        group_dict[g] = df
    df_cat = pd.concat(group_dict,axis=1)
    # ensure the groups are the 2nd level of the column index
    if df_cat.columns.nlevels>1:
        df_cat = df_cat.swaplevel(0, 1, axis=1).sort_index(axis=1,level=0)
    return df_cat
def _create_significance_table(self, data):
    """
    Create a table containing p-values for significance tests. Add features of
    the distributions and the p-values to the dataframe.

    Parameters
    ----------
    data : pandas DataFrame
        The input dataset.

    Returns
    ----------
    df : pandas DataFrame
        A table containing the p-values, test name, etc.
    """
    # list features of the variable e.g. matched, paired, n_expected
    df = pd.DataFrame(index=self._continuous+self._categorical,
                      columns=['continuous','nonnormal','min_observed','pval','ptest'])
    df.index.rename('variable', inplace=True)
    df['continuous'] = np.where(df.index.isin(self._continuous),True,False)
    df['nonnormal'] = np.where(df.index.isin(self._nonnormal),True,False)
    # list values for each variable, grouped by groupby levels
    for v in df.index:
        # ``~`` inverts the numpy bool flags stored in the frame
        is_continuous = df.loc[v]['continuous']
        is_categorical = ~df.loc[v]['continuous']
        is_normal = ~df.loc[v]['nonnormal']
        # if continuous, group data into list of lists
        if is_continuous:
            catlevels = None
            grouped_data = []
            for s in self._groupbylvls:
                lvl_data = data.loc[data[self._groupby]==s, v]
                # coerce to numeric and drop non-numeric data
                lvl_data = lvl_data.apply(pd.to_numeric, errors='coerce').dropna()
                # append to overall group data
                grouped_data.append(lvl_data.values)
            min_observed = len(min(grouped_data,key=len))
        # if categorical, create contingency table
        elif is_categorical:
            catlevels = sorted(data[v].astype('category').cat.categories)
            # rename avoids a clash when the groupby column is tested itself
            grouped_data = pd.crosstab(data[self._groupby].rename('_groupby_var_'),data[v])
            min_observed = grouped_data.sum(axis=1).min()
        # minimum number of observations across all levels
        df.loc[v,'min_observed'] = min_observed
        # compute pvalues
        df.loc[v,'pval'],df.loc[v,'ptest'] = self._p_test(v,
            grouped_data,is_continuous,is_categorical,
            is_normal,min_observed,catlevels)
    return df
def _p_test(self, v, grouped_data, is_continuous, is_categorical,
            is_normal, min_observed, catlevels):
    """
    Compute p-values.

    Parameters
    ----------
    v : str
        Name of the variable to be tested.
    grouped_data : list
        List of lists of values to be tested.
    is_continuous : bool
        True if the variable is continuous.
    is_categorical : bool
        True if the variable is categorical.
    is_normal : bool
        True if the variable is normally distributed.
    min_observed : int
        Minimum number of values across groups for the variable.
    catlevels : list
        Sorted list of levels for categorical variables.

    Returns
    ----------
    pval : float
        The computed p-value.
    ptest : str
        The name of the test used to compute the p-value.
    """
    # no test by default
    pval = np.nan
    ptest = 'Not tested'
    # do not test if the variable has no observations in a level
    if min_observed == 0:
        warnings.warn('No p-value was computed for {} due to the low number of observations.'.format(v))
        return pval, ptest
    # continuous
    if is_continuous and is_normal and len(grouped_data) == 2:
        # Welch's t-test (unequal variances assumed)
        ptest = 'Two Sample T-test'
        test_stat, pval = stats.ttest_ind(*grouped_data, equal_var=False)
    elif is_continuous and is_normal:
        # normally distributed
        ptest = 'One-way ANOVA'
        test_stat, pval = stats.f_oneway(*grouped_data)
    elif is_continuous and not is_normal:
        # non-normally distributed
        ptest = 'Kruskal-Wallis'
        test_stat, pval = stats.kruskal(*grouped_data)
    # categorical
    elif is_categorical:
        # default to chi-squared
        ptest = 'Chi-squared'
        chi2, pval, dof, expected = stats.chi2_contingency(grouped_data)
        # if any expected cell counts are < 5, chi2 may not be valid
        # if this is a 2x2, switch to fisher exact
        if expected.min() < 5:
            if grouped_data.shape == (2, 2):
                ptest = "Fisher's exact"
                oddsratio, pval = stats.fisher_exact(grouped_data)
            else:
                ptest = 'Chi-squared (warning: expected count < 5)'
                # NOTE(review): this message says no p-value was computed,
                # but the chi-squared p-value IS returned here — confirm the
                # intended wording.
                warnings.warn('No p-value was computed for {} due to the low number of observations.'.format(v))
    return pval, ptest
def _create_cont_table(self, data):
    """
    Create tableone for continuous data.

    Returns
    ----------
    table : pandas DataFrame
        A table summarising the continuous variables.
    """
    # remove the t1_summary level, leaving one column per group
    table = self.cont_describe[['t1_summary']].copy()
    table.columns = table.columns.droplevel(level=0)
    # add a column of null counts as 1-count() from previous function
    nulltable = data[self._continuous].isnull().sum().to_frame(name='isnull')
    try:
        table = table.join(nulltable)
    except TypeError:  # if columns form a CategoricalIndex, need to convert to string first
        table.columns = table.columns.astype(str)
        table = table.join(nulltable)
    # add an empty level column, for joining with cat table
    table['level'] = ''
    table.set_index([table.index,'level'],inplace=True)
    # add pval column (adjusted values take precedence when requested)
    if self._pval and self._pval_adjust:
        table = table.join(self._significance_table[['pval (adjusted)','ptest']])
    elif self._pval:
        table = table.join(self._significance_table[['pval','ptest']])
    return table
def _create_cat_table(self, data):
    """
    Create table one for categorical data.

    Returns
    ----------
    table : pandas DataFrame
        A table summarising the categorical variables.
    """
    table = self.cat_describe['t1_summary'].copy()
    # add the total count of null values across all levels
    isnull = data[self._categorical].isnull().sum().to_frame(name='isnull')
    isnull.index.rename('variable', inplace=True)
    try:
        table = table.join(isnull)
    except TypeError:  # if columns form a CategoricalIndex, need to convert to string first
        table.columns = table.columns.astype(str)
        table = table.join(isnull)
    # add pval column (adjusted values take precedence when requested)
    if self._pval and self._pval_adjust:
        table = table.join(self._significance_table[['pval (adjusted)','ptest']])
    elif self._pval:
        table = table.join(self._significance_table[['pval','ptest']])
    return table
def _create_tableone(self, data):
    """
    Create table 1 by combining the continuous and categorical tables.

    Returns
    ----------
    table : pandas DataFrame
        The complete table one.
    """
    if self._continuous and self._categorical:
        # support pandas<=0.22
        try:
            table = pd.concat([self.cont_table,self.cat_table],sort=False)
        except:
            table = pd.concat([self.cont_table,self.cat_table])
    elif self._continuous:
        table = self.cont_table
    elif self._categorical:
        table = self.cat_table
    # round pval column and convert to string
    if self._pval and self._pval_adjust:
        table['pval (adjusted)'] = table['pval (adjusted)'].apply('{:.3f}'.format).astype(str)
        table.loc[table['pval (adjusted)'] == '0.000', 'pval (adjusted)'] = '<0.001'
    elif self._pval:
        table['pval'] = table['pval'].apply('{:.3f}'.format).astype(str)
        table.loc[table['pval'] == '0.000', 'pval'] = '<0.001'
    # sort the table rows
    # NOTE(review): reset_index() returns a NEW frame, so this inplace
    # set_index mutates a discarded temporary — the statement is a no-op on
    # ``table``. Confirm whether it can be removed or was meant to reassign.
    table.reset_index().set_index(['variable','level'], inplace=True)
    if self._sort:
        # alphabetical
        new_index = sorted(table.index.values)
    else:
        # sort by the columns argument
        new_index = sorted(table.index.values,key=lambda x: self._columns.index(x[0]))
    table = table.reindex(new_index)
    # if a limit has been set on the number of categorical variables
    # then re-order the variables by frequency
    if self._limit:
        levelcounts = data[self._categorical].nunique()
        levelcounts = levelcounts[levelcounts >= self._limit]
        for v,_ in levelcounts.iteritems():
            count = data[v].value_counts().sort_values(ascending=False)
            new_index = [(v, i) for i in count.index]
            # restructure to match orig_index
            new_index_array=np.empty((len(new_index),), dtype=object)
            new_index_array[:]=[tuple(i) for i in new_index]
            orig_index = table.index.values.copy()
            orig_index[table.index.get_loc(v)] = new_index_array
            table = table.reindex(orig_index)
    # inserts n row (total observation counts) at the top of the table
    n_row = pd.DataFrame(columns = ['variable','level','isnull'])
    n_row.set_index(['variable','level'], inplace=True)
    n_row.loc['n', ''] = None
    # support pandas<=0.22
    try:
        table = pd.concat([n_row,table],sort=False)
    except:
        table = pd.concat([n_row,table])
    if self._groupbylvls == ['overall']:
        table.loc['n','overall'] = len(data.index)
    else:
        for g in self._groupbylvls:
            ct = data[self._groupby][data[self._groupby]==g].count()
            table.loc['n',g] = ct
    # only display data in first level row
    dupe_mask = table.groupby(level=[0]).cumcount().ne(0)
    dupe_columns = ['isnull']
    optional_columns = ['pval','pval (adjusted)','ptest']
    for col in optional_columns:
        if col in table.columns.values:
            dupe_columns.append(col)
    table[dupe_columns] = table[dupe_columns].mask(dupe_mask).fillna('')
    # remove empty column added above
    table.drop([''], axis=1, inplace=True)
    # remove isnull column if not needed
    if not self._isnull:
        table.drop('isnull',axis=1,inplace=True)
    # replace nans with empty strings
    table.fillna('',inplace=True)
    # add column index
    if not self._groupbylvls == ['overall']:
        # rename groupby variable if requested
        c = self._groupby
        if self._alt_labels:
            if self._groupby in self._alt_labels:
                c = self._alt_labels[self._groupby]
        c = 'Grouped by {}'.format(c)
        table.columns = pd.MultiIndex.from_product([[c], table.columns])
    # display alternative labels if assigned
    table.rename(index=self._create_row_labels(), inplace=True, level=0)
    # if a limit has been set on the number of categorical variables
    # limit the number of categorical variables that are displayed
    if self._limit:
        table = table.groupby('variable').head(self._limit)
    # re-order the columns in a consistent fashion
    if self._groupby:
        cols = table.columns.levels[1].values
    else:
        cols = table.columns.values
    if 'isnull' in cols:
        cols = ['isnull'] + [x for x in cols if x != 'isnull']
    # iterate through each optional column
    # if they exist, put them at the end of the dataframe
    # ensures the last 3 columns will be in the same order as optional_columns
    for col in optional_columns:
        if col in cols:
            cols = [x for x in cols if x != col] + [col]
    if self._groupby:
        table = table.reindex(cols, axis=1, level=1)
    else:
        table = table.reindex(cols, axis=1)
    return table
def _create_row_labels(self):
"""
Take the original labels for rows. Rename if alternative labels are
provided. Append label suffix if label_suffix is True.
Returns
----------
labels : dictionary
Dictionary, keys are original column name, values are final label.
"""
# start with the original column names
labels = {}
for c in self._columns:
labels[c] = c
# replace column names with alternative names if provided
if self._alt_labels:
for k in self._alt_labels.keys():
labels[k] = self._alt_labels[k]
# append the label suffix
if self._label_suffix:
for k in labels.keys():
if k in self._nonnormal:
labels[k] = "{}, {}".format(labels[k],"median [Q1,Q3]")
elif k in self._categorical:
labels[k] = "{}, {}".format(labels[k],"n (%)")
else:
labels[k] = "{}, {}".format(labels[k],"mean (SD)")
return labels
# warnings
def _non_continuous_warning(self, c):
    # Warn (rather than raise) so table creation continues; this is called
    # from _create_cont_describe for columns whose values all became NaN
    # after pd.to_numeric coercion.
    warnings.warn('''"{}" has all non-numeric values. Consider including it in the
    list of categorical variables.'''.format(c), RuntimeWarning, stacklevel=2)
|
tompollard/tableone
|
tableone.py
|
TableOne._tukey
|
python
|
def _tukey(self,x,threshold):
vals = x.values[~np.isnan(x.values)]
try:
q1, q3 = np.percentile(vals, [25, 75])
iqr = q3 - q1
low_bound = q1 - (iqr * threshold)
high_bound = q3 + (iqr * threshold)
outliers = np.where((vals > high_bound) | (vals < low_bound))
except:
outliers = []
return outliers
|
Count outliers according to Tukey's rule.
Where Q1 is the lower quartile and Q3 is the upper quartile,
an outlier is an observation outside of the range:
[Q1 - k(Q3 - Q1), Q3 + k(Q3 - Q1)]
k = 1.5 indicates an outlier
k = 3.0 indicates an outlier that is "far out"
|
train
|
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L311-L332
| null |
class TableOne(object):
"""
If you use the tableone package, please cite:
Pollard TJ, Johnson AEW, Raffa JD, Mark RG (2018). tableone: An open source Python
package for producing summary statistics for research papers. JAMIA Open, Volume 1,
Issue 1, 1 July 2018, Pages 26-31. https://doi.org/10.1093/jamiaopen/ooy012
Create an instance of the tableone summary table.
Parameters
----------
data : pandas DataFrame
The dataset to be summarised. Rows are observations, columns are variables.
columns : list, optional
List of columns in the dataset to be included in the final table.
categorical : list, optional
List of columns that contain categorical variables.
groupby : str, optional
Optional column for stratifying the final table (default: None).
nonnormal : list, optional
List of columns that contain non-normal variables (default: None).
pval : bool, optional
Display computed p-values (default: False).
pval_adjust : str, optional
Method used to adjust p-values for multiple testing.
For a complete list, see documentation for statsmodels multipletests.
Available methods include ::
`None` : no correction applied.
`bonferroni` : one-step correction
`sidak` : one-step correction
`holm-sidak` : step down method using Sidak adjustments
`simes-hochberg` : step-up method (independent)
`hommel` : closed method based on Simes tests (non-negative)
isnull : bool, optional
Display a count of null values (default: True).
ddof : int, optional
Degrees of freedom for standard deviation calculations (default: 1).
labels : dict, optional
Dictionary of alternative labels for variables.
e.g. `labels = {'sex':'gender', 'trt':'treatment'}`
sort : bool, optional
Sort the rows alphabetically. Default (False) retains the input order
of columns.
limit : int, optional
Limit to the top N most frequent categories.
remarks : bool, optional
Add remarks on the appropriateness of the summary measures and the
statistical tests (default: True).
label_suffix : bool, optional
Append summary type (e.g. "mean (SD); median [Q1,Q3], n (%); ") to the
row label (default: False).
decimals : int or dict, optional
Number of decimal places to display. An integer applies the rule to all
variables (default: 1). A dictionary (e.g. `decimals = {'age': 0)`) applies
the rule per variable, defaulting to 1 place for unspecified variables.
For continuous variables, applies to all summary statistics (e.g. mean and
standard deviation). For categorical variables, applies to percentage only.
Attributes
----------
tableone : dataframe
Summary of the data (i.e., the "Table 1").
"""
def __init__(self, data, columns=None, categorical=None, groupby=None,
nonnormal=None, pval=False, pval_adjust=None, isnull=True,
ddof=1, labels=None, sort=False, limit=None, remarks=True,
label_suffix=False, decimals=1):
# check input arguments
if not groupby:
groupby = ''
elif groupby and type(groupby) == list:
groupby = groupby[0]
if not nonnormal:
nonnormal=[]
elif nonnormal and type(nonnormal) == str:
nonnormal = [nonnormal]
# if columns not specified, use all columns
if not columns:
columns = data.columns.get_values()
# check that the columns exist in the dataframe
if not set(columns).issubset(data.columns):
notfound = list(set(columns) - set(data.columns))
raise InputError('Columns not found in dataset: {}'.format(notfound))
# check for duplicate columns
dups = data[columns].columns[data[columns].columns.duplicated()].unique()
if not dups.empty:
raise InputError('Input contains duplicate columns: {}'.format(dups))
# if categorical not specified, try to identify categorical
if not categorical and type(categorical) != list:
categorical = self._detect_categorical_columns(data[columns])
if pval and not groupby:
raise InputError("If pval=True then the groupby must be specified.")
self._columns = list(columns)
self._isnull = isnull
self._continuous = [c for c in columns if c not in categorical + [groupby]]
self._categorical = categorical
self._nonnormal = nonnormal
self._pval = pval
self._pval_adjust = pval_adjust
self._sort = sort
self._groupby = groupby
self._ddof = ddof # degrees of freedom for standard deviation
self._alt_labels = labels
self._limit = limit
self._remarks = remarks
self._label_suffix = label_suffix
self._decimals = decimals
# output column names that cannot be contained in a groupby
self._reserved_columns = ['isnull', 'pval', 'ptest', 'pval (adjusted)']
if self._groupby:
self._groupbylvls = sorted(data.groupby(groupby).groups.keys())
# check that the group levels do not include reserved words
for level in self._groupbylvls:
if level in self._reserved_columns:
raise InputError('Group level contained "{}", a reserved keyword for tableone.'.format(level))
else:
self._groupbylvls = ['overall']
# forgive me jraffa
if self._pval:
self._significance_table = self._create_significance_table(data)
# correct for multiple testing
if self._pval and self._pval_adjust:
alpha=0.05
adjusted = multitest.multipletests(self._significance_table['pval'],
alpha=alpha, method=self._pval_adjust)
self._significance_table['pval (adjusted)'] = adjusted[1]
self._significance_table['adjust method'] = self._pval_adjust
# create descriptive tables
if self._categorical:
self.cat_describe = self._create_cat_describe(data)
self.cat_table = self._create_cat_table(data)
# create tables of continuous and categorical variables
if self._continuous:
self.cont_describe = self._create_cont_describe(data)
self.cont_table = self._create_cont_table(data)
# combine continuous variables and categorical variables into table 1
self.tableone = self._create_tableone(data)
# self._remarks_str = self._generate_remark_str()
# wrap dataframe methods
self.head = self.tableone.head
self.tail = self.tableone.tail
self.to_csv = self.tableone.to_csv
self.to_excel = self.tableone.to_excel
self.to_html = self.tableone.to_html
self.to_json = self.tableone.to_json
self.to_latex = self.tableone.to_latex
def __str__(self):
return self.tableone.to_string() + self._generate_remark_str('\n')
def __repr__(self):
return self.tableone.to_string() + self._generate_remark_str('\n')
def _repr_html_(self):
    """Return an HTML rendering of the table plus remarks (for notebooks)."""
    html = self.tableone._repr_html_()
    return html + self._generate_remark_str('<br />')
def _generate_remark_str(self, end_of_line = '\n'):
"""
Generate a series of remarks that the user should consider
when interpreting the summary statistics.
"""
warnings = {}
msg = '{}'.format(end_of_line)
# generate warnings for continuous variables
if self._continuous:
# highlight far outliers
outlier_mask = self.cont_describe.far_outliers > 1
outlier_vars = list(self.cont_describe.far_outliers[outlier_mask].dropna(how='all').index)
if outlier_vars:
warnings["Warning, Tukey test indicates far outliers in"] = outlier_vars
# highlight possible multimodal distributions using hartigan's dip test
# -1 values indicate NaN
modal_mask = (self.cont_describe.diptest >= 0) & (self.cont_describe.diptest <= 0.05)
modal_vars = list(self.cont_describe.diptest[modal_mask].dropna(how='all').index)
if modal_vars:
warnings["Warning, Hartigan's Dip Test reports possible multimodal distributions for"] = modal_vars
# highlight non normal distributions
# -1 values indicate NaN
modal_mask = (self.cont_describe.normaltest >= 0) & (self.cont_describe.normaltest <= 0.001)
modal_vars = list(self.cont_describe.normaltest[modal_mask].dropna(how='all').index)
if modal_vars:
warnings["Warning, test for normality reports non-normal distributions for"] = modal_vars
# create the warning string
for n,k in enumerate(sorted(warnings)):
msg += '[{}] {}: {}.{}'.format(n+1,k,', '.join(warnings[k]), end_of_line)
return msg
def _detect_categorical_columns(self,data):
"""
Detect categorical columns if they are not specified.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
likely_cat : list
List of variables that appear to be categorical.
"""
# assume all non-numerical and date columns are categorical
numeric_cols = set(data._get_numeric_data().columns.values)
date_cols = set(data.select_dtypes(include=[np.datetime64]).columns)
likely_cat = set(data.columns) - numeric_cols
likely_cat = list(likely_cat - date_cols)
# check proportion of unique values if numerical
for var in data._get_numeric_data().columns:
likely_flag = 1.0 * data[var].nunique()/data[var].count() < 0.05
if likely_flag:
likely_cat.append(var)
return likely_cat
def _q25(self,x):
"""
Compute percentile (25th)
"""
return np.nanpercentile(x.values,25)
def _q75(self,x):
"""
Compute percentile (75th)
"""
return np.nanpercentile(x.values,75)
def _std(self,x):
"""
Compute standard deviation with ddof degrees of freedom
"""
return np.nanstd(x.values,ddof=self._ddof)
def _diptest(self, x):
    """
    Compute Hartigan's dip test for unimodality.
    p < 0.05 suggests possible multimodality.

    Returns -1 in place of NaN because the dropna=False argument in
    pivot_table does not function as expected.
    """
    p = modality.hartigan_diptest(x.values)
    return -1 if pd.isnull(p) else p
def _normaltest(self,x):
"""
Compute test for normal distribution.
Null hypothesis: x comes from a normal distribution
p < alpha suggests the null hypothesis can be rejected.
"""
if len(x.values[~np.isnan(x.values)]) > 10:
stat,p = stats.normaltest(x.values, nan_policy='omit')
else:
p = None
# dropna=False argument in pivot_table does not function as expected
# return -1 instead of None
if pd.isnull(p):
return -1
return p
def _outliers(self, x):
    """Count outliers in *x* according to Tukey's rule with k = 1.5."""
    return np.size(self._tukey(x, threshold=1.5))
def _far_outliers(self, x):
    """Count "far out" outliers in *x* according to Tukey's rule with k = 3.0."""
    return np.size(self._tukey(x, threshold=3.0))
def _t1_summary(self,x):
"""
Compute median [IQR] or mean (Std) for the input series.
Parameters
----------
x : pandas Series
Series of values to be summarised.
"""
# set decimal places
if isinstance(self._decimals,int):
n = self._decimals
elif isinstance(self._decimals,dict):
try:
n = self._decimals[x.name]
except:
n = 1
else:
n = 1
warnings.warn('The decimals arg must be an int or dict. Defaulting to {} d.p.'.format(n))
if x.name in self._nonnormal:
f = '{{:.{}f}} [{{:.{}f}},{{:.{}f}}]'.format(n,n,n)
return f.format(np.nanmedian(x.values),
np.nanpercentile(x.values,25), np.nanpercentile(x.values,75))
else:
f = '{{:.{}f}} ({{:.{}f}})'.format(n,n)
return f.format(np.nanmean(x.values),
np.nanstd(x.values,ddof=self._ddof))
def _create_cont_describe(self,data):
    """
    Describe the continuous data.

    Parameters
    ----------
    data : pandas DataFrame
        The input dataset.

    Returns
    ----------
    df_cont : pandas DataFrame
        Summarise the continuous variables.
    """
    # aggregations applied to every continuous column; the leading
    # underscore of the bound methods is stripped from names below
    aggfuncs = [pd.Series.count,np.mean,np.median,self._std,
        self._q25,self._q75,min,max,self._t1_summary,self._diptest,
        self._outliers,self._far_outliers,self._normaltest]
    # coerce continuous data to numeric
    cont_data = data[self._continuous].apply(pd.to_numeric, errors='coerce')
    # check all data in each continuous column is numeric: any value
    # lost to coercion means the column held non-numeric entries
    bad_cols = cont_data.count() != data[self._continuous].count()
    bad_cols = cont_data.columns[bad_cols]
    if len(bad_cols)>0:
        raise InputError("""The following continuous column(s) have non-numeric values: {}.
        Either specify the column(s) as categorical or remove the non-numeric values.""".format(bad_cols.values))
    # check for coerced column containing all NaN to warn user
    for column in cont_data.columns[cont_data.count() == 0]:
        self._non_continuous_warning(column)
    if self._groupby:
        # add the groupby column back
        cont_data = cont_data.merge(data[[self._groupby]],
            left_index=True, right_index=True)
        # group and aggregate data
        df_cont = pd.pivot_table(cont_data,
            columns=[self._groupby],
            aggfunc=aggfuncs)
    else:
        # if no groupby, just add single group column
        df_cont = cont_data.apply(aggfuncs).T
        df_cont.columns.name = 'overall'
        df_cont.columns = pd.MultiIndex.from_product([df_cont.columns,
            ['overall']])
    df_cont.index.rename('variable',inplace=True)
    # remove prefix underscore from column names (e.g. _std -> std)
    agg_rename = df_cont.columns.levels[0]
    agg_rename = [x[1:] if x[0]=='_' else x for x in agg_rename]
    df_cont.columns.set_levels(agg_rename, level=0, inplace=True)
    return df_cont
def _format_cat(self,row):
var = row.name[0]
if var in self._decimals:
n = self._decimals[var]
else:
n = 1
f = '{{:.{}f}}'.format(n)
return f.format(row.percent)
def _create_cat_describe(self,data):
    """
    Describe the categorical data.

    Parameters
    ----------
    data : pandas DataFrame
        The input dataset.

    Returns
    ----------
    df_cat : pandas DataFrame
        Summarise the categorical variables.
    """
    # one frequency table per group level, keyed by level
    group_dict = {}
    for g in self._groupbylvls:
        if self._groupby:
            d_slice = data.loc[data[self._groupby] == g, self._categorical]
        else:
            d_slice = data[self._categorical].copy()
        # create a dataframe with freq, proportion
        df = d_slice.copy()
        # convert type to string to avoid int converted to boolean, avoiding nans
        for column in df.columns:
            df[column] = [str(row) if not pd.isnull(row) else None for row in df[column].values]
        # long format: one row per (variable, category) with its count
        df = df.melt().groupby(['variable','value']).size().to_frame(name='freq')
        df.index.set_names('level', level=1, inplace=True)
        # percent within each variable (level-0 group of the index)
        df['percent'] = df['freq'].div(df.freq.sum(level=0),level=0).astype(float)* 100
        # set number of decimal places for percent
        if isinstance(self._decimals,int):
            n = self._decimals
            f = '{{:.{}f}}'.format(n)
            df['percent'] = df['percent'].astype(float).map(f.format)
        elif isinstance(self._decimals,dict):
            # per-variable precision handled row by row
            df.loc[:,'percent'] = df.apply(self._format_cat, axis=1)
        else:
            # fall back to 1 d.p. when decimals is neither int nor dict
            n = 1
            f = '{{:.{}f}}'.format(n)
            df['percent'] = df['percent'].astype(float).map(f.format)
        # add n column, listing total non-null values for each variable
        ct = d_slice.count().to_frame(name='n')
        ct.index.name = 'variable'
        df = df.join(ct)
        # add null count
        nulls = d_slice.isnull().sum().to_frame(name='isnull')
        nulls.index.name = 'variable'
        # only save null count to the first category for each variable
        # do this by extracting the first category from the df row index
        levels = df.reset_index()[['variable','level']].groupby('variable').first()
        # add this category to the nulls table
        nulls = nulls.join(levels)
        nulls.set_index('level', append=True, inplace=True)
        # join nulls to categorical
        df = df.join(nulls)
        # add summary column
        df['t1_summary'] = df.freq.map(str) + ' (' + df.percent.map(str) + ')'
        # add to dictionary
        group_dict[g] = df
    df_cat = pd.concat(group_dict,axis=1)
    # ensure the groups are the 2nd level of the column index
    if df_cat.columns.nlevels>1:
        df_cat = df_cat.swaplevel(0, 1, axis=1).sort_index(axis=1,level=0)
    return df_cat
def _create_significance_table(self,data):
    """
    Create a table containing p-values for significance tests. Add features of
    the distributions and the p-values to the dataframe.

    Parameters
    ----------
    data : pandas DataFrame
        The input dataset.

    Returns
    ----------
    df : pandas DataFrame
        A table containing the p-values, test name, etc.
    """
    # list features of the variable e.g. matched, paired, n_expected
    df=pd.DataFrame(index=self._continuous+self._categorical,
        columns=['continuous','nonnormal','min_observed','pval','ptest'])
    df.index.rename('variable', inplace=True)
    df['continuous'] = np.where(df.index.isin(self._continuous),True,False)
    df['nonnormal'] = np.where(df.index.isin(self._nonnormal),True,False)
    # list values for each variable, grouped by groupby levels
    for v in df.index:
        is_continuous = df.loc[v]['continuous']
        # NOTE(review): `~` negates a numpy bool here; this relies on the
        # np.where(...) above producing numpy bools, not Python ints
        is_categorical = ~df.loc[v]['continuous']
        is_normal = ~df.loc[v]['nonnormal']
        # if continuous, group data into list of lists
        if is_continuous:
            catlevels = None
            grouped_data = []
            for s in self._groupbylvls:
                lvl_data = data.loc[data[self._groupby]==s, v]
                # coerce to numeric and drop non-numeric data
                lvl_data = lvl_data.apply(pd.to_numeric, errors='coerce').dropna()
                # append to overall group data
                grouped_data.append(lvl_data.values)
            min_observed = len(min(grouped_data,key=len))
        # if categorical, create contingency table
        elif is_categorical:
            catlevels = sorted(data[v].astype('category').cat.categories)
            # rename avoids a clash when the groupby column equals v
            grouped_data = pd.crosstab(data[self._groupby].rename('_groupby_var_'),data[v])
            min_observed = grouped_data.sum(axis=1).min()
        # minimum number of observations across all levels
        df.loc[v,'min_observed'] = min_observed
        # compute pvalues
        df.loc[v,'pval'],df.loc[v,'ptest'] = self._p_test(v,
            grouped_data,is_continuous,is_categorical,
            is_normal,min_observed,catlevels)
    return df
def _p_test(self,v,grouped_data,is_continuous,is_categorical,
is_normal,min_observed,catlevels):
"""
Compute p-values.
Parameters
----------
v : str
Name of the variable to be tested.
grouped_data : list
List of lists of values to be tested.
is_continuous : bool
True if the variable is continuous.
is_categorical : bool
True if the variable is categorical.
is_normal : bool
True if the variable is normally distributed.
min_observed : int
Minimum number of values across groups for the variable.
catlevels : list
Sorted list of levels for categorical variables.
Returns
----------
pval : float
The computed p-value.
ptest : str
The name of the test used to compute the p-value.
"""
# no test by default
pval=np.nan
ptest='Not tested'
# do not test if the variable has no observations in a level
if min_observed == 0:
warnings.warn('No p-value was computed for {} due to the low number of observations.'.format(v))
return pval,ptest
# continuous
if is_continuous and is_normal and len(grouped_data)==2 :
ptest = 'Two Sample T-test'
test_stat, pval = stats.ttest_ind(*grouped_data,equal_var=False)
elif is_continuous and is_normal:
# normally distributed
ptest = 'One-way ANOVA'
test_stat, pval = stats.f_oneway(*grouped_data)
elif is_continuous and not is_normal:
# non-normally distributed
ptest = 'Kruskal-Wallis'
test_stat, pval = stats.kruskal(*grouped_data)
# categorical
elif is_categorical:
# default to chi-squared
ptest = 'Chi-squared'
chi2, pval, dof, expected = stats.chi2_contingency(grouped_data)
# if any expected cell counts are < 5, chi2 may not be valid
# if this is a 2x2, switch to fisher exact
if expected.min() < 5:
if grouped_data.shape == (2,2):
ptest = "Fisher's exact"
oddsratio, pval = stats.fisher_exact(grouped_data)
else:
ptest = 'Chi-squared (warning: expected count < 5)'
warnings.warn('No p-value was computed for {} due to the low number of observations.'.format(v))
return pval,ptest
def _create_cont_table(self,data):
    """
    Create tableone for continuous data.

    Parameters
    ----------
    data : pandas DataFrame
        The input dataset (used only for the null counts).

    Returns
    ----------
    table : pandas DataFrame
        A table summarising the continuous variables.
    """
    # remove the t1_summary level
    table = self.cont_describe[['t1_summary']].copy()
    table.columns = table.columns.droplevel(level=0)
    # add a column of null counts as 1-count() from previous function
    nulltable = data[self._continuous].isnull().sum().to_frame(name='isnull')
    try:
        table = table.join(nulltable)
    except TypeError: # if columns form a CategoricalIndex, need to convert to string first
        table.columns = table.columns.astype(str)
        table = table.join(nulltable)
    # add an empty level column, for joining with cat table
    table['level'] = ''
    table.set_index([table.index,'level'],inplace=True)
    # add pval column
    if self._pval and self._pval_adjust:
        table = table.join(self._significance_table[['pval (adjusted)','ptest']])
    elif self._pval:
        table = table.join(self._significance_table[['pval','ptest']])
    return table
def _create_cat_table(self,data):
    """
    Create table one for categorical data.

    Parameters
    ----------
    data : pandas DataFrame
        The input dataset (used only for the null counts).

    Returns
    ----------
    table : pandas DataFrame
        A table summarising the categorical variables.
    """
    table = self.cat_describe['t1_summary'].copy()
    # add the total count of null values across all levels
    isnull = data[self._categorical].isnull().sum().to_frame(name='isnull')
    isnull.index.rename('variable', inplace=True)
    try:
        table = table.join(isnull)
    except TypeError: # if columns form a CategoricalIndex, need to convert to string first
        table.columns = table.columns.astype(str)
        table = table.join(isnull)
    # add pval column
    if self._pval and self._pval_adjust:
        table = table.join(self._significance_table[['pval (adjusted)','ptest']])
    elif self._pval:
        table = table.join(self._significance_table[['pval','ptest']])
    return table
def _create_tableone(self,data):
    """
    Create table 1 by combining the continuous and categorical tables.

    Parameters
    ----------
    data : pandas DataFrame
        The input dataset (used for group counts and category ordering).

    Returns
    ----------
    table : pandas DataFrame
        The complete table one.
    """
    if self._continuous and self._categorical:
        # support pandas<=0.22
        try:
            table = pd.concat([self.cont_table,self.cat_table],sort=False)
        except:
            table = pd.concat([self.cont_table,self.cat_table])
    elif self._continuous:
        table = self.cont_table
    elif self._categorical:
        table = self.cat_table
    # round pval column and convert to string
    if self._pval and self._pval_adjust:
        table['pval (adjusted)'] = table['pval (adjusted)'].apply('{:.3f}'.format).astype(str)
        table.loc[table['pval (adjusted)'] == '0.000', 'pval (adjusted)'] = '<0.001'
    elif self._pval:
        table['pval'] = table['pval'].apply('{:.3f}'.format).astype(str)
        table.loc[table['pval'] == '0.000', 'pval'] = '<0.001'
    # sort the table rows
    # NOTE(review): reset_index() returns a copy, so the in-place
    # set_index below modifies a temporary and is a no-op on `table`;
    # the index is already (variable, level) at this point -- confirm intent.
    table.reset_index().set_index(['variable','level'], inplace=True)
    if self._sort:
        # alphabetical
        new_index = sorted(table.index.values)
    else:
        # sort by the columns argument
        new_index = sorted(table.index.values,key=lambda x: self._columns.index(x[0]))
    table = table.reindex(new_index)
    # if a limit has been set on the number of categorical variables
    # then re-order the variables by frequency
    if self._limit:
        levelcounts = data[self._categorical].nunique()
        levelcounts = levelcounts[levelcounts >= self._limit]
        for v,_ in levelcounts.iteritems():
            count = data[v].value_counts().sort_values(ascending=False)
            new_index = [(v, i) for i in count.index]
            # restructure to match orig_index
            new_index_array=np.empty((len(new_index),), dtype=object)
            new_index_array[:]=[tuple(i) for i in new_index]
            orig_index = table.index.values.copy()
            orig_index[table.index.get_loc(v)] = new_index_array
            table = table.reindex(orig_index)
    # inserts n row
    n_row = pd.DataFrame(columns = ['variable','level','isnull'])
    n_row.set_index(['variable','level'], inplace=True)
    n_row.loc['n', ''] = None
    # support pandas<=0.22
    try:
        table = pd.concat([n_row,table],sort=False)
    except:
        table = pd.concat([n_row,table])
    if self._groupbylvls == ['overall']:
        table.loc['n','overall'] = len(data.index)
    else:
        # per-group observation counts for the 'n' row
        for g in self._groupbylvls:
            ct = data[self._groupby][data[self._groupby]==g].count()
            table.loc['n',g] = ct
    # only display data in first level row
    dupe_mask = table.groupby(level=[0]).cumcount().ne(0)
    dupe_columns = ['isnull']
    optional_columns = ['pval','pval (adjusted)','ptest']
    for col in optional_columns:
        if col in table.columns.values:
            dupe_columns.append(col)
    table[dupe_columns] = table[dupe_columns].mask(dupe_mask).fillna('')
    # remove empty column added above (created by n_row.loc['n',''])
    table.drop([''], axis=1, inplace=True)
    # remove isnull column if not needed
    if not self._isnull:
        table.drop('isnull',axis=1,inplace=True)
    # replace nans with empty strings
    table.fillna('',inplace=True)
    # add column index
    if not self._groupbylvls == ['overall']:
        # rename groupby variable if requested
        c = self._groupby
        if self._alt_labels:
            if self._groupby in self._alt_labels:
                c = self._alt_labels[self._groupby]
        c = 'Grouped by {}'.format(c)
        table.columns = pd.MultiIndex.from_product([[c], table.columns])
    # display alternative labels if assigned
    table.rename(index=self._create_row_labels(), inplace=True, level=0)
    # if a limit has been set on the number of categorical variables
    # limit the number of categorical variables that are displayed
    if self._limit:
        table = table.groupby('variable').head(self._limit)
    # re-order the columns in a consistent fashion
    if self._groupby:
        cols = table.columns.levels[1].values
    else:
        cols = table.columns.values
    if 'isnull' in cols:
        cols = ['isnull'] + [x for x in cols if x != 'isnull']
    # iterate through each optional column
    # if they exist, put them at the end of the dataframe
    # ensures the last 3 columns will be in the same order as optional_columns
    for col in optional_columns:
        if col in cols:
            cols = [x for x in cols if x != col] + [col]
    if self._groupby:
        table = table.reindex(cols, axis=1, level=1)
    else:
        table = table.reindex(cols, axis=1)
    return table
def _create_row_labels(self):
"""
Take the original labels for rows. Rename if alternative labels are
provided. Append label suffix if label_suffix is True.
Returns
----------
labels : dictionary
Dictionary, keys are original column name, values are final label.
"""
# start with the original column names
labels = {}
for c in self._columns:
labels[c] = c
# replace column names with alternative names if provided
if self._alt_labels:
for k in self._alt_labels.keys():
labels[k] = self._alt_labels[k]
# append the label suffix
if self._label_suffix:
for k in labels.keys():
if k in self._nonnormal:
labels[k] = "{}, {}".format(labels[k],"median [Q1,Q3]")
elif k in self._categorical:
labels[k] = "{}, {}".format(labels[k],"n (%)")
else:
labels[k] = "{}, {}".format(labels[k],"mean (SD)")
return labels
# warnings
def _non_continuous_warning(self, c):
    # Warn that column *c* was treated as continuous but contains no
    # numeric values, and should probably be declared categorical.
    warnings.warn('''"{}" has all non-numeric values. Consider including it in the
    list of categorical variables.'''.format(c), RuntimeWarning, stacklevel=2)
|
tompollard/tableone
|
tableone.py
|
TableOne._outliers
|
python
|
def _outliers(self,x):
outliers = self._tukey(x, threshold = 1.5)
return np.size(outliers)
|
Compute number of outliers
|
train
|
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L334-L339
| null |
class TableOne(object):
"""
If you use the tableone package, please cite:
Pollard TJ, Johnson AEW, Raffa JD, Mark RG (2018). tableone: An open source Python
package for producing summary statistics for research papers. JAMIA Open, Volume 1,
Issue 1, 1 July 2018, Pages 26-31. https://doi.org/10.1093/jamiaopen/ooy012
Create an instance of the tableone summary table.
Parameters
----------
data : pandas DataFrame
The dataset to be summarised. Rows are observations, columns are variables.
columns : list, optional
List of columns in the dataset to be included in the final table.
categorical : list, optional
List of columns that contain categorical variables.
groupby : str, optional
Optional column for stratifying the final table (default: None).
nonnormal : list, optional
List of columns that contain non-normal variables (default: None).
pval : bool, optional
Display computed p-values (default: False).
pval_adjust : str, optional
Method used to adjust p-values for multiple testing.
For a complete list, see documentation for statsmodels multipletests.
Available methods include ::
`None` : no correction applied.
`bonferroni` : one-step correction
`sidak` : one-step correction
`holm-sidak` : step down method using Sidak adjustments
`simes-hochberg` : step-up method (independent)
`hommel` : closed method based on Simes tests (non-negative)
isnull : bool, optional
Display a count of null values (default: True).
ddof : int, optional
Degrees of freedom for standard deviation calculations (default: 1).
labels : dict, optional
Dictionary of alternative labels for variables.
e.g. `labels = {'sex':'gender', 'trt':'treatment'}`
sort : bool, optional
Sort the rows alphabetically. Default (False) retains the input order
of columns.
limit : int, optional
Limit to the top N most frequent categories.
remarks : bool, optional
Add remarks on the appropriateness of the summary measures and the
statistical tests (default: True).
label_suffix : bool, optional
Append summary type (e.g. "mean (SD); median [Q1,Q3], n (%); ") to the
row label (default: False).
decimals : int or dict, optional
Number of decimal places to display. An integer applies the rule to all
variables (default: 1). A dictionary (e.g. `decimals = {'age': 0}`) applies
the rule per variable, defaulting to 1 place for unspecified variables.
For continuous variables, applies to all summary statistics (e.g. mean and
standard deviation). For categorical variables, applies to percentage only.
Attributes
----------
tableone : dataframe
Summary of the data (i.e., the "Table 1").
"""
def __init__(self, data, columns=None, categorical=None, groupby=None,
    nonnormal=None, pval=False, pval_adjust=None, isnull=True,
    ddof=1, labels=None, sort=False, limit=None, remarks=True,
    label_suffix=False, decimals=1):
    # check input arguments
    # normalise groupby to a single column name ('' means no grouping)
    if not groupby:
        groupby = ''
    elif groupby and type(groupby) == list:
        groupby = groupby[0]
    # normalise nonnormal to a list of column names
    if not nonnormal:
        nonnormal=[]
    elif nonnormal and type(nonnormal) == str:
        nonnormal = [nonnormal]
    # if columns not specified, use all columns
    if not columns:
        columns = data.columns.get_values()
    # check that the columns exist in the dataframe
    if not set(columns).issubset(data.columns):
        notfound = list(set(columns) - set(data.columns))
        raise InputError('Columns not found in dataset: {}'.format(notfound))
    # check for duplicate columns
    dups = data[columns].columns[data[columns].columns.duplicated()].unique()
    if not dups.empty:
        raise InputError('Input contains duplicate columns: {}'.format(dups))
    # if categorical not specified, try to identify categorical
    if not categorical and type(categorical) != list:
        categorical = self._detect_categorical_columns(data[columns])
    if pval and not groupby:
        raise InputError("If pval=True then the groupby must be specified.")
    self._columns = list(columns)
    self._isnull = isnull
    # continuous = every column that is neither categorical nor the groupby
    self._continuous = [c for c in columns if c not in categorical + [groupby]]
    self._categorical = categorical
    self._nonnormal = nonnormal
    self._pval = pval
    self._pval_adjust = pval_adjust
    self._sort = sort
    self._groupby = groupby
    self._ddof = ddof # degrees of freedom for standard deviation
    self._alt_labels = labels
    self._limit = limit
    self._remarks = remarks
    self._label_suffix = label_suffix
    self._decimals = decimals
    # output column names that cannot be contained in a groupby
    self._reserved_columns = ['isnull', 'pval', 'ptest', 'pval (adjusted)']
    if self._groupby:
        self._groupbylvls = sorted(data.groupby(groupby).groups.keys())
        # check that the group levels do not include reserved words
        for level in self._groupbylvls:
            if level in self._reserved_columns:
                raise InputError('Group level contained "{}", a reserved keyword for tableone.'.format(level))
    else:
        self._groupbylvls = ['overall']
    # forgive me jraffa
    if self._pval:
        self._significance_table = self._create_significance_table(data)
    # correct for multiple testing
    if self._pval and self._pval_adjust:
        alpha=0.05
        adjusted = multitest.multipletests(self._significance_table['pval'],
            alpha=alpha, method=self._pval_adjust)
        self._significance_table['pval (adjusted)'] = adjusted[1]
        self._significance_table['adjust method'] = self._pval_adjust
    # create descriptive tables
    if self._categorical:
        self.cat_describe = self._create_cat_describe(data)
        self.cat_table = self._create_cat_table(data)
    # create tables of continuous and categorical variables
    if self._continuous:
        self.cont_describe = self._create_cont_describe(data)
        self.cont_table = self._create_cont_table(data)
    # combine continuous variables and categorical variables into table 1
    self.tableone = self._create_tableone(data)
    # self._remarks_str = self._generate_remark_str()
    # wrap dataframe methods for convenient export/inspection
    self.head = self.tableone.head
    self.tail = self.tableone.tail
    self.to_csv = self.tableone.to_csv
    self.to_excel = self.tableone.to_excel
    self.to_html = self.tableone.to_html
    self.to_json = self.tableone.to_json
    self.to_latex = self.tableone.to_latex
def __str__(self):
    """Return the formatted table followed by interpretation remarks."""
    rendered = self.tableone.to_string()
    remarks = self._generate_remark_str('\n')
    return rendered + remarks
def __repr__(self):
    """Return the formatted table followed by interpretation remarks."""
    rendered = self.tableone.to_string()
    remarks = self._generate_remark_str('\n')
    return rendered + remarks
def _repr_html_(self):
    """Return an HTML rendering of the table plus remarks (for notebooks)."""
    html = self.tableone._repr_html_()
    return html + self._generate_remark_str('<br />')
def _generate_remark_str(self, end_of_line = '\n'):
"""
Generate a series of remarks that the user should consider
when interpreting the summary statistics.
"""
warnings = {}
msg = '{}'.format(end_of_line)
# generate warnings for continuous variables
if self._continuous:
# highlight far outliers
outlier_mask = self.cont_describe.far_outliers > 1
outlier_vars = list(self.cont_describe.far_outliers[outlier_mask].dropna(how='all').index)
if outlier_vars:
warnings["Warning, Tukey test indicates far outliers in"] = outlier_vars
# highlight possible multimodal distributions using hartigan's dip test
# -1 values indicate NaN
modal_mask = (self.cont_describe.diptest >= 0) & (self.cont_describe.diptest <= 0.05)
modal_vars = list(self.cont_describe.diptest[modal_mask].dropna(how='all').index)
if modal_vars:
warnings["Warning, Hartigan's Dip Test reports possible multimodal distributions for"] = modal_vars
# highlight non normal distributions
# -1 values indicate NaN
modal_mask = (self.cont_describe.normaltest >= 0) & (self.cont_describe.normaltest <= 0.001)
modal_vars = list(self.cont_describe.normaltest[modal_mask].dropna(how='all').index)
if modal_vars:
warnings["Warning, test for normality reports non-normal distributions for"] = modal_vars
# create the warning string
for n,k in enumerate(sorted(warnings)):
msg += '[{}] {}: {}.{}'.format(n+1,k,', '.join(warnings[k]), end_of_line)
return msg
def _detect_categorical_columns(self,data):
"""
Detect categorical columns if they are not specified.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
likely_cat : list
List of variables that appear to be categorical.
"""
# assume all non-numerical and date columns are categorical
numeric_cols = set(data._get_numeric_data().columns.values)
date_cols = set(data.select_dtypes(include=[np.datetime64]).columns)
likely_cat = set(data.columns) - numeric_cols
likely_cat = list(likely_cat - date_cols)
# check proportion of unique values if numerical
for var in data._get_numeric_data().columns:
likely_flag = 1.0 * data[var].nunique()/data[var].count() < 0.05
if likely_flag:
likely_cat.append(var)
return likely_cat
def _q25(self,x):
"""
Compute percentile (25th)
"""
return np.nanpercentile(x.values,25)
def _q75(self,x):
"""
Compute percentile (75th)
"""
return np.nanpercentile(x.values,75)
def _std(self,x):
"""
Compute standard deviation with ddof degrees of freedom
"""
return np.nanstd(x.values,ddof=self._ddof)
def _diptest(self, x):
    """
    Compute Hartigan's dip test for unimodality.
    p < 0.05 suggests possible multimodality.

    Returns -1 in place of NaN because the dropna=False argument in
    pivot_table does not function as expected.
    """
    p = modality.hartigan_diptest(x.values)
    return -1 if pd.isnull(p) else p
def _normaltest(self,x):
"""
Compute test for normal distribution.
Null hypothesis: x comes from a normal distribution
p < alpha suggests the null hypothesis can be rejected.
"""
if len(x.values[~np.isnan(x.values)]) > 10:
stat,p = stats.normaltest(x.values, nan_policy='omit')
else:
p = None
# dropna=False argument in pivot_table does not function as expected
# return -1 instead of None
if pd.isnull(p):
return -1
return p
def _tukey(self,x,threshold):
"""
Count outliers according to Tukey's rule.
Where Q1 is the lower quartile and Q3 is the upper quartile,
an outlier is an observation outside of the range:
[Q1 - k(Q3 - Q1), Q3 + k(Q3 - Q1)]
k = 1.5 indicates an outlier
k = 3.0 indicates an outlier that is "far out"
"""
vals = x.values[~np.isnan(x.values)]
try:
q1, q3 = np.percentile(vals, [25, 75])
iqr = q3 - q1
low_bound = q1 - (iqr * threshold)
high_bound = q3 + (iqr * threshold)
outliers = np.where((vals > high_bound) | (vals < low_bound))
except:
outliers = []
return outliers
def _far_outliers(self, x):
    """Count "far out" outliers in *x* according to Tukey's rule with k = 3.0."""
    return np.size(self._tukey(x, threshold=3.0))
def _t1_summary(self,x):
"""
Compute median [IQR] or mean (Std) for the input series.
Parameters
----------
x : pandas Series
Series of values to be summarised.
"""
# set decimal places
if isinstance(self._decimals,int):
n = self._decimals
elif isinstance(self._decimals,dict):
try:
n = self._decimals[x.name]
except:
n = 1
else:
n = 1
warnings.warn('The decimals arg must be an int or dict. Defaulting to {} d.p.'.format(n))
if x.name in self._nonnormal:
f = '{{:.{}f}} [{{:.{}f}},{{:.{}f}}]'.format(n,n,n)
return f.format(np.nanmedian(x.values),
np.nanpercentile(x.values,25), np.nanpercentile(x.values,75))
else:
f = '{{:.{}f}} ({{:.{}f}})'.format(n,n)
return f.format(np.nanmean(x.values),
np.nanstd(x.values,ddof=self._ddof))
def _create_cont_describe(self,data):
    """
    Describe the continuous data.

    Parameters
    ----------
    data : pandas DataFrame
        The input dataset.

    Returns
    ----------
    df_cont : pandas DataFrame
        Summarise the continuous variables.
    """
    # aggregations applied to every continuous column; the leading
    # underscore of the bound methods is stripped from names below
    aggfuncs = [pd.Series.count,np.mean,np.median,self._std,
        self._q25,self._q75,min,max,self._t1_summary,self._diptest,
        self._outliers,self._far_outliers,self._normaltest]
    # coerce continuous data to numeric
    cont_data = data[self._continuous].apply(pd.to_numeric, errors='coerce')
    # check all data in each continuous column is numeric: any value
    # lost to coercion means the column held non-numeric entries
    bad_cols = cont_data.count() != data[self._continuous].count()
    bad_cols = cont_data.columns[bad_cols]
    if len(bad_cols)>0:
        raise InputError("""The following continuous column(s) have non-numeric values: {}.
        Either specify the column(s) as categorical or remove the non-numeric values.""".format(bad_cols.values))
    # check for coerced column containing all NaN to warn user
    for column in cont_data.columns[cont_data.count() == 0]:
        self._non_continuous_warning(column)
    if self._groupby:
        # add the groupby column back
        cont_data = cont_data.merge(data[[self._groupby]],
            left_index=True, right_index=True)
        # group and aggregate data
        df_cont = pd.pivot_table(cont_data,
            columns=[self._groupby],
            aggfunc=aggfuncs)
    else:
        # if no groupby, just add single group column
        df_cont = cont_data.apply(aggfuncs).T
        df_cont.columns.name = 'overall'
        df_cont.columns = pd.MultiIndex.from_product([df_cont.columns,
            ['overall']])
    df_cont.index.rename('variable',inplace=True)
    # remove prefix underscore from column names (e.g. _std -> std)
    agg_rename = df_cont.columns.levels[0]
    agg_rename = [x[1:] if x[0]=='_' else x for x in agg_rename]
    df_cont.columns.set_levels(agg_rename, level=0, inplace=True)
    return df_cont
def _format_cat(self,row):
var = row.name[0]
if var in self._decimals:
n = self._decimals[var]
else:
n = 1
f = '{{:.{}f}}'.format(n)
return f.format(row.percent)
def _create_cat_describe(self,data):
    """
    Describe the categorical data.

    Parameters
    ----------
    data : pandas DataFrame
        The input dataset.

    Returns
    ----------
    df_cat : pandas DataFrame
        Summarise the categorical variables.
    """
    group_dict = {}
    # build one frequency/percent table per group level
    for g in self._groupbylvls:
        if self._groupby:
            d_slice = data.loc[data[self._groupby] == g, self._categorical]
        else:
            d_slice = data[self._categorical].copy()
        # create a dataframe with freq, proportion
        df = d_slice.copy()
        # convert type to string to avoid int converted to boolean, avoiding nans
        for column in df.columns:
            df[column] = [str(row) if not pd.isnull(row) else None for row in df[column].values]
        df = df.melt().groupby(['variable','value']).size().to_frame(name='freq')
        df.index.set_names('level', level=1, inplace=True)
        # percent within each variable (level 0 of the index)
        # NOTE(review): Series.sum(level=...) is removed in pandas >= 2.0;
        # confirm the pinned pandas version supports it
        df['percent'] = df['freq'].div(df.freq.sum(level=0),level=0).astype(float)* 100
        # set number of decimal places for percent
        if isinstance(self._decimals,int):
            n = self._decimals
            f = '{{:.{}f}}'.format(n)
            df['percent'] = df['percent'].astype(float).map(f.format)
        elif isinstance(self._decimals,dict):
            # per-variable decimal places
            df.loc[:,'percent'] = df.apply(self._format_cat, axis=1)
        else:
            n = 1
            f = '{{:.{}f}}'.format(n)
            df['percent'] = df['percent'].astype(float).map(f.format)
        # add n column, listing total non-null values for each variable
        ct = d_slice.count().to_frame(name='n')
        ct.index.name = 'variable'
        df = df.join(ct)
        # add null count
        nulls = d_slice.isnull().sum().to_frame(name='isnull')
        nulls.index.name = 'variable'
        # only save null count to the first category for each variable
        # do this by extracting the first category from the df row index
        levels = df.reset_index()[['variable','level']].groupby('variable').first()
        # add this category to the nulls table
        nulls = nulls.join(levels)
        nulls.set_index('level', append=True, inplace=True)
        # join nulls to categorical
        df = df.join(nulls)
        # add summary column
        df['t1_summary'] = df.freq.map(str) + ' (' + df.percent.map(str) + ')'
        # add to dictionary
        group_dict[g] = df
    df_cat = pd.concat(group_dict,axis=1)
    # ensure the groups are the 2nd level of the column index
    if df_cat.columns.nlevels>1:
        df_cat = df_cat.swaplevel(0, 1, axis=1).sort_index(axis=1,level=0)
    return df_cat
def _create_significance_table(self,data):
    """
    Create a table containing p-values for significance tests. Add features of
    the distributions and the p-values to the dataframe.

    Parameters
    ----------
    data : pandas DataFrame
        The input dataset.

    Returns
    ----------
    df : pandas DataFrame
        A table containing the p-values, test name, etc.
    """
    # list features of the variable e.g. matched, paired, n_expected
    df=pd.DataFrame(index=self._continuous+self._categorical,
        columns=['continuous','nonnormal','min_observed','pval','ptest'])
    df.index.rename('variable', inplace=True)
    df['continuous'] = np.where(df.index.isin(self._continuous),True,False)
    df['nonnormal'] = np.where(df.index.isin(self._nonnormal),True,False)
    # list values for each variable, grouped by groupby levels
    for v in df.index:
        is_continuous = df.loc[v]['continuous']
        # ``~`` relies on these flags being numpy bools (set via np.where
        # above); with plain Python bools it would yield -2/-1 instead
        is_categorical = ~df.loc[v]['continuous']
        is_normal = ~df.loc[v]['nonnormal']
        # if continuous, group data into list of lists
        if is_continuous:
            catlevels = None
            grouped_data = []
            for s in self._groupbylvls:
                lvl_data = data.loc[data[self._groupby]==s, v]
                # coerce to numeric and drop non-numeric data
                lvl_data = lvl_data.apply(pd.to_numeric, errors='coerce').dropna()
                # append to overall group data
                grouped_data.append(lvl_data.values)
            min_observed = len(min(grouped_data,key=len))
        # if categorical, create contingency table
        elif is_categorical:
            catlevels = sorted(data[v].astype('category').cat.categories)
            # rename the groupby column in case its name collides with v
            grouped_data = pd.crosstab(data[self._groupby].rename('_groupby_var_'),data[v])
            min_observed = grouped_data.sum(axis=1).min()
        # minimum number of observations across all levels
        df.loc[v,'min_observed'] = min_observed
        # compute pvalues
        df.loc[v,'pval'],df.loc[v,'ptest'] = self._p_test(v,
            grouped_data,is_continuous,is_categorical,
            is_normal,min_observed,catlevels)
    return df
def _p_test(self,v,grouped_data,is_continuous,is_categorical,
is_normal,min_observed,catlevels):
"""
Compute p-values.
Parameters
----------
v : str
Name of the variable to be tested.
grouped_data : list
List of lists of values to be tested.
is_continuous : bool
True if the variable is continuous.
is_categorical : bool
True if the variable is categorical.
is_normal : bool
True if the variable is normally distributed.
min_observed : int
Minimum number of values across groups for the variable.
catlevels : list
Sorted list of levels for categorical variables.
Returns
----------
pval : float
The computed p-value.
ptest : str
The name of the test used to compute the p-value.
"""
# no test by default
pval=np.nan
ptest='Not tested'
# do not test if the variable has no observations in a level
if min_observed == 0:
warnings.warn('No p-value was computed for {} due to the low number of observations.'.format(v))
return pval,ptest
# continuous
if is_continuous and is_normal and len(grouped_data)==2 :
ptest = 'Two Sample T-test'
test_stat, pval = stats.ttest_ind(*grouped_data,equal_var=False)
elif is_continuous and is_normal:
# normally distributed
ptest = 'One-way ANOVA'
test_stat, pval = stats.f_oneway(*grouped_data)
elif is_continuous and not is_normal:
# non-normally distributed
ptest = 'Kruskal-Wallis'
test_stat, pval = stats.kruskal(*grouped_data)
# categorical
elif is_categorical:
# default to chi-squared
ptest = 'Chi-squared'
chi2, pval, dof, expected = stats.chi2_contingency(grouped_data)
# if any expected cell counts are < 5, chi2 may not be valid
# if this is a 2x2, switch to fisher exact
if expected.min() < 5:
if grouped_data.shape == (2,2):
ptest = "Fisher's exact"
oddsratio, pval = stats.fisher_exact(grouped_data)
else:
ptest = 'Chi-squared (warning: expected count < 5)'
warnings.warn('No p-value was computed for {} due to the low number of observations.'.format(v))
return pval,ptest
def _create_cont_table(self,data):
    """
    Create tableone for continuous data.

    Returns
    ----------
    table : pandas DataFrame
        A table summarising the continuous variables.
    """
    # remove the t1_summary level
    table = self.cont_describe[['t1_summary']].copy()
    table.columns = table.columns.droplevel(level=0)
    # add a column of null counts as 1-count() from previous function
    nulltable = data[self._continuous].isnull().sum().to_frame(name='isnull')
    try:
        table = table.join(nulltable)
    # if columns form a CategoricalIndex, need to convert to string first
    except TypeError:
        table.columns = table.columns.astype(str)
        table = table.join(nulltable)
    # add an empty level column, for joining with cat table
    table['level'] = ''
    table.set_index([table.index,'level'],inplace=True)
    # add pval column (adjusted p-values take precedence when enabled)
    if self._pval and self._pval_adjust:
        table = table.join(self._significance_table[['pval (adjusted)','ptest']])
    elif self._pval:
        table = table.join(self._significance_table[['pval','ptest']])
    return table
def _create_cat_table(self,data):
"""
Create table one for categorical data.
Returns
----------
table : pandas DataFrame
A table summarising the categorical variables.
"""
table = self.cat_describe['t1_summary'].copy()
# add the total count of null values across all levels
isnull = data[self._categorical].isnull().sum().to_frame(name='isnull')
isnull.index.rename('variable', inplace=True)
try:
table = table.join(isnull)
except TypeError: # if columns form a CategoricalIndex, need to convert to string first
table.columns = table.columns.astype(str)
table = table.join(isnull)
# add pval column
if self._pval and self._pval_adjust:
table = table.join(self._significance_table[['pval (adjusted)','ptest']])
elif self._pval:
table = table.join(self._significance_table[['pval','ptest']])
return table
def _create_tableone(self,data):
    """
    Create table 1 by combining the continuous and categorical tables.

    Returns
    ----------
    table : pandas DataFrame
        The complete table one.
    """
    if self._continuous and self._categorical:
        # support pandas<=0.22 (which lacks concat's sort kwarg)
        try:
            table = pd.concat([self.cont_table,self.cat_table],sort=False)
        except:
            table = pd.concat([self.cont_table,self.cat_table])
    elif self._continuous:
        table = self.cont_table
    elif self._categorical:
        table = self.cat_table
    # round pval column and convert to string
    if self._pval and self._pval_adjust:
        table['pval (adjusted)'] = table['pval (adjusted)'].apply('{:.3f}'.format).astype(str)
        table.loc[table['pval (adjusted)'] == '0.000', 'pval (adjusted)'] = '<0.001'
    elif self._pval:
        table['pval'] = table['pval'].apply('{:.3f}'.format).astype(str)
        table.loc[table['pval'] == '0.000', 'pval'] = '<0.001'
    # sort the table rows
    # NOTE(review): reset_index() returns a new frame, so this set_index
    # call mutates a throwaway copy and leaves ``table`` unchanged; it is
    # harmless only because cont/cat tables already carry a
    # (variable, level) index -- confirm and consider removing
    table.reset_index().set_index(['variable','level'], inplace=True)
    if self._sort:
        # alphabetical
        new_index = sorted(table.index.values)
    else:
        # sort by the columns argument
        new_index = sorted(table.index.values,key=lambda x: self._columns.index(x[0]))
    table = table.reindex(new_index)
    # if a limit has been set on the number of categorical variables
    # then re-order the variables by frequency
    if self._limit:
        levelcounts = data[self._categorical].nunique()
        levelcounts = levelcounts[levelcounts >= self._limit]
        # NOTE(review): Series.iteritems is removed in pandas >= 2.0
        for v,_ in levelcounts.iteritems():
            count = data[v].value_counts().sort_values(ascending=False)
            new_index = [(v, i) for i in count.index]
            # restructure to match orig_index
            new_index_array=np.empty((len(new_index),), dtype=object)
            new_index_array[:]=[tuple(i) for i in new_index]
            orig_index = table.index.values.copy()
            orig_index[table.index.get_loc(v)] = new_index_array
            table = table.reindex(orig_index)
    # inserts n row
    n_row = pd.DataFrame(columns = ['variable','level','isnull'])
    n_row.set_index(['variable','level'], inplace=True)
    n_row.loc['n', ''] = None
    # support pandas<=0.22
    try:
        table = pd.concat([n_row,table],sort=False)
    except:
        table = pd.concat([n_row,table])
    # fill the n row with the overall / per-group sample sizes
    if self._groupbylvls == ['overall']:
        table.loc['n','overall'] = len(data.index)
    else:
        for g in self._groupbylvls:
            ct = data[self._groupby][data[self._groupby]==g].count()
            table.loc['n',g] = ct
    # only display data in first level row
    dupe_mask = table.groupby(level=[0]).cumcount().ne(0)
    dupe_columns = ['isnull']
    optional_columns = ['pval','pval (adjusted)','ptest']
    for col in optional_columns:
        if col in table.columns.values:
            dupe_columns.append(col)
    # blank out the repeated per-variable values on non-first rows
    table[dupe_columns] = table[dupe_columns].mask(dupe_mask).fillna('')
    # remove empty column added above
    table.drop([''], axis=1, inplace=True)
    # remove isnull column if not needed
    if not self._isnull:
        table.drop('isnull',axis=1,inplace=True)
    # replace nans with empty strings
    table.fillna('',inplace=True)
    # add column index
    if not self._groupbylvls == ['overall']:
        # rename groupby variable if requested
        c = self._groupby
        if self._alt_labels:
            if self._groupby in self._alt_labels:
                c = self._alt_labels[self._groupby]
        c = 'Grouped by {}'.format(c)
        table.columns = pd.MultiIndex.from_product([[c], table.columns])
    # display alternative labels if assigned
    table.rename(index=self._create_row_labels(), inplace=True, level=0)
    # if a limit has been set on the number of categorical variables
    # limit the number of categorical variables that are displayed
    if self._limit:
        table = table.groupby('variable').head(self._limit)
    # re-order the columns in a consistent fashion
    if self._groupby:
        cols = table.columns.levels[1].values
    else:
        cols = table.columns.values
    # put the isnull column first if present
    if 'isnull' in cols:
        cols = ['isnull'] + [x for x in cols if x != 'isnull']
    # iterate through each optional column
    # if they exist, put them at the end of the dataframe
    # ensures the last 3 columns will be in the same order as optional_columns
    for col in optional_columns:
        if col in cols:
            cols = [x for x in cols if x != col] + [col]
    if self._groupby:
        table = table.reindex(cols, axis=1, level=1)
    else:
        table = table.reindex(cols, axis=1)
    return table
def _create_row_labels(self):
"""
Take the original labels for rows. Rename if alternative labels are
provided. Append label suffix if label_suffix is True.
Returns
----------
labels : dictionary
Dictionary, keys are original column name, values are final label.
"""
# start with the original column names
labels = {}
for c in self._columns:
labels[c] = c
# replace column names with alternative names if provided
if self._alt_labels:
for k in self._alt_labels.keys():
labels[k] = self._alt_labels[k]
# append the label suffix
if self._label_suffix:
for k in labels.keys():
if k in self._nonnormal:
labels[k] = "{}, {}".format(labels[k],"median [Q1,Q3]")
elif k in self._categorical:
labels[k] = "{}, {}".format(labels[k],"n (%)")
else:
labels[k] = "{}, {}".format(labels[k],"mean (SD)")
return labels
# warnings
def _non_continuous_warning(self, c):
    """Warn that column ``c`` contains only non-numeric values."""
    # stacklevel=2 attributes the warning to the caller rather than here
    warnings.warn('''"{}" has all non-numeric values. Consider including it in the
list of categorical variables.'''.format(c), RuntimeWarning, stacklevel=2)
|
tompollard/tableone
|
tableone.py
|
TableOne._far_outliers
|
python
|
def _far_outliers(self,x):
outliers = self._tukey(x, threshold = 3.0)
return np.size(outliers)
|
Compute number of "far out" outliers
|
train
|
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L341-L346
| null |
class TableOne(object):
"""
If you use the tableone package, please cite:
Pollard TJ, Johnson AEW, Raffa JD, Mark RG (2018). tableone: An open source Python
package for producing summary statistics for research papers. JAMIA Open, Volume 1,
Issue 1, 1 July 2018, Pages 26-31. https://doi.org/10.1093/jamiaopen/ooy012
Create an instance of the tableone summary table.
Parameters
----------
data : pandas DataFrame
The dataset to be summarised. Rows are observations, columns are variables.
columns : list, optional
List of columns in the dataset to be included in the final table.
categorical : list, optional
List of columns that contain categorical variables.
groupby : str, optional
Optional column for stratifying the final table (default: None).
nonnormal : list, optional
List of columns that contain non-normal variables (default: None).
pval : bool, optional
Display computed p-values (default: False).
pval_adjust : str, optional
Method used to adjust p-values for multiple testing.
For a complete list, see documentation for statsmodels multipletests.
Available methods include ::
`None` : no correction applied.
`bonferroni` : one-step correction
`sidak` : one-step correction
`holm-sidak` : step down method using Sidak adjustments
`simes-hochberg` : step-up method (independent)
`hommel` : closed method based on Simes tests (non-negative)
isnull : bool, optional
Display a count of null values (default: True).
ddof : int, optional
Degrees of freedom for standard deviation calculations (default: 1).
labels : dict, optional
Dictionary of alternative labels for variables.
e.g. `labels = {'sex':'gender', 'trt':'treatment'}`
sort : bool, optional
Sort the rows alphabetically. Default (False) retains the input order
of columns.
limit : int, optional
Limit to the top N most frequent categories.
remarks : bool, optional
Add remarks on the appropriateness of the summary measures and the
statistical tests (default: True).
label_suffix : bool, optional
Append summary type (e.g. "mean (SD); median [Q1,Q3], n (%); ") to the
row label (default: False).
decimals : int or dict, optional
Number of decimal places to display. An integer applies the rule to all
variables (default: 1). A dictionary (e.g. `decimals = {'age': 0)`) applies
the rule per variable, defaulting to 1 place for unspecified variables.
For continuous variables, applies to all summary statistics (e.g. mean and
standard deviation). For categorical variables, applies to percentage only.
Attributes
----------
tableone : dataframe
Summary of the data (i.e., the "Table 1").
"""
def __init__(self, data, columns=None, categorical=None, groupby=None,
        nonnormal=None, pval=False, pval_adjust=None, isnull=True,
        ddof=1, labels=None, sort=False, limit=None, remarks=True,
        label_suffix=False, decimals=1):
    """Validate the inputs, compute all summary tables, and expose the
    DataFrame export helpers (``to_csv``, ``to_html``, ...)."""
    # check input arguments
    if not groupby:
        groupby = ''
    elif groupby and type(groupby) == list:
        # only a single stratifying variable is supported
        groupby = groupby[0]
    if not nonnormal:
        nonnormal=[]
    elif nonnormal and type(nonnormal) == str:
        nonnormal = [nonnormal]
    # if columns not specified, use all columns
    # NOTE(review): Index.get_values() is deprecated/removed in newer
    # pandas -- confirm the pinned pandas version supports it
    if not columns:
        columns = data.columns.get_values()
    # check that the columns exist in the dataframe
    if not set(columns).issubset(data.columns):
        notfound = list(set(columns) - set(data.columns))
        raise InputError('Columns not found in dataset: {}'.format(notfound))
    # check for duplicate columns
    dups = data[columns].columns[data[columns].columns.duplicated()].unique()
    if not dups.empty:
        raise InputError('Input contains duplicate columns: {}'.format(dups))
    # if categorical not specified, try to identify categorical
    # (an explicit empty list suppresses auto-detection)
    if not categorical and type(categorical) != list:
        categorical = self._detect_categorical_columns(data[columns])
    if pval and not groupby:
        raise InputError("If pval=True then the groupby must be specified.")
    self._columns = list(columns)
    self._isnull = isnull
    # everything that is neither categorical nor the groupby is continuous
    self._continuous = [c for c in columns if c not in categorical + [groupby]]
    self._categorical = categorical
    self._nonnormal = nonnormal
    self._pval = pval
    self._pval_adjust = pval_adjust
    self._sort = sort
    self._groupby = groupby
    self._ddof = ddof # degrees of freedom for standard deviation
    self._alt_labels = labels
    self._limit = limit
    self._remarks = remarks
    self._label_suffix = label_suffix
    self._decimals = decimals
    # output column names that cannot be contained in a groupby
    self._reserved_columns = ['isnull', 'pval', 'ptest', 'pval (adjusted)']
    if self._groupby:
        self._groupbylvls = sorted(data.groupby(groupby).groups.keys())
        # check that the group levels do not include reserved words
        for level in self._groupbylvls:
            if level in self._reserved_columns:
                raise InputError('Group level contained "{}", a reserved keyword for tableone.'.format(level))
    else:
        self._groupbylvls = ['overall']
    # forgive me jraffa
    if self._pval:
        self._significance_table = self._create_significance_table(data)
    # correct for multiple testing
    if self._pval and self._pval_adjust:
        alpha=0.05
        adjusted = multitest.multipletests(self._significance_table['pval'],
            alpha=alpha, method=self._pval_adjust)
        # multipletests returns (reject, pvals_corrected, ...)
        self._significance_table['pval (adjusted)'] = adjusted[1]
        self._significance_table['adjust method'] = self._pval_adjust
    # create descriptive tables
    if self._categorical:
        self.cat_describe = self._create_cat_describe(data)
        self.cat_table = self._create_cat_table(data)
    # create tables of continuous and categorical variables
    if self._continuous:
        self.cont_describe = self._create_cont_describe(data)
        self.cont_table = self._create_cont_table(data)
    # combine continuous variables and categorical variables into table 1
    self.tableone = self._create_tableone(data)
    # self._remarks_str = self._generate_remark_str()
    # wrap dataframe methods
    self.head = self.tableone.head
    self.tail = self.tableone.tail
    self.to_csv = self.tableone.to_csv
    self.to_excel = self.tableone.to_excel
    self.to_html = self.tableone.to_html
    self.to_json = self.tableone.to_json
    self.to_latex = self.tableone.to_latex
def __str__(self):
    """Plain-text rendering of the table followed by any remarks."""
    body = self.tableone.to_string()
    return body + self._generate_remark_str('\n')
def __repr__(self):
    """Interpreter representation; same text as ``str(self)``."""
    rendered = self.tableone.to_string()
    return rendered + self._generate_remark_str('\n')
def _repr_html_(self):
    """HTML rendering used by Jupyter, with remarks separated by <br />."""
    html = self.tableone._repr_html_()
    return html + self._generate_remark_str('<br />')
def _generate_remark_str(self, end_of_line = '\n'):
"""
Generate a series of remarks that the user should consider
when interpreting the summary statistics.
"""
warnings = {}
msg = '{}'.format(end_of_line)
# generate warnings for continuous variables
if self._continuous:
# highlight far outliers
outlier_mask = self.cont_describe.far_outliers > 1
outlier_vars = list(self.cont_describe.far_outliers[outlier_mask].dropna(how='all').index)
if outlier_vars:
warnings["Warning, Tukey test indicates far outliers in"] = outlier_vars
# highlight possible multimodal distributions using hartigan's dip test
# -1 values indicate NaN
modal_mask = (self.cont_describe.diptest >= 0) & (self.cont_describe.diptest <= 0.05)
modal_vars = list(self.cont_describe.diptest[modal_mask].dropna(how='all').index)
if modal_vars:
warnings["Warning, Hartigan's Dip Test reports possible multimodal distributions for"] = modal_vars
# highlight non normal distributions
# -1 values indicate NaN
modal_mask = (self.cont_describe.normaltest >= 0) & (self.cont_describe.normaltest <= 0.001)
modal_vars = list(self.cont_describe.normaltest[modal_mask].dropna(how='all').index)
if modal_vars:
warnings["Warning, test for normality reports non-normal distributions for"] = modal_vars
# create the warning string
for n,k in enumerate(sorted(warnings)):
msg += '[{}] {}: {}.{}'.format(n+1,k,', '.join(warnings[k]), end_of_line)
return msg
def _detect_categorical_columns(self,data):
"""
Detect categorical columns if they are not specified.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
likely_cat : list
List of variables that appear to be categorical.
"""
# assume all non-numerical and date columns are categorical
numeric_cols = set(data._get_numeric_data().columns.values)
date_cols = set(data.select_dtypes(include=[np.datetime64]).columns)
likely_cat = set(data.columns) - numeric_cols
likely_cat = list(likely_cat - date_cols)
# check proportion of unique values if numerical
for var in data._get_numeric_data().columns:
likely_flag = 1.0 * data[var].nunique()/data[var].count() < 0.05
if likely_flag:
likely_cat.append(var)
return likely_cat
def _q25(self,x):
"""
Compute percentile (25th)
"""
return np.nanpercentile(x.values,25)
def _q75(self,x):
"""
Compute percentile (75th)
"""
return np.nanpercentile(x.values,75)
def _std(self,x):
"""
Compute standard deviation with ddof degrees of freedom
"""
return np.nanstd(x.values,ddof=self._ddof)
def _diptest(self,x):
    """
    Compute Hartigan Dip Test for modality.

    p < 0.05 suggests possible multimodality.
    """
    pvalue = modality.hartigan_diptest(x.values)
    # pivot_table's dropna=False argument does not function as expected,
    # so a missing p-value is encoded as -1 instead of None
    if pd.isnull(pvalue):
        return -1
    return pvalue
def _normaltest(self,x):
"""
Compute test for normal distribution.
Null hypothesis: x comes from a normal distribution
p < alpha suggests the null hypothesis can be rejected.
"""
if len(x.values[~np.isnan(x.values)]) > 10:
stat,p = stats.normaltest(x.values, nan_policy='omit')
else:
p = None
# dropna=False argument in pivot_table does not function as expected
# return -1 instead of None
if pd.isnull(p):
return -1
return p
def _tukey(self,x,threshold):
"""
Count outliers according to Tukey's rule.
Where Q1 is the lower quartile and Q3 is the upper quartile,
an outlier is an observation outside of the range:
[Q1 - k(Q3 - Q1), Q3 + k(Q3 - Q1)]
k = 1.5 indicates an outlier
k = 3.0 indicates an outlier that is "far out"
"""
vals = x.values[~np.isnan(x.values)]
try:
q1, q3 = np.percentile(vals, [25, 75])
iqr = q3 - q1
low_bound = q1 - (iqr * threshold)
high_bound = q3 + (iqr * threshold)
outliers = np.where((vals > high_bound) | (vals < low_bound))
except:
outliers = []
return outliers
def _outliers(self,x):
"""
Compute number of outliers
"""
outliers = self._tukey(x, threshold = 1.5)
return np.size(outliers)
def _t1_summary(self,x):
"""
Compute median [IQR] or mean (Std) for the input series.
Parameters
----------
x : pandas Series
Series of values to be summarised.
"""
# set decimal places
if isinstance(self._decimals,int):
n = self._decimals
elif isinstance(self._decimals,dict):
try:
n = self._decimals[x.name]
except:
n = 1
else:
n = 1
warnings.warn('The decimals arg must be an int or dict. Defaulting to {} d.p.'.format(n))
if x.name in self._nonnormal:
f = '{{:.{}f}} [{{:.{}f}},{{:.{}f}}]'.format(n,n,n)
return f.format(np.nanmedian(x.values),
np.nanpercentile(x.values,25), np.nanpercentile(x.values,75))
else:
f = '{{:.{}f}} ({{:.{}f}})'.format(n,n)
return f.format(np.nanmean(x.values),
np.nanstd(x.values,ddof=self._ddof))
def _create_cont_describe(self,data):
"""
Describe the continuous data.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
df_cont : pandas DataFrame
Summarise the continuous variables.
"""
aggfuncs = [pd.Series.count,np.mean,np.median,self._std,
self._q25,self._q75,min,max,self._t1_summary,self._diptest,
self._outliers,self._far_outliers,self._normaltest]
# coerce continuous data to numeric
cont_data = data[self._continuous].apply(pd.to_numeric, errors='coerce')
# check all data in each continuous column is numeric
bad_cols = cont_data.count() != data[self._continuous].count()
bad_cols = cont_data.columns[bad_cols]
if len(bad_cols)>0:
raise InputError("""The following continuous column(s) have non-numeric values: {}.
Either specify the column(s) as categorical or remove the non-numeric values.""".format(bad_cols.values))
# check for coerced column containing all NaN to warn user
for column in cont_data.columns[cont_data.count() == 0]:
self._non_continuous_warning(column)
if self._groupby:
# add the groupby column back
cont_data = cont_data.merge(data[[self._groupby]],
left_index=True, right_index=True)
# group and aggregate data
df_cont = pd.pivot_table(cont_data,
columns=[self._groupby],
aggfunc=aggfuncs)
else:
# if no groupby, just add single group column
df_cont = cont_data.apply(aggfuncs).T
df_cont.columns.name = 'overall'
df_cont.columns = pd.MultiIndex.from_product([df_cont.columns,
['overall']])
df_cont.index.rename('variable',inplace=True)
# remove prefix underscore from column names (e.g. _std -> std)
agg_rename = df_cont.columns.levels[0]
agg_rename = [x[1:] if x[0]=='_' else x for x in agg_rename]
df_cont.columns.set_levels(agg_rename, level=0, inplace=True)
return df_cont
def _format_cat(self,row):
var = row.name[0]
if var in self._decimals:
n = self._decimals[var]
else:
n = 1
f = '{{:.{}f}}'.format(n)
return f.format(row.percent)
def _create_cat_describe(self,data):
"""
Describe the categorical data.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
df_cat : pandas DataFrame
Summarise the categorical variables.
"""
group_dict = {}
for g in self._groupbylvls:
if self._groupby:
d_slice = data.loc[data[self._groupby] == g, self._categorical]
else:
d_slice = data[self._categorical].copy()
# create a dataframe with freq, proportion
df = d_slice.copy()
# convert type to string to avoid int converted to boolean, avoiding nans
for column in df.columns:
df[column] = [str(row) if not pd.isnull(row) else None for row in df[column].values]
df = df.melt().groupby(['variable','value']).size().to_frame(name='freq')
df.index.set_names('level', level=1, inplace=True)
df['percent'] = df['freq'].div(df.freq.sum(level=0),level=0).astype(float)* 100
# set number of decimal places for percent
if isinstance(self._decimals,int):
n = self._decimals
f = '{{:.{}f}}'.format(n)
df['percent'] = df['percent'].astype(float).map(f.format)
elif isinstance(self._decimals,dict):
df.loc[:,'percent'] = df.apply(self._format_cat, axis=1)
else:
n = 1
f = '{{:.{}f}}'.format(n)
df['percent'] = df['percent'].astype(float).map(f.format)
# add n column, listing total non-null values for each variable
ct = d_slice.count().to_frame(name='n')
ct.index.name = 'variable'
df = df.join(ct)
# add null count
nulls = d_slice.isnull().sum().to_frame(name='isnull')
nulls.index.name = 'variable'
# only save null count to the first category for each variable
# do this by extracting the first category from the df row index
levels = df.reset_index()[['variable','level']].groupby('variable').first()
# add this category to the nulls table
nulls = nulls.join(levels)
nulls.set_index('level', append=True, inplace=True)
# join nulls to categorical
df = df.join(nulls)
# add summary column
df['t1_summary'] = df.freq.map(str) + ' (' + df.percent.map(str) + ')'
# add to dictionary
group_dict[g] = df
df_cat = pd.concat(group_dict,axis=1)
# ensure the groups are the 2nd level of the column index
if df_cat.columns.nlevels>1:
df_cat = df_cat.swaplevel(0, 1, axis=1).sort_index(axis=1,level=0)
return df_cat
def _create_significance_table(self,data):
"""
Create a table containing p-values for significance tests. Add features of
the distributions and the p-values to the dataframe.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
df : pandas DataFrame
A table containing the p-values, test name, etc.
"""
# list features of the variable e.g. matched, paired, n_expected
df=pd.DataFrame(index=self._continuous+self._categorical,
columns=['continuous','nonnormal','min_observed','pval','ptest'])
df.index.rename('variable', inplace=True)
df['continuous'] = np.where(df.index.isin(self._continuous),True,False)
df['nonnormal'] = np.where(df.index.isin(self._nonnormal),True,False)
# list values for each variable, grouped by groupby levels
for v in df.index:
is_continuous = df.loc[v]['continuous']
is_categorical = ~df.loc[v]['continuous']
is_normal = ~df.loc[v]['nonnormal']
# if continuous, group data into list of lists
if is_continuous:
catlevels = None
grouped_data = []
for s in self._groupbylvls:
lvl_data = data.loc[data[self._groupby]==s, v]
# coerce to numeric and drop non-numeric data
lvl_data = lvl_data.apply(pd.to_numeric, errors='coerce').dropna()
# append to overall group data
grouped_data.append(lvl_data.values)
min_observed = len(min(grouped_data,key=len))
# if categorical, create contingency table
elif is_categorical:
catlevels = sorted(data[v].astype('category').cat.categories)
grouped_data = pd.crosstab(data[self._groupby].rename('_groupby_var_'),data[v])
min_observed = grouped_data.sum(axis=1).min()
# minimum number of observations across all levels
df.loc[v,'min_observed'] = min_observed
# compute pvalues
df.loc[v,'pval'],df.loc[v,'ptest'] = self._p_test(v,
grouped_data,is_continuous,is_categorical,
is_normal,min_observed,catlevels)
return df
def _p_test(self,v,grouped_data,is_continuous,is_categorical,
is_normal,min_observed,catlevels):
"""
Compute p-values.
Parameters
----------
v : str
Name of the variable to be tested.
grouped_data : list
List of lists of values to be tested.
is_continuous : bool
True if the variable is continuous.
is_categorical : bool
True if the variable is categorical.
is_normal : bool
True if the variable is normally distributed.
min_observed : int
Minimum number of values across groups for the variable.
catlevels : list
Sorted list of levels for categorical variables.
Returns
----------
pval : float
The computed p-value.
ptest : str
The name of the test used to compute the p-value.
"""
# no test by default
pval=np.nan
ptest='Not tested'
# do not test if the variable has no observations in a level
if min_observed == 0:
warnings.warn('No p-value was computed for {} due to the low number of observations.'.format(v))
return pval,ptest
# continuous
if is_continuous and is_normal and len(grouped_data)==2 :
ptest = 'Two Sample T-test'
test_stat, pval = stats.ttest_ind(*grouped_data,equal_var=False)
elif is_continuous and is_normal:
# normally distributed
ptest = 'One-way ANOVA'
test_stat, pval = stats.f_oneway(*grouped_data)
elif is_continuous and not is_normal:
# non-normally distributed
ptest = 'Kruskal-Wallis'
test_stat, pval = stats.kruskal(*grouped_data)
# categorical
elif is_categorical:
# default to chi-squared
ptest = 'Chi-squared'
chi2, pval, dof, expected = stats.chi2_contingency(grouped_data)
# if any expected cell counts are < 5, chi2 may not be valid
# if this is a 2x2, switch to fisher exact
if expected.min() < 5:
if grouped_data.shape == (2,2):
ptest = "Fisher's exact"
oddsratio, pval = stats.fisher_exact(grouped_data)
else:
ptest = 'Chi-squared (warning: expected count < 5)'
warnings.warn('No p-value was computed for {} due to the low number of observations.'.format(v))
return pval,ptest
def _create_cont_table(self,data):
    """
    Create tableone for continuous data.
    Parameters
    ----------
    data : pandas DataFrame
        The input dataset (used only for the per-column null counts).
    Returns
    ----------
    table : pandas DataFrame
        A table summarising the continuous variables.
    """
    # remove the t1_summary level
    table = self.cont_describe[['t1_summary']].copy()
    table.columns = table.columns.droplevel(level=0)
    # add a column of null counts as 1-count() from previous function
    nulltable = data[self._continuous].isnull().sum().to_frame(name='isnull')
    try:
        table = table.join(nulltable)
    except TypeError: # if columns form a CategoricalIndex, need to convert to string first
        table.columns = table.columns.astype(str)
        table = table.join(nulltable)
    # add an empty level column, for joining with cat table
    table['level'] = ''
    table.set_index([table.index,'level'],inplace=True)
    # add pval column
    if self._pval and self._pval_adjust:
        table = table.join(self._significance_table[['pval (adjusted)','ptest']])
    elif self._pval:
        table = table.join(self._significance_table[['pval','ptest']])
    return table
def _create_cat_table(self,data):
    """
    Create table one for categorical data.
    Parameters
    ----------
    data : pandas DataFrame
        The input dataset (used only for the per-column null counts).
    Returns
    ----------
    table : pandas DataFrame
        A table summarising the categorical variables.
    """
    table = self.cat_describe['t1_summary'].copy()
    # add the total count of null values across all levels
    isnull = data[self._categorical].isnull().sum().to_frame(name='isnull')
    isnull.index.rename('variable', inplace=True)
    try:
        table = table.join(isnull)
    except TypeError: # if columns form a CategoricalIndex, need to convert to string first
        table.columns = table.columns.astype(str)
        table = table.join(isnull)
    # add pval column
    if self._pval and self._pval_adjust:
        table = table.join(self._significance_table[['pval (adjusted)','ptest']])
    elif self._pval:
        table = table.join(self._significance_table[['pval','ptest']])
    return table
def _create_tableone(self,data):
    """
    Create table 1 by combining the continuous and categorical tables.
    Parameters
    ----------
    data : pandas DataFrame
        The input dataset (used for group sizes and category frequencies).
    Returns
    ----------
    table : pandas DataFrame
        The complete table one.
    """
    if self._continuous and self._categorical:
        # support pandas<=0.22
        try:
            table = pd.concat([self.cont_table,self.cat_table],sort=False)
        except:
            table = pd.concat([self.cont_table,self.cat_table])
    elif self._continuous:
        table = self.cont_table
    elif self._categorical:
        table = self.cat_table
    # round pval column and convert to string
    if self._pval and self._pval_adjust:
        table['pval (adjusted)'] = table['pval (adjusted)'].apply('{:.3f}'.format).astype(str)
        table.loc[table['pval (adjusted)'] == '0.000', 'pval (adjusted)'] = '<0.001'
    elif self._pval:
        table['pval'] = table['pval'].apply('{:.3f}'.format).astype(str)
        table.loc[table['pval'] == '0.000', 'pval'] = '<0.001'
    # sort the table rows
    # NOTE(review): set_index(..., inplace=True) acts on the temporary frame
    # returned by reset_index() and its result is discarded, so this line is
    # a no-op; likely intended:
    #   table = table.reset_index().set_index(['variable','level'])
    # -- confirm before changing, the index already carries these levels.
    table.reset_index().set_index(['variable','level'], inplace=True)
    if self._sort:
        # alphabetical
        new_index = sorted(table.index.values)
    else:
        # sort by the columns argument
        new_index = sorted(table.index.values,key=lambda x: self._columns.index(x[0]))
    table = table.reindex(new_index)
    # if a limit has been set on the number of categorical variables
    # then re-order the variables by frequency
    if self._limit:
        levelcounts = data[self._categorical].nunique()
        levelcounts = levelcounts[levelcounts >= self._limit]
        for v,_ in levelcounts.iteritems():
            count = data[v].value_counts().sort_values(ascending=False)
            new_index = [(v, i) for i in count.index]
            # restructure to match orig_index
            new_index_array=np.empty((len(new_index),), dtype=object)
            new_index_array[:]=[tuple(i) for i in new_index]
            orig_index = table.index.values.copy()
            orig_index[table.index.get_loc(v)] = new_index_array
            table = table.reindex(orig_index)
    # inserts n row
    n_row = pd.DataFrame(columns = ['variable','level','isnull'])
    n_row.set_index(['variable','level'], inplace=True)
    n_row.loc['n', ''] = None
    # support pandas<=0.22
    try:
        table = pd.concat([n_row,table],sort=False)
    except:
        table = pd.concat([n_row,table])
    if self._groupbylvls == ['overall']:
        table.loc['n','overall'] = len(data.index)
    else:
        for g in self._groupbylvls:
            ct = data[self._groupby][data[self._groupby]==g].count()
            table.loc['n',g] = ct
    # only display data in first level row
    dupe_mask = table.groupby(level=[0]).cumcount().ne(0)
    dupe_columns = ['isnull']
    optional_columns = ['pval','pval (adjusted)','ptest']
    for col in optional_columns:
        if col in table.columns.values:
            dupe_columns.append(col)
    table[dupe_columns] = table[dupe_columns].mask(dupe_mask).fillna('')
    # remove empty column added above
    table.drop([''], axis=1, inplace=True)
    # remove isnull column if not needed
    if not self._isnull:
        table.drop('isnull',axis=1,inplace=True)
    # replace nans with empty strings
    table.fillna('',inplace=True)
    # add column index
    if not self._groupbylvls == ['overall']:
        # rename groupby variable if requested
        c = self._groupby
        if self._alt_labels:
            if self._groupby in self._alt_labels:
                c = self._alt_labels[self._groupby]
        c = 'Grouped by {}'.format(c)
        table.columns = pd.MultiIndex.from_product([[c], table.columns])
    # display alternative labels if assigned
    table.rename(index=self._create_row_labels(), inplace=True, level=0)
    # if a limit has been set on the number of categorical variables
    # limit the number of categorical variables that are displayed
    if self._limit:
        table = table.groupby('variable').head(self._limit)
    # re-order the columns in a consistent fashion
    if self._groupby:
        cols = table.columns.levels[1].values
    else:
        cols = table.columns.values
    if 'isnull' in cols:
        cols = ['isnull'] + [x for x in cols if x != 'isnull']
    # iterate through each optional column
    # if they exist, put them at the end of the dataframe
    # ensures the last 3 columns will be in the same order as optional_columns
    for col in optional_columns:
        if col in cols:
            cols = [x for x in cols if x != col] + [col]
    if self._groupby:
        table = table.reindex(cols, axis=1, level=1)
    else:
        table = table.reindex(cols, axis=1)
    return table
def _create_row_labels(self):
"""
Take the original labels for rows. Rename if alternative labels are
provided. Append label suffix if label_suffix is True.
Returns
----------
labels : dictionary
Dictionary, keys are original column name, values are final label.
"""
# start with the original column names
labels = {}
for c in self._columns:
labels[c] = c
# replace column names with alternative names if provided
if self._alt_labels:
for k in self._alt_labels.keys():
labels[k] = self._alt_labels[k]
# append the label suffix
if self._label_suffix:
for k in labels.keys():
if k in self._nonnormal:
labels[k] = "{}, {}".format(labels[k],"median [Q1,Q3]")
elif k in self._categorical:
labels[k] = "{}, {}".format(labels[k],"n (%)")
else:
labels[k] = "{}, {}".format(labels[k],"mean (SD)")
return labels
# warnings
def _non_continuous_warning(self, c):
    # Emitted when a column declared/detected as continuous coerces
    # entirely to NaN (i.e. contains no numeric data).
    warnings.warn('''"{}" has all non-numeric values. Consider including it in the
list of categorical variables.'''.format(c), RuntimeWarning, stacklevel=2)
|
tompollard/tableone
|
tableone.py
|
TableOne._t1_summary
|
python
|
def _t1_summary(self,x):
# set decimal places
if isinstance(self._decimals,int):
n = self._decimals
elif isinstance(self._decimals,dict):
try:
n = self._decimals[x.name]
except:
n = 1
else:
n = 1
warnings.warn('The decimals arg must be an int or dict. Defaulting to {} d.p.'.format(n))
if x.name in self._nonnormal:
f = '{{:.{}f}} [{{:.{}f}},{{:.{}f}}]'.format(n,n,n)
return f.format(np.nanmedian(x.values),
np.nanpercentile(x.values,25), np.nanpercentile(x.values,75))
else:
f = '{{:.{}f}} ({{:.{}f}})'.format(n,n)
return f.format(np.nanmean(x.values),
np.nanstd(x.values,ddof=self._ddof))
|
Compute median [IQR] or mean (Std) for the input series.
Parameters
----------
x : pandas Series
Series of values to be summarised.
|
train
|
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L348-L376
| null |
class TableOne(object):
"""
If you use the tableone package, please cite:
Pollard TJ, Johnson AEW, Raffa JD, Mark RG (2018). tableone: An open source Python
package for producing summary statistics for research papers. JAMIA Open, Volume 1,
Issue 1, 1 July 2018, Pages 26-31. https://doi.org/10.1093/jamiaopen/ooy012
Create an instance of the tableone summary table.
Parameters
----------
data : pandas DataFrame
The dataset to be summarised. Rows are observations, columns are variables.
columns : list, optional
List of columns in the dataset to be included in the final table.
categorical : list, optional
List of columns that contain categorical variables.
groupby : str, optional
Optional column for stratifying the final table (default: None).
nonnormal : list, optional
List of columns that contain non-normal variables (default: None).
pval : bool, optional
Display computed p-values (default: False).
pval_adjust : str, optional
Method used to adjust p-values for multiple testing.
For a complete list, see documentation for statsmodels multipletests.
Available methods include ::
`None` : no correction applied.
`bonferroni` : one-step correction
`sidak` : one-step correction
`holm-sidak` : step down method using Sidak adjustments
`simes-hochberg` : step-up method (independent)
`hommel` : closed method based on Simes tests (non-negative)
isnull : bool, optional
Display a count of null values (default: True).
ddof : int, optional
Degrees of freedom for standard deviation calculations (default: 1).
labels : dict, optional
Dictionary of alternative labels for variables.
e.g. `labels = {'sex':'gender', 'trt':'treatment'}`
sort : bool, optional
Sort the rows alphabetically. Default (False) retains the input order
of columns.
limit : int, optional
Limit to the top N most frequent categories.
remarks : bool, optional
Add remarks on the appropriateness of the summary measures and the
statistical tests (default: True).
label_suffix : bool, optional
Append summary type (e.g. "mean (SD); median [Q1,Q3], n (%); ") to the
row label (default: False).
decimals : int or dict, optional
Number of decimal places to display. An integer applies the rule to all
variables (default: 1). A dictionary (e.g. `decimals = {'age': 0)`) applies
the rule per variable, defaulting to 1 place for unspecified variables.
For continuous variables, applies to all summary statistics (e.g. mean and
standard deviation). For categorical variables, applies to percentage only.
Attributes
----------
tableone : dataframe
Summary of the data (i.e., the "Table 1").
"""
def __init__(self, data, columns=None, categorical=None, groupby=None,
             nonnormal=None, pval=False, pval_adjust=None, isnull=True,
             ddof=1, labels=None, sort=False, limit=None, remarks=True,
             label_suffix=False, decimals=1):
    """Validate the inputs, compute all statistics and build ``self.tableone``."""
    # check input arguments
    if not groupby:
        groupby = ''
    elif groupby and type(groupby) == list:
        # a list is tolerated, but only the first entry is used
        groupby = groupby[0]
    if not nonnormal:
        nonnormal=[]
    elif nonnormal and type(nonnormal) == str:
        nonnormal = [nonnormal]
    # if columns not specified, use all columns
    # NOTE(review): Index.get_values() was removed in pandas >= 1.0;
    # .tolist()/.to_numpy() is the modern equivalent -- confirm target version.
    if not columns:
        columns = data.columns.get_values()
    # check that the columns exist in the dataframe
    if not set(columns).issubset(data.columns):
        notfound = list(set(columns) - set(data.columns))
        raise InputError('Columns not found in dataset: {}'.format(notfound))
    # check for duplicate columns
    dups = data[columns].columns[data[columns].columns.duplicated()].unique()
    if not dups.empty:
        raise InputError('Input contains duplicate columns: {}'.format(dups))
    # if categorical not specified, try to identify categorical
    if not categorical and type(categorical) != list:
        categorical = self._detect_categorical_columns(data[columns])
    if pval and not groupby:
        raise InputError("If pval=True then the groupby must be specified.")
    self._columns = list(columns)
    self._isnull = isnull
    self._continuous = [c for c in columns if c not in categorical + [groupby]]
    self._categorical = categorical
    self._nonnormal = nonnormal
    self._pval = pval
    self._pval_adjust = pval_adjust
    self._sort = sort
    self._groupby = groupby
    self._ddof = ddof # degrees of freedom for standard deviation
    self._alt_labels = labels
    self._limit = limit
    self._remarks = remarks
    self._label_suffix = label_suffix
    self._decimals = decimals
    # output column names that cannot be contained in a groupby
    self._reserved_columns = ['isnull', 'pval', 'ptest', 'pval (adjusted)']
    if self._groupby:
        self._groupbylvls = sorted(data.groupby(groupby).groups.keys())
        # check that the group levels do not include reserved words
        for level in self._groupbylvls:
            if level in self._reserved_columns:
                raise InputError('Group level contained "{}", a reserved keyword for tableone.'.format(level))
    else:
        self._groupbylvls = ['overall']
    # forgive me jraffa
    if self._pval:
        self._significance_table = self._create_significance_table(data)
    # correct for multiple testing
    if self._pval and self._pval_adjust:
        alpha=0.05
        adjusted = multitest.multipletests(self._significance_table['pval'],
                                           alpha=alpha, method=self._pval_adjust)
        self._significance_table['pval (adjusted)'] = adjusted[1]
        self._significance_table['adjust method'] = self._pval_adjust
    # create descriptive tables
    if self._categorical:
        self.cat_describe = self._create_cat_describe(data)
        self.cat_table = self._create_cat_table(data)
    # create tables of continuous and categorical variables
    if self._continuous:
        self.cont_describe = self._create_cont_describe(data)
        self.cont_table = self._create_cont_table(data)
    # combine continuous variables and categorical variables into table 1
    self.tableone = self._create_tableone(data)
    # self._remarks_str = self._generate_remark_str()
    # wrap dataframe methods so exports work directly on the instance
    self.head = self.tableone.head
    self.tail = self.tableone.tail
    self.to_csv = self.tableone.to_csv
    self.to_excel = self.tableone.to_excel
    self.to_html = self.tableone.to_html
    self.to_json = self.tableone.to_json
    self.to_latex = self.tableone.to_latex
def __str__(self):
    # plain-text table followed by the data-quality remarks
    return self.tableone.to_string() + self._generate_remark_str('\n')
def __repr__(self):
    return self.tableone.to_string() + self._generate_remark_str('\n')
def _repr_html_(self):
    # rich-display hook used by Jupyter notebooks
    return self.tableone._repr_html_() + self._generate_remark_str('<br />')
def _generate_remark_str(self, end_of_line = '\n'):
    """
    Generate a series of remarks that the user should consider
    when interpreting the summary statistics.
    Parameters
    ----------
    end_of_line : str
        Line separator for the output ('\\n' for text, '<br />' for HTML).
    """
    # NOTE(review): this local dict shadows the imported `warnings` module
    # within the method; harmless here, but rename it if the module is needed.
    warnings = {}
    msg = '{}'.format(end_of_line)
    # generate warnings for continuous variables
    if self._continuous:
        # highlight far outliers
        outlier_mask = self.cont_describe.far_outliers > 1
        outlier_vars = list(self.cont_describe.far_outliers[outlier_mask].dropna(how='all').index)
        if outlier_vars:
            warnings["Warning, Tukey test indicates far outliers in"] = outlier_vars
        # highlight possible multimodal distributions using hartigan's dip test
        # -1 values indicate NaN
        modal_mask = (self.cont_describe.diptest >= 0) & (self.cont_describe.diptest <= 0.05)
        modal_vars = list(self.cont_describe.diptest[modal_mask].dropna(how='all').index)
        if modal_vars:
            warnings["Warning, Hartigan's Dip Test reports possible multimodal distributions for"] = modal_vars
        # highlight non normal distributions
        # -1 values indicate NaN
        modal_mask = (self.cont_describe.normaltest >= 0) & (self.cont_describe.normaltest <= 0.001)
        modal_vars = list(self.cont_describe.normaltest[modal_mask].dropna(how='all').index)
        if modal_vars:
            warnings["Warning, test for normality reports non-normal distributions for"] = modal_vars
    # create the warning string
    for n,k in enumerate(sorted(warnings)):
        msg += '[{}] {}: {}.{}'.format(n+1,k,', '.join(warnings[k]), end_of_line)
    return msg
def _detect_categorical_columns(self,data):
"""
Detect categorical columns if they are not specified.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
likely_cat : list
List of variables that appear to be categorical.
"""
# assume all non-numerical and date columns are categorical
numeric_cols = set(data._get_numeric_data().columns.values)
date_cols = set(data.select_dtypes(include=[np.datetime64]).columns)
likely_cat = set(data.columns) - numeric_cols
likely_cat = list(likely_cat - date_cols)
# check proportion of unique values if numerical
for var in data._get_numeric_data().columns:
likely_flag = 1.0 * data[var].nunique()/data[var].count() < 0.05
if likely_flag:
likely_cat.append(var)
return likely_cat
def _q25(self,x):
"""
Compute percentile (25th)
"""
return np.nanpercentile(x.values,25)
def _q75(self,x):
"""
Compute percentile (75th)
"""
return np.nanpercentile(x.values,75)
def _std(self,x):
"""
Compute standard deviation with ddof degrees of freedom
"""
return np.nanstd(x.values,ddof=self._ddof)
def _diptest(self,x):
    """
    Compute Hartigan Dip Test for modality.
    p < 0.05 suggests possible multimodality.
    Parameters
    ----------
    x : pandas Series
        Values to test (passed as-is to the external `modality` package).
    Returns
    ----------
    p : float
        Dip test p-value, or -1 when the test yields no result.
    """
    p = modality.hartigan_diptest(x.values)
    # dropna=False argument in pivot_table does not function as expected
    # return -1 instead of None
    if pd.isnull(p):
        return -1
    return p
def _normaltest(self,x):
"""
Compute test for normal distribution.
Null hypothesis: x comes from a normal distribution
p < alpha suggests the null hypothesis can be rejected.
"""
if len(x.values[~np.isnan(x.values)]) > 10:
stat,p = stats.normaltest(x.values, nan_policy='omit')
else:
p = None
# dropna=False argument in pivot_table does not function as expected
# return -1 instead of None
if pd.isnull(p):
return -1
return p
def _tukey(self,x,threshold):
"""
Count outliers according to Tukey's rule.
Where Q1 is the lower quartile and Q3 is the upper quartile,
an outlier is an observation outside of the range:
[Q1 - k(Q3 - Q1), Q3 + k(Q3 - Q1)]
k = 1.5 indicates an outlier
k = 3.0 indicates an outlier that is "far out"
"""
vals = x.values[~np.isnan(x.values)]
try:
q1, q3 = np.percentile(vals, [25, 75])
iqr = q3 - q1
low_bound = q1 - (iqr * threshold)
high_bound = q3 + (iqr * threshold)
outliers = np.where((vals > high_bound) | (vals < low_bound))
except:
outliers = []
return outliers
def _outliers(self,x):
"""
Compute number of outliers
"""
outliers = self._tukey(x, threshold = 1.5)
return np.size(outliers)
def _far_outliers(self,x):
"""
Compute number of "far out" outliers
"""
outliers = self._tukey(x, threshold = 3.0)
return np.size(outliers)
def _create_cont_describe(self,data):
    """
    Describe the continuous data.
    Parameters
    ----------
    data : pandas DataFrame
        The input dataset.
    Returns
    ----------
    df_cont : pandas DataFrame
        Summarise the continuous variables.
    """
    aggfuncs = [pd.Series.count,np.mean,np.median,self._std,
                self._q25,self._q75,min,max,self._t1_summary,self._diptest,
                self._outliers,self._far_outliers,self._normaltest]
    # coerce continuous data to numeric
    cont_data = data[self._continuous].apply(pd.to_numeric, errors='coerce')
    # check all data in each continuous column is numeric
    bad_cols = cont_data.count() != data[self._continuous].count()
    bad_cols = cont_data.columns[bad_cols]
    if len(bad_cols)>0:
        raise InputError("""The following continuous column(s) have non-numeric values: {}.
Either specify the column(s) as categorical or remove the non-numeric values.""".format(bad_cols.values))
    # check for coerced column containing all NaN to warn user
    for column in cont_data.columns[cont_data.count() == 0]:
        self._non_continuous_warning(column)
    if self._groupby:
        # add the groupby column back
        cont_data = cont_data.merge(data[[self._groupby]],
                                    left_index=True, right_index=True)
        # group and aggregate data
        df_cont = pd.pivot_table(cont_data,
                                 columns=[self._groupby],
                                 aggfunc=aggfuncs)
    else:
        # if no groupby, just add single group column
        df_cont = cont_data.apply(aggfuncs).T
        df_cont.columns.name = 'overall'
        df_cont.columns = pd.MultiIndex.from_product([df_cont.columns,
                                                      ['overall']])
    df_cont.index.rename('variable',inplace=True)
    # remove prefix underscore from column names (e.g. _std -> std)
    agg_rename = df_cont.columns.levels[0]
    agg_rename = [x[1:] if x[0]=='_' else x for x in agg_rename]
    # NOTE(review): Index.set_levels(..., inplace=True) is deprecated/removed
    # in newer pandas; reassigning df_cont.columns is the replacement -- confirm.
    df_cont.columns.set_levels(agg_rename, level=0, inplace=True)
    return df_cont
def _format_cat(self,row):
var = row.name[0]
if var in self._decimals:
n = self._decimals[var]
else:
n = 1
f = '{{:.{}f}}'.format(n)
return f.format(row.percent)
def _create_cat_describe(self,data):
    """
    Describe the categorical data.
    Parameters
    ----------
    data : pandas DataFrame
        The input dataset.
    Returns
    ----------
    df_cat : pandas DataFrame
        Summarise the categorical variables.
    """
    group_dict = {}
    for g in self._groupbylvls:
        if self._groupby:
            d_slice = data.loc[data[self._groupby] == g, self._categorical]
        else:
            d_slice = data[self._categorical].copy()
        # create a dataframe with freq, proportion
        df = d_slice.copy()
        # convert type to string to avoid int converted to boolean, avoiding nans
        for column in df.columns:
            df[column] = [str(row) if not pd.isnull(row) else None for row in df[column].values]
        df = df.melt().groupby(['variable','value']).size().to_frame(name='freq')
        df.index.set_names('level', level=1, inplace=True)
        # percent within each variable (level 0 of the MultiIndex)
        df['percent'] = df['freq'].div(df.freq.sum(level=0),level=0).astype(float)* 100
        # set number of decimal places for percent
        if isinstance(self._decimals,int):
            n = self._decimals
            f = '{{:.{}f}}'.format(n)
            df['percent'] = df['percent'].astype(float).map(f.format)
        elif isinstance(self._decimals,dict):
            # per-variable decimal places
            df.loc[:,'percent'] = df.apply(self._format_cat, axis=1)
        else:
            # unexpected type: fall back to one decimal place
            n = 1
            f = '{{:.{}f}}'.format(n)
            df['percent'] = df['percent'].astype(float).map(f.format)
        # add n column, listing total non-null values for each variable
        ct = d_slice.count().to_frame(name='n')
        ct.index.name = 'variable'
        df = df.join(ct)
        # add null count
        nulls = d_slice.isnull().sum().to_frame(name='isnull')
        nulls.index.name = 'variable'
        # only save null count to the first category for each variable
        # do this by extracting the first category from the df row index
        levels = df.reset_index()[['variable','level']].groupby('variable').first()
        # add this category to the nulls table
        nulls = nulls.join(levels)
        nulls.set_index('level', append=True, inplace=True)
        # join nulls to categorical
        df = df.join(nulls)
        # add summary column
        df['t1_summary'] = df.freq.map(str) + ' (' + df.percent.map(str) + ')'
        # add to dictionary
        group_dict[g] = df
    df_cat = pd.concat(group_dict,axis=1)
    # ensure the groups are the 2nd level of the column index
    if df_cat.columns.nlevels>1:
        df_cat = df_cat.swaplevel(0, 1, axis=1).sort_index(axis=1,level=0)
    return df_cat
def _create_significance_table(self,data):
    """
    Create a table containing p-values for significance tests. Add features of
    the distributions and the p-values to the dataframe.
    Parameters
    ----------
    data : pandas DataFrame
        The input dataset.
    Returns
    ----------
    df : pandas DataFrame
        A table containing the p-values, test name, etc.
    """
    # list features of the variable e.g. matched, paired, n_expected
    df=pd.DataFrame(index=self._continuous+self._categorical,
                    columns=['continuous','nonnormal','min_observed','pval','ptest'])
    df.index.rename('variable', inplace=True)
    df['continuous'] = np.where(df.index.isin(self._continuous),True,False)
    df['nonnormal'] = np.where(df.index.isin(self._nonnormal),True,False)
    # list values for each variable, grouped by groupby levels
    for v in df.index:
        is_continuous = df.loc[v]['continuous']
        # NOTE(review): `~` negation is valid only because np.where yields
        # numpy bool_ values here; on a plain Python bool, ~True == -2
        # (which is truthy) -- confirm if the column dtype ever changes.
        is_categorical = ~df.loc[v]['continuous']
        is_normal = ~df.loc[v]['nonnormal']
        # if continuous, group data into list of lists
        if is_continuous:
            catlevels = None
            grouped_data = []
            for s in self._groupbylvls:
                lvl_data = data.loc[data[self._groupby]==s, v]
                # coerce to numeric and drop non-numeric data
                lvl_data = lvl_data.apply(pd.to_numeric, errors='coerce').dropna()
                # append to overall group data
                grouped_data.append(lvl_data.values)
            min_observed = len(min(grouped_data,key=len))
        # if categorical, create contingency table
        elif is_categorical:
            catlevels = sorted(data[v].astype('category').cat.categories)
            grouped_data = pd.crosstab(data[self._groupby].rename('_groupby_var_'),data[v])
            min_observed = grouped_data.sum(axis=1).min()
        # minimum number of observations across all levels
        df.loc[v,'min_observed'] = min_observed
        # compute pvalues
        df.loc[v,'pval'],df.loc[v,'ptest'] = self._p_test(v,
            grouped_data,is_continuous,is_categorical,
            is_normal,min_observed,catlevels)
    return df
def _p_test(self,v,grouped_data,is_continuous,is_categorical,
            is_normal,min_observed,catlevels):
    """
    Compute p-values.
    Parameters
    ----------
    v : str
        Name of the variable to be tested.
    grouped_data : list
        List of lists of values to be tested.
    is_continuous : bool
        True if the variable is continuous.
    is_categorical : bool
        True if the variable is categorical.
    is_normal : bool
        True if the variable is normally distributed.
    min_observed : int
        Minimum number of values across groups for the variable.
    catlevels : list
        Sorted list of levels for categorical variables.
    Returns
    ----------
    pval : float
        The computed p-value.
    ptest : str
        The name of the test used to compute the p-value.
    """
    # no test by default
    pval=np.nan
    ptest='Not tested'
    # do not test if the variable has no observations in a level
    if min_observed == 0:
        warnings.warn('No p-value was computed for {} due to the low number of observations.'.format(v))
        return pval,ptest
    # continuous
    if is_continuous and is_normal and len(grouped_data)==2 :
        # Welch's variant (equal_var=False): does not assume equal variances
        ptest = 'Two Sample T-test'
        test_stat, pval = stats.ttest_ind(*grouped_data,equal_var=False)
    elif is_continuous and is_normal:
        # normally distributed, more than two groups
        ptest = 'One-way ANOVA'
        test_stat, pval = stats.f_oneway(*grouped_data)
    elif is_continuous and not is_normal:
        # non-normally distributed: rank-based test, works for 2+ groups
        ptest = 'Kruskal-Wallis'
        test_stat, pval = stats.kruskal(*grouped_data)
    # categorical
    elif is_categorical:
        # default to chi-squared
        ptest = 'Chi-squared'
        chi2, pval, dof, expected = stats.chi2_contingency(grouped_data)
        # if any expected cell counts are < 5, chi2 may not be valid
        # if this is a 2x2, switch to fisher exact
        if expected.min() < 5:
            if grouped_data.shape == (2,2):
                ptest = "Fisher's exact"
                oddsratio, pval = stats.fisher_exact(grouped_data)
            else:
                # NOTE(review): for non-2x2 tables the chi2 p-value is still
                # returned here; only the label and a warning flag the low counts.
                ptest = 'Chi-squared (warning: expected count < 5)'
                warnings.warn('No p-value was computed for {} due to the low number of observations.'.format(v))
    return pval,ptest
def _create_cont_table(self,data):
    """
    Create tableone for continuous data.
    Parameters
    ----------
    data : pandas DataFrame
        The input dataset (used only for the per-column null counts).
    Returns
    ----------
    table : pandas DataFrame
        A table summarising the continuous variables.
    """
    # remove the t1_summary level
    table = self.cont_describe[['t1_summary']].copy()
    table.columns = table.columns.droplevel(level=0)
    # add a column of null counts as 1-count() from previous function
    nulltable = data[self._continuous].isnull().sum().to_frame(name='isnull')
    try:
        table = table.join(nulltable)
    except TypeError: # if columns form a CategoricalIndex, need to convert to string first
        table.columns = table.columns.astype(str)
        table = table.join(nulltable)
    # add an empty level column, for joining with cat table
    table['level'] = ''
    table.set_index([table.index,'level'],inplace=True)
    # add pval column
    if self._pval and self._pval_adjust:
        table = table.join(self._significance_table[['pval (adjusted)','ptest']])
    elif self._pval:
        table = table.join(self._significance_table[['pval','ptest']])
    return table
def _create_cat_table(self,data):
    """
    Create table one for categorical data.
    Parameters
    ----------
    data : pandas DataFrame
        The input dataset (used only for the per-column null counts).
    Returns
    ----------
    table : pandas DataFrame
        A table summarising the categorical variables.
    """
    table = self.cat_describe['t1_summary'].copy()
    # add the total count of null values across all levels
    isnull = data[self._categorical].isnull().sum().to_frame(name='isnull')
    isnull.index.rename('variable', inplace=True)
    try:
        table = table.join(isnull)
    except TypeError: # if columns form a CategoricalIndex, need to convert to string first
        table.columns = table.columns.astype(str)
        table = table.join(isnull)
    # add pval column
    if self._pval and self._pval_adjust:
        table = table.join(self._significance_table[['pval (adjusted)','ptest']])
    elif self._pval:
        table = table.join(self._significance_table[['pval','ptest']])
    return table
def _create_tableone(self,data):
    """
    Create table 1 by combining the continuous and categorical tables.
    Parameters
    ----------
    data : pandas DataFrame
        The input dataset (used for group sizes and category frequencies).
    Returns
    ----------
    table : pandas DataFrame
        The complete table one.
    """
    if self._continuous and self._categorical:
        # support pandas<=0.22
        try:
            table = pd.concat([self.cont_table,self.cat_table],sort=False)
        except:
            table = pd.concat([self.cont_table,self.cat_table])
    elif self._continuous:
        table = self.cont_table
    elif self._categorical:
        table = self.cat_table
    # round pval column and convert to string
    if self._pval and self._pval_adjust:
        table['pval (adjusted)'] = table['pval (adjusted)'].apply('{:.3f}'.format).astype(str)
        table.loc[table['pval (adjusted)'] == '0.000', 'pval (adjusted)'] = '<0.001'
    elif self._pval:
        table['pval'] = table['pval'].apply('{:.3f}'.format).astype(str)
        table.loc[table['pval'] == '0.000', 'pval'] = '<0.001'
    # sort the table rows
    # NOTE(review): set_index(..., inplace=True) acts on the temporary frame
    # returned by reset_index() and its result is discarded, so this line is
    # a no-op; likely intended:
    #   table = table.reset_index().set_index(['variable','level'])
    # -- confirm before changing, the index already carries these levels.
    table.reset_index().set_index(['variable','level'], inplace=True)
    if self._sort:
        # alphabetical
        new_index = sorted(table.index.values)
    else:
        # sort by the columns argument
        new_index = sorted(table.index.values,key=lambda x: self._columns.index(x[0]))
    table = table.reindex(new_index)
    # if a limit has been set on the number of categorical variables
    # then re-order the variables by frequency
    if self._limit:
        levelcounts = data[self._categorical].nunique()
        levelcounts = levelcounts[levelcounts >= self._limit]
        for v,_ in levelcounts.iteritems():
            count = data[v].value_counts().sort_values(ascending=False)
            new_index = [(v, i) for i in count.index]
            # restructure to match orig_index
            new_index_array=np.empty((len(new_index),), dtype=object)
            new_index_array[:]=[tuple(i) for i in new_index]
            orig_index = table.index.values.copy()
            orig_index[table.index.get_loc(v)] = new_index_array
            table = table.reindex(orig_index)
    # inserts n row
    n_row = pd.DataFrame(columns = ['variable','level','isnull'])
    n_row.set_index(['variable','level'], inplace=True)
    n_row.loc['n', ''] = None
    # support pandas<=0.22
    try:
        table = pd.concat([n_row,table],sort=False)
    except:
        table = pd.concat([n_row,table])
    if self._groupbylvls == ['overall']:
        table.loc['n','overall'] = len(data.index)
    else:
        for g in self._groupbylvls:
            ct = data[self._groupby][data[self._groupby]==g].count()
            table.loc['n',g] = ct
    # only display data in first level row
    dupe_mask = table.groupby(level=[0]).cumcount().ne(0)
    dupe_columns = ['isnull']
    optional_columns = ['pval','pval (adjusted)','ptest']
    for col in optional_columns:
        if col in table.columns.values:
            dupe_columns.append(col)
    table[dupe_columns] = table[dupe_columns].mask(dupe_mask).fillna('')
    # remove empty column added above
    table.drop([''], axis=1, inplace=True)
    # remove isnull column if not needed
    if not self._isnull:
        table.drop('isnull',axis=1,inplace=True)
    # replace nans with empty strings
    table.fillna('',inplace=True)
    # add column index
    if not self._groupbylvls == ['overall']:
        # rename groupby variable if requested
        c = self._groupby
        if self._alt_labels:
            if self._groupby in self._alt_labels:
                c = self._alt_labels[self._groupby]
        c = 'Grouped by {}'.format(c)
        table.columns = pd.MultiIndex.from_product([[c], table.columns])
    # display alternative labels if assigned
    table.rename(index=self._create_row_labels(), inplace=True, level=0)
    # if a limit has been set on the number of categorical variables
    # limit the number of categorical variables that are displayed
    if self._limit:
        table = table.groupby('variable').head(self._limit)
    # re-order the columns in a consistent fashion
    if self._groupby:
        cols = table.columns.levels[1].values
    else:
        cols = table.columns.values
    if 'isnull' in cols:
        cols = ['isnull'] + [x for x in cols if x != 'isnull']
    # iterate through each optional column
    # if they exist, put them at the end of the dataframe
    # ensures the last 3 columns will be in the same order as optional_columns
    for col in optional_columns:
        if col in cols:
            cols = [x for x in cols if x != col] + [col]
    if self._groupby:
        table = table.reindex(cols, axis=1, level=1)
    else:
        table = table.reindex(cols, axis=1)
    return table
def _create_row_labels(self):
"""
Take the original labels for rows. Rename if alternative labels are
provided. Append label suffix if label_suffix is True.
Returns
----------
labels : dictionary
Dictionary, keys are original column name, values are final label.
"""
# start with the original column names
labels = {}
for c in self._columns:
labels[c] = c
# replace column names with alternative names if provided
if self._alt_labels:
for k in self._alt_labels.keys():
labels[k] = self._alt_labels[k]
# append the label suffix
if self._label_suffix:
for k in labels.keys():
if k in self._nonnormal:
labels[k] = "{}, {}".format(labels[k],"median [Q1,Q3]")
elif k in self._categorical:
labels[k] = "{}, {}".format(labels[k],"n (%)")
else:
labels[k] = "{}, {}".format(labels[k],"mean (SD)")
return labels
# warnings
def _non_continuous_warning(self, c):
    # Emitted when a column declared/detected as continuous coerces
    # entirely to NaN (i.e. contains no numeric data).
    warnings.warn('''"{}" has all non-numeric values. Consider including it in the
list of categorical variables.'''.format(c), RuntimeWarning, stacklevel=2)
|
tompollard/tableone
|
tableone.py
|
TableOne._create_cont_describe
|
python
|
def _create_cont_describe(self,data):
aggfuncs = [pd.Series.count,np.mean,np.median,self._std,
self._q25,self._q75,min,max,self._t1_summary,self._diptest,
self._outliers,self._far_outliers,self._normaltest]
# coerce continuous data to numeric
cont_data = data[self._continuous].apply(pd.to_numeric, errors='coerce')
# check all data in each continuous column is numeric
bad_cols = cont_data.count() != data[self._continuous].count()
bad_cols = cont_data.columns[bad_cols]
if len(bad_cols)>0:
raise InputError("""The following continuous column(s) have non-numeric values: {}.
Either specify the column(s) as categorical or remove the non-numeric values.""".format(bad_cols.values))
# check for coerced column containing all NaN to warn user
for column in cont_data.columns[cont_data.count() == 0]:
self._non_continuous_warning(column)
if self._groupby:
# add the groupby column back
cont_data = cont_data.merge(data[[self._groupby]],
left_index=True, right_index=True)
# group and aggregate data
df_cont = pd.pivot_table(cont_data,
columns=[self._groupby],
aggfunc=aggfuncs)
else:
# if no groupby, just add single group column
df_cont = cont_data.apply(aggfuncs).T
df_cont.columns.name = 'overall'
df_cont.columns = pd.MultiIndex.from_product([df_cont.columns,
['overall']])
df_cont.index.rename('variable',inplace=True)
# remove prefix underscore from column names (e.g. _std -> std)
agg_rename = df_cont.columns.levels[0]
agg_rename = [x[1:] if x[0]=='_' else x for x in agg_rename]
df_cont.columns.set_levels(agg_rename, level=0, inplace=True)
return df_cont
|
Describe the continuous data.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
df_cont : pandas DataFrame
Summarise the continuous variables.
|
train
|
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L378-L432
| null |
class TableOne(object):
"""
If you use the tableone package, please cite:
Pollard TJ, Johnson AEW, Raffa JD, Mark RG (2018). tableone: An open source Python
package for producing summary statistics for research papers. JAMIA Open, Volume 1,
Issue 1, 1 July 2018, Pages 26-31. https://doi.org/10.1093/jamiaopen/ooy012
Create an instance of the tableone summary table.
Parameters
----------
data : pandas DataFrame
The dataset to be summarised. Rows are observations, columns are variables.
columns : list, optional
List of columns in the dataset to be included in the final table.
categorical : list, optional
List of columns that contain categorical variables.
groupby : str, optional
Optional column for stratifying the final table (default: None).
nonnormal : list, optional
List of columns that contain non-normal variables (default: None).
pval : bool, optional
Display computed p-values (default: False).
pval_adjust : str, optional
Method used to adjust p-values for multiple testing.
For a complete list, see documentation for statsmodels multipletests.
Available methods include ::
`None` : no correction applied.
`bonferroni` : one-step correction
`sidak` : one-step correction
`holm-sidak` : step down method using Sidak adjustments
`simes-hochberg` : step-up method (independent)
`hommel` : closed method based on Simes tests (non-negative)
isnull : bool, optional
Display a count of null values (default: True).
ddof : int, optional
Degrees of freedom for standard deviation calculations (default: 1).
labels : dict, optional
Dictionary of alternative labels for variables.
e.g. `labels = {'sex':'gender', 'trt':'treatment'}`
sort : bool, optional
Sort the rows alphabetically. Default (False) retains the input order
of columns.
limit : int, optional
Limit to the top N most frequent categories.
remarks : bool, optional
Add remarks on the appropriateness of the summary measures and the
statistical tests (default: True).
label_suffix : bool, optional
Append summary type (e.g. "mean (SD); median [Q1,Q3], n (%); ") to the
row label (default: False).
decimals : int or dict, optional
Number of decimal places to display. An integer applies the rule to all
variables (default: 1). A dictionary (e.g. `decimals = {'age': 0)`) applies
the rule per variable, defaulting to 1 place for unspecified variables.
For continuous variables, applies to all summary statistics (e.g. mean and
standard deviation). For categorical variables, applies to percentage only.
Attributes
----------
tableone : dataframe
Summary of the data (i.e., the "Table 1").
"""
def __init__(self, data, columns=None, categorical=None, groupby=None,
nonnormal=None, pval=False, pval_adjust=None, isnull=True,
ddof=1, labels=None, sort=False, limit=None, remarks=True,
label_suffix=False, decimals=1):
# check input arguments
if not groupby:
groupby = ''
elif groupby and type(groupby) == list:
groupby = groupby[0]
if not nonnormal:
nonnormal=[]
elif nonnormal and type(nonnormal) == str:
nonnormal = [nonnormal]
# if columns not specified, use all columns
if not columns:
columns = data.columns.get_values()
# check that the columns exist in the dataframe
if not set(columns).issubset(data.columns):
notfound = list(set(columns) - set(data.columns))
raise InputError('Columns not found in dataset: {}'.format(notfound))
# check for duplicate columns
dups = data[columns].columns[data[columns].columns.duplicated()].unique()
if not dups.empty:
raise InputError('Input contains duplicate columns: {}'.format(dups))
# if categorical not specified, try to identify categorical
if not categorical and type(categorical) != list:
categorical = self._detect_categorical_columns(data[columns])
if pval and not groupby:
raise InputError("If pval=True then the groupby must be specified.")
self._columns = list(columns)
self._isnull = isnull
self._continuous = [c for c in columns if c not in categorical + [groupby]]
self._categorical = categorical
self._nonnormal = nonnormal
self._pval = pval
self._pval_adjust = pval_adjust
self._sort = sort
self._groupby = groupby
self._ddof = ddof # degrees of freedom for standard deviation
self._alt_labels = labels
self._limit = limit
self._remarks = remarks
self._label_suffix = label_suffix
self._decimals = decimals
# output column names that cannot be contained in a groupby
self._reserved_columns = ['isnull', 'pval', 'ptest', 'pval (adjusted)']
if self._groupby:
self._groupbylvls = sorted(data.groupby(groupby).groups.keys())
# check that the group levels do not include reserved words
for level in self._groupbylvls:
if level in self._reserved_columns:
raise InputError('Group level contained "{}", a reserved keyword for tableone.'.format(level))
else:
self._groupbylvls = ['overall']
# forgive me jraffa
if self._pval:
self._significance_table = self._create_significance_table(data)
# correct for multiple testing
if self._pval and self._pval_adjust:
alpha=0.05
adjusted = multitest.multipletests(self._significance_table['pval'],
alpha=alpha, method=self._pval_adjust)
self._significance_table['pval (adjusted)'] = adjusted[1]
self._significance_table['adjust method'] = self._pval_adjust
# create descriptive tables
if self._categorical:
self.cat_describe = self._create_cat_describe(data)
self.cat_table = self._create_cat_table(data)
# create tables of continuous and categorical variables
if self._continuous:
self.cont_describe = self._create_cont_describe(data)
self.cont_table = self._create_cont_table(data)
# combine continuous variables and categorical variables into table 1
self.tableone = self._create_tableone(data)
# self._remarks_str = self._generate_remark_str()
# wrap dataframe methods
self.head = self.tableone.head
self.tail = self.tableone.tail
self.to_csv = self.tableone.to_csv
self.to_excel = self.tableone.to_excel
self.to_html = self.tableone.to_html
self.to_json = self.tableone.to_json
self.to_latex = self.tableone.to_latex
def __str__(self):
return self.tableone.to_string() + self._generate_remark_str('\n')
def __repr__(self):
return self.tableone.to_string() + self._generate_remark_str('\n')
def _repr_html_(self):
return self.tableone._repr_html_() + self._generate_remark_str('<br />')
def _generate_remark_str(self, end_of_line = '\n'):
"""
Generate a series of remarks that the user should consider
when interpreting the summary statistics.
"""
warnings = {}
msg = '{}'.format(end_of_line)
# generate warnings for continuous variables
if self._continuous:
# highlight far outliers
outlier_mask = self.cont_describe.far_outliers > 1
outlier_vars = list(self.cont_describe.far_outliers[outlier_mask].dropna(how='all').index)
if outlier_vars:
warnings["Warning, Tukey test indicates far outliers in"] = outlier_vars
# highlight possible multimodal distributions using hartigan's dip test
# -1 values indicate NaN
modal_mask = (self.cont_describe.diptest >= 0) & (self.cont_describe.diptest <= 0.05)
modal_vars = list(self.cont_describe.diptest[modal_mask].dropna(how='all').index)
if modal_vars:
warnings["Warning, Hartigan's Dip Test reports possible multimodal distributions for"] = modal_vars
# highlight non normal distributions
# -1 values indicate NaN
modal_mask = (self.cont_describe.normaltest >= 0) & (self.cont_describe.normaltest <= 0.001)
modal_vars = list(self.cont_describe.normaltest[modal_mask].dropna(how='all').index)
if modal_vars:
warnings["Warning, test for normality reports non-normal distributions for"] = modal_vars
# create the warning string
for n,k in enumerate(sorted(warnings)):
msg += '[{}] {}: {}.{}'.format(n+1,k,', '.join(warnings[k]), end_of_line)
return msg
def _detect_categorical_columns(self,data):
"""
Detect categorical columns if they are not specified.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
likely_cat : list
List of variables that appear to be categorical.
"""
# assume all non-numerical and date columns are categorical
numeric_cols = set(data._get_numeric_data().columns.values)
date_cols = set(data.select_dtypes(include=[np.datetime64]).columns)
likely_cat = set(data.columns) - numeric_cols
likely_cat = list(likely_cat - date_cols)
# check proportion of unique values if numerical
for var in data._get_numeric_data().columns:
likely_flag = 1.0 * data[var].nunique()/data[var].count() < 0.05
if likely_flag:
likely_cat.append(var)
return likely_cat
def _q25(self,x):
"""
Compute percentile (25th)
"""
return np.nanpercentile(x.values,25)
def _q75(self,x):
"""
Compute percentile (75th)
"""
return np.nanpercentile(x.values,75)
def _std(self,x):
"""
Compute standard deviation with ddof degrees of freedom
"""
return np.nanstd(x.values,ddof=self._ddof)
def _diptest(self,x):
"""
Compute Hartigan Dip Test for modality.
p < 0.05 suggests possible multimodality.
"""
p = modality.hartigan_diptest(x.values)
# dropna=False argument in pivot_table does not function as expected
# return -1 instead of None
if pd.isnull(p):
return -1
return p
def _normaltest(self,x):
"""
Compute test for normal distribution.
Null hypothesis: x comes from a normal distribution
p < alpha suggests the null hypothesis can be rejected.
"""
if len(x.values[~np.isnan(x.values)]) > 10:
stat,p = stats.normaltest(x.values, nan_policy='omit')
else:
p = None
# dropna=False argument in pivot_table does not function as expected
# return -1 instead of None
if pd.isnull(p):
return -1
return p
def _tukey(self,x,threshold):
"""
Count outliers according to Tukey's rule.
Where Q1 is the lower quartile and Q3 is the upper quartile,
an outlier is an observation outside of the range:
[Q1 - k(Q3 - Q1), Q3 + k(Q3 - Q1)]
k = 1.5 indicates an outlier
k = 3.0 indicates an outlier that is "far out"
"""
vals = x.values[~np.isnan(x.values)]
try:
q1, q3 = np.percentile(vals, [25, 75])
iqr = q3 - q1
low_bound = q1 - (iqr * threshold)
high_bound = q3 + (iqr * threshold)
outliers = np.where((vals > high_bound) | (vals < low_bound))
except:
outliers = []
return outliers
def _outliers(self,x):
"""
Compute number of outliers
"""
outliers = self._tukey(x, threshold = 1.5)
return np.size(outliers)
def _far_outliers(self,x):
"""
Compute number of "far out" outliers
"""
outliers = self._tukey(x, threshold = 3.0)
return np.size(outliers)
def _t1_summary(self,x):
"""
Compute median [IQR] or mean (Std) for the input series.
Parameters
----------
x : pandas Series
Series of values to be summarised.
"""
# set decimal places
if isinstance(self._decimals,int):
n = self._decimals
elif isinstance(self._decimals,dict):
try:
n = self._decimals[x.name]
except:
n = 1
else:
n = 1
warnings.warn('The decimals arg must be an int or dict. Defaulting to {} d.p.'.format(n))
if x.name in self._nonnormal:
f = '{{:.{}f}} [{{:.{}f}},{{:.{}f}}]'.format(n,n,n)
return f.format(np.nanmedian(x.values),
np.nanpercentile(x.values,25), np.nanpercentile(x.values,75))
else:
f = '{{:.{}f}} ({{:.{}f}})'.format(n,n)
return f.format(np.nanmean(x.values),
np.nanstd(x.values,ddof=self._ddof))
def _format_cat(self,row):
var = row.name[0]
if var in self._decimals:
n = self._decimals[var]
else:
n = 1
f = '{{:.{}f}}'.format(n)
return f.format(row.percent)
def _create_cat_describe(self,data):
"""
Describe the categorical data.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
df_cat : pandas DataFrame
Summarise the categorical variables.
"""
group_dict = {}
for g in self._groupbylvls:
if self._groupby:
d_slice = data.loc[data[self._groupby] == g, self._categorical]
else:
d_slice = data[self._categorical].copy()
# create a dataframe with freq, proportion
df = d_slice.copy()
# convert type to string to avoid int converted to boolean, avoiding nans
for column in df.columns:
df[column] = [str(row) if not pd.isnull(row) else None for row in df[column].values]
df = df.melt().groupby(['variable','value']).size().to_frame(name='freq')
df.index.set_names('level', level=1, inplace=True)
df['percent'] = df['freq'].div(df.freq.sum(level=0),level=0).astype(float)* 100
# set number of decimal places for percent
if isinstance(self._decimals,int):
n = self._decimals
f = '{{:.{}f}}'.format(n)
df['percent'] = df['percent'].astype(float).map(f.format)
elif isinstance(self._decimals,dict):
df.loc[:,'percent'] = df.apply(self._format_cat, axis=1)
else:
n = 1
f = '{{:.{}f}}'.format(n)
df['percent'] = df['percent'].astype(float).map(f.format)
# add n column, listing total non-null values for each variable
ct = d_slice.count().to_frame(name='n')
ct.index.name = 'variable'
df = df.join(ct)
# add null count
nulls = d_slice.isnull().sum().to_frame(name='isnull')
nulls.index.name = 'variable'
# only save null count to the first category for each variable
# do this by extracting the first category from the df row index
levels = df.reset_index()[['variable','level']].groupby('variable').first()
# add this category to the nulls table
nulls = nulls.join(levels)
nulls.set_index('level', append=True, inplace=True)
# join nulls to categorical
df = df.join(nulls)
# add summary column
df['t1_summary'] = df.freq.map(str) + ' (' + df.percent.map(str) + ')'
# add to dictionary
group_dict[g] = df
df_cat = pd.concat(group_dict,axis=1)
# ensure the groups are the 2nd level of the column index
if df_cat.columns.nlevels>1:
df_cat = df_cat.swaplevel(0, 1, axis=1).sort_index(axis=1,level=0)
return df_cat
def _create_significance_table(self,data):
"""
Create a table containing p-values for significance tests. Add features of
the distributions and the p-values to the dataframe.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
df : pandas DataFrame
A table containing the p-values, test name, etc.
"""
# list features of the variable e.g. matched, paired, n_expected
df=pd.DataFrame(index=self._continuous+self._categorical,
columns=['continuous','nonnormal','min_observed','pval','ptest'])
df.index.rename('variable', inplace=True)
df['continuous'] = np.where(df.index.isin(self._continuous),True,False)
df['nonnormal'] = np.where(df.index.isin(self._nonnormal),True,False)
# list values for each variable, grouped by groupby levels
for v in df.index:
is_continuous = df.loc[v]['continuous']
is_categorical = ~df.loc[v]['continuous']
is_normal = ~df.loc[v]['nonnormal']
# if continuous, group data into list of lists
if is_continuous:
catlevels = None
grouped_data = []
for s in self._groupbylvls:
lvl_data = data.loc[data[self._groupby]==s, v]
# coerce to numeric and drop non-numeric data
lvl_data = lvl_data.apply(pd.to_numeric, errors='coerce').dropna()
# append to overall group data
grouped_data.append(lvl_data.values)
min_observed = len(min(grouped_data,key=len))
# if categorical, create contingency table
elif is_categorical:
catlevels = sorted(data[v].astype('category').cat.categories)
grouped_data = pd.crosstab(data[self._groupby].rename('_groupby_var_'),data[v])
min_observed = grouped_data.sum(axis=1).min()
# minimum number of observations across all levels
df.loc[v,'min_observed'] = min_observed
# compute pvalues
df.loc[v,'pval'],df.loc[v,'ptest'] = self._p_test(v,
grouped_data,is_continuous,is_categorical,
is_normal,min_observed,catlevels)
return df
def _p_test(self,v,grouped_data,is_continuous,is_categorical,
is_normal,min_observed,catlevels):
"""
Compute p-values.
Parameters
----------
v : str
Name of the variable to be tested.
grouped_data : list
List of lists of values to be tested.
is_continuous : bool
True if the variable is continuous.
is_categorical : bool
True if the variable is categorical.
is_normal : bool
True if the variable is normally distributed.
min_observed : int
Minimum number of values across groups for the variable.
catlevels : list
Sorted list of levels for categorical variables.
Returns
----------
pval : float
The computed p-value.
ptest : str
The name of the test used to compute the p-value.
"""
# no test by default
pval=np.nan
ptest='Not tested'
# do not test if the variable has no observations in a level
if min_observed == 0:
warnings.warn('No p-value was computed for {} due to the low number of observations.'.format(v))
return pval,ptest
# continuous
if is_continuous and is_normal and len(grouped_data)==2 :
ptest = 'Two Sample T-test'
test_stat, pval = stats.ttest_ind(*grouped_data,equal_var=False)
elif is_continuous and is_normal:
# normally distributed
ptest = 'One-way ANOVA'
test_stat, pval = stats.f_oneway(*grouped_data)
elif is_continuous and not is_normal:
# non-normally distributed
ptest = 'Kruskal-Wallis'
test_stat, pval = stats.kruskal(*grouped_data)
# categorical
elif is_categorical:
# default to chi-squared
ptest = 'Chi-squared'
chi2, pval, dof, expected = stats.chi2_contingency(grouped_data)
# if any expected cell counts are < 5, chi2 may not be valid
# if this is a 2x2, switch to fisher exact
if expected.min() < 5:
if grouped_data.shape == (2,2):
ptest = "Fisher's exact"
oddsratio, pval = stats.fisher_exact(grouped_data)
else:
ptest = 'Chi-squared (warning: expected count < 5)'
warnings.warn('No p-value was computed for {} due to the low number of observations.'.format(v))
return pval,ptest
def _create_cont_table(self,data):
"""
Create tableone for continuous data.
Returns
----------
table : pandas DataFrame
A table summarising the continuous variables.
"""
# remove the t1_summary level
table = self.cont_describe[['t1_summary']].copy()
table.columns = table.columns.droplevel(level=0)
# add a column of null counts as 1-count() from previous function
nulltable = data[self._continuous].isnull().sum().to_frame(name='isnull')
try:
table = table.join(nulltable)
except TypeError: # if columns form a CategoricalIndex, need to convert to string first
table.columns = table.columns.astype(str)
table = table.join(nulltable)
# add an empty level column, for joining with cat table
table['level'] = ''
table.set_index([table.index,'level'],inplace=True)
# add pval column
if self._pval and self._pval_adjust:
table = table.join(self._significance_table[['pval (adjusted)','ptest']])
elif self._pval:
table = table.join(self._significance_table[['pval','ptest']])
return table
def _create_cat_table(self,data):
"""
Create table one for categorical data.
Returns
----------
table : pandas DataFrame
A table summarising the categorical variables.
"""
table = self.cat_describe['t1_summary'].copy()
# add the total count of null values across all levels
isnull = data[self._categorical].isnull().sum().to_frame(name='isnull')
isnull.index.rename('variable', inplace=True)
try:
table = table.join(isnull)
except TypeError: # if columns form a CategoricalIndex, need to convert to string first
table.columns = table.columns.astype(str)
table = table.join(isnull)
# add pval column
if self._pval and self._pval_adjust:
table = table.join(self._significance_table[['pval (adjusted)','ptest']])
elif self._pval:
table = table.join(self._significance_table[['pval','ptest']])
return table
def _create_tableone(self,data):
"""
Create table 1 by combining the continuous and categorical tables.
Returns
----------
table : pandas DataFrame
The complete table one.
"""
if self._continuous and self._categorical:
# support pandas<=0.22
try:
table = pd.concat([self.cont_table,self.cat_table],sort=False)
except:
table = pd.concat([self.cont_table,self.cat_table])
elif self._continuous:
table = self.cont_table
elif self._categorical:
table = self.cat_table
# round pval column and convert to string
if self._pval and self._pval_adjust:
table['pval (adjusted)'] = table['pval (adjusted)'].apply('{:.3f}'.format).astype(str)
table.loc[table['pval (adjusted)'] == '0.000', 'pval (adjusted)'] = '<0.001'
elif self._pval:
table['pval'] = table['pval'].apply('{:.3f}'.format).astype(str)
table.loc[table['pval'] == '0.000', 'pval'] = '<0.001'
# sort the table rows
table.reset_index().set_index(['variable','level'], inplace=True)
if self._sort:
# alphabetical
new_index = sorted(table.index.values)
else:
# sort by the columns argument
new_index = sorted(table.index.values,key=lambda x: self._columns.index(x[0]))
table = table.reindex(new_index)
# if a limit has been set on the number of categorical variables
# then re-order the variables by frequency
if self._limit:
levelcounts = data[self._categorical].nunique()
levelcounts = levelcounts[levelcounts >= self._limit]
for v,_ in levelcounts.iteritems():
count = data[v].value_counts().sort_values(ascending=False)
new_index = [(v, i) for i in count.index]
# restructure to match orig_index
new_index_array=np.empty((len(new_index),), dtype=object)
new_index_array[:]=[tuple(i) for i in new_index]
orig_index = table.index.values.copy()
orig_index[table.index.get_loc(v)] = new_index_array
table = table.reindex(orig_index)
# inserts n row
n_row = pd.DataFrame(columns = ['variable','level','isnull'])
n_row.set_index(['variable','level'], inplace=True)
n_row.loc['n', ''] = None
# support pandas<=0.22
try:
table = pd.concat([n_row,table],sort=False)
except:
table = pd.concat([n_row,table])
if self._groupbylvls == ['overall']:
table.loc['n','overall'] = len(data.index)
else:
for g in self._groupbylvls:
ct = data[self._groupby][data[self._groupby]==g].count()
table.loc['n',g] = ct
# only display data in first level row
dupe_mask = table.groupby(level=[0]).cumcount().ne(0)
dupe_columns = ['isnull']
optional_columns = ['pval','pval (adjusted)','ptest']
for col in optional_columns:
if col in table.columns.values:
dupe_columns.append(col)
table[dupe_columns] = table[dupe_columns].mask(dupe_mask).fillna('')
# remove empty column added above
table.drop([''], axis=1, inplace=True)
# remove isnull column if not needed
if not self._isnull:
table.drop('isnull',axis=1,inplace=True)
# replace nans with empty strings
table.fillna('',inplace=True)
# add column index
if not self._groupbylvls == ['overall']:
# rename groupby variable if requested
c = self._groupby
if self._alt_labels:
if self._groupby in self._alt_labels:
c = self._alt_labels[self._groupby]
c = 'Grouped by {}'.format(c)
table.columns = pd.MultiIndex.from_product([[c], table.columns])
# display alternative labels if assigned
table.rename(index=self._create_row_labels(), inplace=True, level=0)
# if a limit has been set on the number of categorical variables
# limit the number of categorical variables that are displayed
if self._limit:
table = table.groupby('variable').head(self._limit)
# re-order the columns in a consistent fashion
if self._groupby:
cols = table.columns.levels[1].values
else:
cols = table.columns.values
if 'isnull' in cols:
cols = ['isnull'] + [x for x in cols if x != 'isnull']
# iterate through each optional column
# if they exist, put them at the end of the dataframe
# ensures the last 3 columns will be in the same order as optional_columns
for col in optional_columns:
if col in cols:
cols = [x for x in cols if x != col] + [col]
if self._groupby:
table = table.reindex(cols, axis=1, level=1)
else:
table = table.reindex(cols, axis=1)
return table
def _create_row_labels(self):
"""
Take the original labels for rows. Rename if alternative labels are
provided. Append label suffix if label_suffix is True.
Returns
----------
labels : dictionary
Dictionary, keys are original column name, values are final label.
"""
# start with the original column names
labels = {}
for c in self._columns:
labels[c] = c
# replace column names with alternative names if provided
if self._alt_labels:
for k in self._alt_labels.keys():
labels[k] = self._alt_labels[k]
# append the label suffix
if self._label_suffix:
for k in labels.keys():
if k in self._nonnormal:
labels[k] = "{}, {}".format(labels[k],"median [Q1,Q3]")
elif k in self._categorical:
labels[k] = "{}, {}".format(labels[k],"n (%)")
else:
labels[k] = "{}, {}".format(labels[k],"mean (SD)")
return labels
# warnings
def _non_continuous_warning(self, c):
warnings.warn('''"{}" has all non-numeric values. Consider including it in the
list of categorical variables.'''.format(c), RuntimeWarning, stacklevel=2)
|
tompollard/tableone
|
tableone.py
|
TableOne._create_cat_describe
|
python
|
def _create_cat_describe(self,data):
group_dict = {}
for g in self._groupbylvls:
if self._groupby:
d_slice = data.loc[data[self._groupby] == g, self._categorical]
else:
d_slice = data[self._categorical].copy()
# create a dataframe with freq, proportion
df = d_slice.copy()
# convert type to string to avoid int converted to boolean, avoiding nans
for column in df.columns:
df[column] = [str(row) if not pd.isnull(row) else None for row in df[column].values]
df = df.melt().groupby(['variable','value']).size().to_frame(name='freq')
df.index.set_names('level', level=1, inplace=True)
df['percent'] = df['freq'].div(df.freq.sum(level=0),level=0).astype(float)* 100
# set number of decimal places for percent
if isinstance(self._decimals,int):
n = self._decimals
f = '{{:.{}f}}'.format(n)
df['percent'] = df['percent'].astype(float).map(f.format)
elif isinstance(self._decimals,dict):
df.loc[:,'percent'] = df.apply(self._format_cat, axis=1)
else:
n = 1
f = '{{:.{}f}}'.format(n)
df['percent'] = df['percent'].astype(float).map(f.format)
# add n column, listing total non-null values for each variable
ct = d_slice.count().to_frame(name='n')
ct.index.name = 'variable'
df = df.join(ct)
# add null count
nulls = d_slice.isnull().sum().to_frame(name='isnull')
nulls.index.name = 'variable'
# only save null count to the first category for each variable
# do this by extracting the first category from the df row index
levels = df.reset_index()[['variable','level']].groupby('variable').first()
# add this category to the nulls table
nulls = nulls.join(levels)
nulls.set_index('level', append=True, inplace=True)
# join nulls to categorical
df = df.join(nulls)
# add summary column
df['t1_summary'] = df.freq.map(str) + ' (' + df.percent.map(str) + ')'
# add to dictionary
group_dict[g] = df
df_cat = pd.concat(group_dict,axis=1)
# ensure the groups are the 2nd level of the column index
if df_cat.columns.nlevels>1:
df_cat = df_cat.swaplevel(0, 1, axis=1).sort_index(axis=1,level=0)
return df_cat
|
Describe the categorical data.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
df_cat : pandas DataFrame
Summarise the categorical variables.
|
train
|
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L443-L516
| null |
class TableOne(object):
"""
If you use the tableone package, please cite:
Pollard TJ, Johnson AEW, Raffa JD, Mark RG (2018). tableone: An open source Python
package for producing summary statistics for research papers. JAMIA Open, Volume 1,
Issue 1, 1 July 2018, Pages 26-31. https://doi.org/10.1093/jamiaopen/ooy012
Create an instance of the tableone summary table.
Parameters
----------
data : pandas DataFrame
The dataset to be summarised. Rows are observations, columns are variables.
columns : list, optional
List of columns in the dataset to be included in the final table.
categorical : list, optional
List of columns that contain categorical variables.
groupby : str, optional
Optional column for stratifying the final table (default: None).
nonnormal : list, optional
List of columns that contain non-normal variables (default: None).
pval : bool, optional
Display computed p-values (default: False).
pval_adjust : str, optional
Method used to adjust p-values for multiple testing.
For a complete list, see documentation for statsmodels multipletests.
Available methods include ::
`None` : no correction applied.
`bonferroni` : one-step correction
`sidak` : one-step correction
`holm-sidak` : step down method using Sidak adjustments
`simes-hochberg` : step-up method (independent)
`hommel` : closed method based on Simes tests (non-negative)
isnull : bool, optional
Display a count of null values (default: True).
ddof : int, optional
Degrees of freedom for standard deviation calculations (default: 1).
labels : dict, optional
Dictionary of alternative labels for variables.
e.g. `labels = {'sex':'gender', 'trt':'treatment'}`
sort : bool, optional
Sort the rows alphabetically. Default (False) retains the input order
of columns.
limit : int, optional
Limit to the top N most frequent categories.
remarks : bool, optional
Add remarks on the appropriateness of the summary measures and the
statistical tests (default: True).
label_suffix : bool, optional
Append summary type (e.g. "mean (SD); median [Q1,Q3], n (%); ") to the
row label (default: False).
decimals : int or dict, optional
Number of decimal places to display. An integer applies the rule to all
variables (default: 1). A dictionary (e.g. `decimals = {'age': 0)`) applies
the rule per variable, defaulting to 1 place for unspecified variables.
For continuous variables, applies to all summary statistics (e.g. mean and
standard deviation). For categorical variables, applies to percentage only.
Attributes
----------
tableone : dataframe
Summary of the data (i.e., the "Table 1").
"""
def __init__(self, data, columns=None, categorical=None, groupby=None,
             nonnormal=None, pval=False, pval_adjust=None, isnull=True,
             ddof=1, labels=None, sort=False, limit=None, remarks=True,
             label_suffix=False, decimals=1):
    """
    Validate the input arguments, store the configuration on the instance,
    optionally run the significance tests, and build the final summary
    dataframe, stored as ``self.tableone``.

    See the class docstring for a description of each parameter.

    Raises
    ------
    InputError
        If requested columns are missing or duplicated, if ``pval`` is set
        without ``groupby``, or if a group level clashes with a reserved
        output column name.
    """
    # check input arguments
    # normalise groupby to a single column name ('' means no grouping)
    if not groupby:
        groupby = ''
    elif groupby and type(groupby) == list:
        groupby = groupby[0]
    # normalise nonnormal to a list of column names
    if not nonnormal:
        nonnormal = []
    elif nonnormal and type(nonnormal) == str:
        nonnormal = [nonnormal]
    # if columns not specified, use all columns
    # NOTE(review): Index.get_values() is deprecated in newer pandas —
    # confirm the pinned pandas version still supports it
    if not columns:
        columns = data.columns.get_values()
    # check that the columns exist in the dataframe
    if not set(columns).issubset(data.columns):
        notfound = list(set(columns) - set(data.columns))
        raise InputError('Columns not found in dataset: {}'.format(notfound))
    # check for duplicate columns
    dups = data[columns].columns[data[columns].columns.duplicated()].unique()
    if not dups.empty:
        raise InputError('Input contains duplicate columns: {}'.format(dups))
    # if categorical not specified, try to identify categorical
    if not categorical and type(categorical) != list:
        categorical = self._detect_categorical_columns(data[columns])
    if pval and not groupby:
        raise InputError("If pval=True then the groupby must be specified.")
    self._columns = list(columns)
    self._isnull = isnull
    # continuous = every requested column that is neither categorical
    # nor the groupby column
    self._continuous = [c for c in columns if c not in categorical + [groupby]]
    self._categorical = categorical
    self._nonnormal = nonnormal
    self._pval = pval
    self._pval_adjust = pval_adjust
    self._sort = sort
    self._groupby = groupby
    self._ddof = ddof  # degrees of freedom for standard deviation
    self._alt_labels = labels
    self._limit = limit
    self._remarks = remarks
    self._label_suffix = label_suffix
    self._decimals = decimals
    # output column names that cannot be contained in a groupby
    self._reserved_columns = ['isnull', 'pval', 'ptest', 'pval (adjusted)']
    if self._groupby:
        self._groupbylvls = sorted(data.groupby(groupby).groups.keys())
        # check that the group levels do not include reserved words
        for level in self._groupbylvls:
            if level in self._reserved_columns:
                raise InputError('Group level contained "{}", a reserved keyword for tableone.'.format(level))
    else:
        self._groupbylvls = ['overall']
    # forgive me jraffa
    if self._pval:
        self._significance_table = self._create_significance_table(data)
    # correct for multiple testing
    if self._pval and self._pval_adjust:
        alpha = 0.05
        adjusted = multitest.multipletests(self._significance_table['pval'],
                                           alpha=alpha, method=self._pval_adjust)
        self._significance_table['pval (adjusted)'] = adjusted[1]
        self._significance_table['adjust method'] = self._pval_adjust
    # create descriptive tables
    if self._categorical:
        self.cat_describe = self._create_cat_describe(data)
        self.cat_table = self._create_cat_table(data)
    # create tables of continuous and categorical variables
    if self._continuous:
        self.cont_describe = self._create_cont_describe(data)
        self.cont_table = self._create_cont_table(data)
    # combine continuous variables and categorical variables into table 1
    self.tableone = self._create_tableone(data)
    # self._remarks_str = self._generate_remark_str()
    # wrap dataframe methods so users can export directly from the instance
    self.head = self.tableone.head
    self.tail = self.tableone.tail
    self.to_csv = self.tableone.to_csv
    self.to_excel = self.tableone.to_excel
    self.to_html = self.tableone.to_html
    self.to_json = self.tableone.to_json
    self.to_latex = self.tableone.to_latex
def __str__(self):
return self.tableone.to_string() + self._generate_remark_str('\n')
def __repr__(self):
return self.tableone.to_string() + self._generate_remark_str('\n')
def _repr_html_(self):
return self.tableone._repr_html_() + self._generate_remark_str('<br />')
def _generate_remark_str(self, end_of_line = '\n'):
"""
Generate a series of remarks that the user should consider
when interpreting the summary statistics.
"""
warnings = {}
msg = '{}'.format(end_of_line)
# generate warnings for continuous variables
if self._continuous:
# highlight far outliers
outlier_mask = self.cont_describe.far_outliers > 1
outlier_vars = list(self.cont_describe.far_outliers[outlier_mask].dropna(how='all').index)
if outlier_vars:
warnings["Warning, Tukey test indicates far outliers in"] = outlier_vars
# highlight possible multimodal distributions using hartigan's dip test
# -1 values indicate NaN
modal_mask = (self.cont_describe.diptest >= 0) & (self.cont_describe.diptest <= 0.05)
modal_vars = list(self.cont_describe.diptest[modal_mask].dropna(how='all').index)
if modal_vars:
warnings["Warning, Hartigan's Dip Test reports possible multimodal distributions for"] = modal_vars
# highlight non normal distributions
# -1 values indicate NaN
modal_mask = (self.cont_describe.normaltest >= 0) & (self.cont_describe.normaltest <= 0.001)
modal_vars = list(self.cont_describe.normaltest[modal_mask].dropna(how='all').index)
if modal_vars:
warnings["Warning, test for normality reports non-normal distributions for"] = modal_vars
# create the warning string
for n,k in enumerate(sorted(warnings)):
msg += '[{}] {}: {}.{}'.format(n+1,k,', '.join(warnings[k]), end_of_line)
return msg
def _detect_categorical_columns(self,data):
"""
Detect categorical columns if they are not specified.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
likely_cat : list
List of variables that appear to be categorical.
"""
# assume all non-numerical and date columns are categorical
numeric_cols = set(data._get_numeric_data().columns.values)
date_cols = set(data.select_dtypes(include=[np.datetime64]).columns)
likely_cat = set(data.columns) - numeric_cols
likely_cat = list(likely_cat - date_cols)
# check proportion of unique values if numerical
for var in data._get_numeric_data().columns:
likely_flag = 1.0 * data[var].nunique()/data[var].count() < 0.05
if likely_flag:
likely_cat.append(var)
return likely_cat
def _q25(self,x):
"""
Compute percentile (25th)
"""
return np.nanpercentile(x.values,25)
def _q75(self,x):
"""
Compute percentile (75th)
"""
return np.nanpercentile(x.values,75)
def _std(self,x):
"""
Compute standard deviation with ddof degrees of freedom
"""
return np.nanstd(x.values,ddof=self._ddof)
def _diptest(self, x):
    """
    Compute Hartigan Dip Test for modality.

    p < 0.05 suggests possible multimodality.

    Returns the p-value, or -1 when the result is NaN (because the
    dropna=False argument in pivot_table does not function as expected).
    """
    p = modality.hartigan_diptest(x.values)
    return -1 if pd.isnull(p) else p
def _normaltest(self,x):
"""
Compute test for normal distribution.
Null hypothesis: x comes from a normal distribution
p < alpha suggests the null hypothesis can be rejected.
"""
if len(x.values[~np.isnan(x.values)]) > 10:
stat,p = stats.normaltest(x.values, nan_policy='omit')
else:
p = None
# dropna=False argument in pivot_table does not function as expected
# return -1 instead of None
if pd.isnull(p):
return -1
return p
def _tukey(self,x,threshold):
"""
Count outliers according to Tukey's rule.
Where Q1 is the lower quartile and Q3 is the upper quartile,
an outlier is an observation outside of the range:
[Q1 - k(Q3 - Q1), Q3 + k(Q3 - Q1)]
k = 1.5 indicates an outlier
k = 3.0 indicates an outlier that is "far out"
"""
vals = x.values[~np.isnan(x.values)]
try:
q1, q3 = np.percentile(vals, [25, 75])
iqr = q3 - q1
low_bound = q1 - (iqr * threshold)
high_bound = q3 + (iqr * threshold)
outliers = np.where((vals > high_bound) | (vals < low_bound))
except:
outliers = []
return outliers
def _outliers(self,x):
"""
Compute number of outliers
"""
outliers = self._tukey(x, threshold = 1.5)
return np.size(outliers)
def _far_outliers(self,x):
"""
Compute number of "far out" outliers
"""
outliers = self._tukey(x, threshold = 3.0)
return np.size(outliers)
def _t1_summary(self,x):
"""
Compute median [IQR] or mean (Std) for the input series.
Parameters
----------
x : pandas Series
Series of values to be summarised.
"""
# set decimal places
if isinstance(self._decimals,int):
n = self._decimals
elif isinstance(self._decimals,dict):
try:
n = self._decimals[x.name]
except:
n = 1
else:
n = 1
warnings.warn('The decimals arg must be an int or dict. Defaulting to {} d.p.'.format(n))
if x.name in self._nonnormal:
f = '{{:.{}f}} [{{:.{}f}},{{:.{}f}}]'.format(n,n,n)
return f.format(np.nanmedian(x.values),
np.nanpercentile(x.values,25), np.nanpercentile(x.values,75))
else:
f = '{{:.{}f}} ({{:.{}f}})'.format(n,n)
return f.format(np.nanmean(x.values),
np.nanstd(x.values,ddof=self._ddof))
def _create_cont_describe(self, data):
    """
    Describe the continuous data.

    Parameters
    ----------
    data : pandas DataFrame
        The input dataset.

    Returns
    ----------
    df_cont : pandas DataFrame
        Summarise the continuous variables.
    """
    # aggregation functions applied to every continuous column; the
    # self._* helpers are NaN-aware
    aggfuncs = [pd.Series.count, np.mean, np.median, self._std,
                self._q25, self._q75, min, max, self._t1_summary, self._diptest,
                self._outliers, self._far_outliers, self._normaltest]
    # coerce continuous data to numeric
    cont_data = data[self._continuous].apply(pd.to_numeric, errors='coerce')
    # check all data in each continuous column is numeric: any value lost
    # in the coercion means the column contained non-numeric entries
    bad_cols = cont_data.count() != data[self._continuous].count()
    bad_cols = cont_data.columns[bad_cols]
    if len(bad_cols) > 0:
        raise InputError("""The following continuous column(s) have non-numeric values: {}.
Either specify the column(s) as categorical or remove the non-numeric values.""".format(bad_cols.values))
    # check for coerced column containing all NaN to warn user
    for column in cont_data.columns[cont_data.count() == 0]:
        self._non_continuous_warning(column)
    if self._groupby:
        # add the groupby column back
        cont_data = cont_data.merge(data[[self._groupby]],
                                    left_index=True, right_index=True)
        # group and aggregate data: columns become a (aggfunc, group level)
        # MultiIndex
        df_cont = pd.pivot_table(cont_data,
                                 columns=[self._groupby],
                                 aggfunc=aggfuncs)
    else:
        # if no groupby, just add single group column so the shape matches
        # the grouped case
        df_cont = cont_data.apply(aggfuncs).T
        df_cont.columns.name = 'overall'
        df_cont.columns = pd.MultiIndex.from_product([df_cont.columns,
                                                      ['overall']])
    df_cont.index.rename('variable', inplace=True)
    # remove prefix underscore from column names (e.g. _std -> std)
    agg_rename = df_cont.columns.levels[0]
    agg_rename = [x[1:] if x[0] == '_' else x for x in agg_rename]
    # NOTE(review): MultiIndex.set_levels(..., inplace=True) is deprecated
    # in newer pandas — confirm the pinned pandas version supports it
    df_cont.columns.set_levels(agg_rename, level=0, inplace=True)
    return df_cont
def _format_cat(self,row):
var = row.name[0]
if var in self._decimals:
n = self._decimals[var]
else:
n = 1
f = '{{:.{}f}}'.format(n)
return f.format(row.percent)
def _create_significance_table(self,data):
"""
Create a table containing p-values for significance tests. Add features of
the distributions and the p-values to the dataframe.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
df : pandas DataFrame
A table containing the p-values, test name, etc.
"""
# list features of the variable e.g. matched, paired, n_expected
df=pd.DataFrame(index=self._continuous+self._categorical,
columns=['continuous','nonnormal','min_observed','pval','ptest'])
df.index.rename('variable', inplace=True)
df['continuous'] = np.where(df.index.isin(self._continuous),True,False)
df['nonnormal'] = np.where(df.index.isin(self._nonnormal),True,False)
# list values for each variable, grouped by groupby levels
for v in df.index:
is_continuous = df.loc[v]['continuous']
is_categorical = ~df.loc[v]['continuous']
is_normal = ~df.loc[v]['nonnormal']
# if continuous, group data into list of lists
if is_continuous:
catlevels = None
grouped_data = []
for s in self._groupbylvls:
lvl_data = data.loc[data[self._groupby]==s, v]
# coerce to numeric and drop non-numeric data
lvl_data = lvl_data.apply(pd.to_numeric, errors='coerce').dropna()
# append to overall group data
grouped_data.append(lvl_data.values)
min_observed = len(min(grouped_data,key=len))
# if categorical, create contingency table
elif is_categorical:
catlevels = sorted(data[v].astype('category').cat.categories)
grouped_data = pd.crosstab(data[self._groupby].rename('_groupby_var_'),data[v])
min_observed = grouped_data.sum(axis=1).min()
# minimum number of observations across all levels
df.loc[v,'min_observed'] = min_observed
# compute pvalues
df.loc[v,'pval'],df.loc[v,'ptest'] = self._p_test(v,
grouped_data,is_continuous,is_categorical,
is_normal,min_observed,catlevels)
return df
def _p_test(self,v,grouped_data,is_continuous,is_categorical,
is_normal,min_observed,catlevels):
"""
Compute p-values.
Parameters
----------
v : str
Name of the variable to be tested.
grouped_data : list
List of lists of values to be tested.
is_continuous : bool
True if the variable is continuous.
is_categorical : bool
True if the variable is categorical.
is_normal : bool
True if the variable is normally distributed.
min_observed : int
Minimum number of values across groups for the variable.
catlevels : list
Sorted list of levels for categorical variables.
Returns
----------
pval : float
The computed p-value.
ptest : str
The name of the test used to compute the p-value.
"""
# no test by default
pval=np.nan
ptest='Not tested'
# do not test if the variable has no observations in a level
if min_observed == 0:
warnings.warn('No p-value was computed for {} due to the low number of observations.'.format(v))
return pval,ptest
# continuous
if is_continuous and is_normal and len(grouped_data)==2 :
ptest = 'Two Sample T-test'
test_stat, pval = stats.ttest_ind(*grouped_data,equal_var=False)
elif is_continuous and is_normal:
# normally distributed
ptest = 'One-way ANOVA'
test_stat, pval = stats.f_oneway(*grouped_data)
elif is_continuous and not is_normal:
# non-normally distributed
ptest = 'Kruskal-Wallis'
test_stat, pval = stats.kruskal(*grouped_data)
# categorical
elif is_categorical:
# default to chi-squared
ptest = 'Chi-squared'
chi2, pval, dof, expected = stats.chi2_contingency(grouped_data)
# if any expected cell counts are < 5, chi2 may not be valid
# if this is a 2x2, switch to fisher exact
if expected.min() < 5:
if grouped_data.shape == (2,2):
ptest = "Fisher's exact"
oddsratio, pval = stats.fisher_exact(grouped_data)
else:
ptest = 'Chi-squared (warning: expected count < 5)'
warnings.warn('No p-value was computed for {} due to the low number of observations.'.format(v))
return pval,ptest
def _create_cont_table(self,data):
"""
Create tableone for continuous data.
Returns
----------
table : pandas DataFrame
A table summarising the continuous variables.
"""
# remove the t1_summary level
table = self.cont_describe[['t1_summary']].copy()
table.columns = table.columns.droplevel(level=0)
# add a column of null counts as 1-count() from previous function
nulltable = data[self._continuous].isnull().sum().to_frame(name='isnull')
try:
table = table.join(nulltable)
except TypeError: # if columns form a CategoricalIndex, need to convert to string first
table.columns = table.columns.astype(str)
table = table.join(nulltable)
# add an empty level column, for joining with cat table
table['level'] = ''
table.set_index([table.index,'level'],inplace=True)
# add pval column
if self._pval and self._pval_adjust:
table = table.join(self._significance_table[['pval (adjusted)','ptest']])
elif self._pval:
table = table.join(self._significance_table[['pval','ptest']])
return table
def _create_cat_table(self,data):
"""
Create table one for categorical data.
Returns
----------
table : pandas DataFrame
A table summarising the categorical variables.
"""
table = self.cat_describe['t1_summary'].copy()
# add the total count of null values across all levels
isnull = data[self._categorical].isnull().sum().to_frame(name='isnull')
isnull.index.rename('variable', inplace=True)
try:
table = table.join(isnull)
except TypeError: # if columns form a CategoricalIndex, need to convert to string first
table.columns = table.columns.astype(str)
table = table.join(isnull)
# add pval column
if self._pval and self._pval_adjust:
table = table.join(self._significance_table[['pval (adjusted)','ptest']])
elif self._pval:
table = table.join(self._significance_table[['pval','ptest']])
return table
def _create_tableone(self, data):
    """
    Create table 1 by combining the continuous and categorical tables.

    Parameters
    ----------
    data : pandas DataFrame
        The input dataset (used for group counts and category frequencies).

    Returns
    ----------
    table : pandas DataFrame
        The complete table one.
    """
    if self._continuous and self._categorical:
        # support pandas<=0.22 (no `sort` kwarg on concat)
        try:
            table = pd.concat([self.cont_table, self.cat_table], sort=False)
        except:
            table = pd.concat([self.cont_table, self.cat_table])
    elif self._continuous:
        table = self.cont_table
    elif self._categorical:
        table = self.cat_table
    # round pval column and convert to string
    if self._pval and self._pval_adjust:
        table['pval (adjusted)'] = table['pval (adjusted)'].apply('{:.3f}'.format).astype(str)
        table.loc[table['pval (adjusted)'] == '0.000', 'pval (adjusted)'] = '<0.001'
    elif self._pval:
        table['pval'] = table['pval'].apply('{:.3f}'.format).astype(str)
        table.loc[table['pval'] == '0.000', 'pval'] = '<0.001'
    # sort the table rows
    # NOTE(review): reset_index() returns a new frame, so this set_index
    # call mutates a throwaway copy and has no effect on `table`; the
    # index is already (variable, level) at this point
    table.reset_index().set_index(['variable', 'level'], inplace=True)
    if self._sort:
        # alphabetical
        new_index = sorted(table.index.values)
    else:
        # sort by the columns argument (original input order)
        new_index = sorted(table.index.values, key=lambda x: self._columns.index(x[0]))
    table = table.reindex(new_index)
    # if a limit has been set on the number of categorical variables
    # then re-order the variables by frequency
    if self._limit:
        levelcounts = data[self._categorical].nunique()
        levelcounts = levelcounts[levelcounts >= self._limit]
        # NOTE(review): Series.iteritems() is removed in pandas 2.x —
        # confirm the pinned pandas version supports it
        for v, _ in levelcounts.iteritems():
            count = data[v].value_counts().sort_values(ascending=False)
            new_index = [(v, i) for i in count.index]
            # restructure to match orig_index: an object array of tuples
            # so the block of rows for variable v can be swapped in place
            new_index_array = np.empty((len(new_index),), dtype=object)
            new_index_array[:] = [tuple(i) for i in new_index]
            orig_index = table.index.values.copy()
            orig_index[table.index.get_loc(v)] = new_index_array
            table = table.reindex(orig_index)
    # inserts n row (total observation counts) at the top of the table
    n_row = pd.DataFrame(columns=['variable', 'level', 'isnull'])
    n_row.set_index(['variable', 'level'], inplace=True)
    n_row.loc['n', ''] = None
    # support pandas<=0.22 (no `sort` kwarg on concat)
    try:
        table = pd.concat([n_row, table], sort=False)
    except:
        table = pd.concat([n_row, table])
    if self._groupbylvls == ['overall']:
        table.loc['n', 'overall'] = len(data.index)
    else:
        for g in self._groupbylvls:
            ct = data[self._groupby][data[self._groupby] == g].count()
            table.loc['n', g] = ct
    # only display data in first level row: blank out per-variable columns
    # on every row after the first level of each variable
    dupe_mask = table.groupby(level=[0]).cumcount().ne(0)
    dupe_columns = ['isnull']
    optional_columns = ['pval', 'pval (adjusted)', 'ptest']
    for col in optional_columns:
        if col in table.columns.values:
            dupe_columns.append(col)
    table[dupe_columns] = table[dupe_columns].mask(dupe_mask).fillna('')
    # remove empty column added above
    table.drop([''], axis=1, inplace=True)
    # remove isnull column if not needed
    if not self._isnull:
        table.drop('isnull', axis=1, inplace=True)
    # replace nans with empty strings
    table.fillna('', inplace=True)
    # add column index
    if not self._groupbylvls == ['overall']:
        # rename groupby variable if requested
        c = self._groupby
        if self._alt_labels:
            if self._groupby in self._alt_labels:
                c = self._alt_labels[self._groupby]
        c = 'Grouped by {}'.format(c)
        table.columns = pd.MultiIndex.from_product([[c], table.columns])
    # display alternative labels if assigned
    table.rename(index=self._create_row_labels(), inplace=True, level=0)
    # if a limit has been set on the number of categorical variables
    # limit the number of categorical variables that are displayed
    if self._limit:
        table = table.groupby('variable').head(self._limit)
    # re-order the columns in a consistent fashion
    if self._groupby:
        cols = table.columns.levels[1].values
    else:
        cols = table.columns.values
    if 'isnull' in cols:
        cols = ['isnull'] + [x for x in cols if x != 'isnull']
    # iterate through each optional column
    # if they exist, put them at the end of the dataframe
    # ensures the last 3 columns will be in the same order as optional_columns
    for col in optional_columns:
        if col in cols:
            cols = [x for x in cols if x != col] + [col]
    if self._groupby:
        table = table.reindex(cols, axis=1, level=1)
    else:
        table = table.reindex(cols, axis=1)
    return table
def _create_row_labels(self):
"""
Take the original labels for rows. Rename if alternative labels are
provided. Append label suffix if label_suffix is True.
Returns
----------
labels : dictionary
Dictionary, keys are original column name, values are final label.
"""
# start with the original column names
labels = {}
for c in self._columns:
labels[c] = c
# replace column names with alternative names if provided
if self._alt_labels:
for k in self._alt_labels.keys():
labels[k] = self._alt_labels[k]
# append the label suffix
if self._label_suffix:
for k in labels.keys():
if k in self._nonnormal:
labels[k] = "{}, {}".format(labels[k],"median [Q1,Q3]")
elif k in self._categorical:
labels[k] = "{}, {}".format(labels[k],"n (%)")
else:
labels[k] = "{}, {}".format(labels[k],"mean (SD)")
return labels
# warnings
def _non_continuous_warning(self, c):
warnings.warn('''"{}" has all non-numeric values. Consider including it in the
list of categorical variables.'''.format(c), RuntimeWarning, stacklevel=2)
|
tompollard/tableone
|
tableone.py
|
TableOne._create_significance_table
|
python
|
def _create_significance_table(self,data):
# list features of the variable e.g. matched, paired, n_expected
df=pd.DataFrame(index=self._continuous+self._categorical,
columns=['continuous','nonnormal','min_observed','pval','ptest'])
df.index.rename('variable', inplace=True)
df['continuous'] = np.where(df.index.isin(self._continuous),True,False)
df['nonnormal'] = np.where(df.index.isin(self._nonnormal),True,False)
# list values for each variable, grouped by groupby levels
for v in df.index:
is_continuous = df.loc[v]['continuous']
is_categorical = ~df.loc[v]['continuous']
is_normal = ~df.loc[v]['nonnormal']
# if continuous, group data into list of lists
if is_continuous:
catlevels = None
grouped_data = []
for s in self._groupbylvls:
lvl_data = data.loc[data[self._groupby]==s, v]
# coerce to numeric and drop non-numeric data
lvl_data = lvl_data.apply(pd.to_numeric, errors='coerce').dropna()
# append to overall group data
grouped_data.append(lvl_data.values)
min_observed = len(min(grouped_data,key=len))
# if categorical, create contingency table
elif is_categorical:
catlevels = sorted(data[v].astype('category').cat.categories)
grouped_data = pd.crosstab(data[self._groupby].rename('_groupby_var_'),data[v])
min_observed = grouped_data.sum(axis=1).min()
# minimum number of observations across all levels
df.loc[v,'min_observed'] = min_observed
# compute pvalues
df.loc[v,'pval'],df.loc[v,'ptest'] = self._p_test(v,
grouped_data,is_continuous,is_categorical,
is_normal,min_observed,catlevels)
return df
|
Create a table containing p-values for significance tests. Add features of
the distributions and the p-values to the dataframe.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
df : pandas DataFrame
A table containing the p-values, test name, etc.
|
train
|
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L518-L572
| null |
class TableOne(object):
"""
If you use the tableone package, please cite:
Pollard TJ, Johnson AEW, Raffa JD, Mark RG (2018). tableone: An open source Python
package for producing summary statistics for research papers. JAMIA Open, Volume 1,
Issue 1, 1 July 2018, Pages 26-31. https://doi.org/10.1093/jamiaopen/ooy012
Create an instance of the tableone summary table.
Parameters
----------
data : pandas DataFrame
The dataset to be summarised. Rows are observations, columns are variables.
columns : list, optional
List of columns in the dataset to be included in the final table.
categorical : list, optional
List of columns that contain categorical variables.
groupby : str, optional
Optional column for stratifying the final table (default: None).
nonnormal : list, optional
List of columns that contain non-normal variables (default: None).
pval : bool, optional
Display computed p-values (default: False).
pval_adjust : str, optional
Method used to adjust p-values for multiple testing.
For a complete list, see documentation for statsmodels multipletests.
Available methods include ::
`None` : no correction applied.
`bonferroni` : one-step correction
`sidak` : one-step correction
`holm-sidak` : step down method using Sidak adjustments
`simes-hochberg` : step-up method (independent)
`hommel` : closed method based on Simes tests (non-negative)
isnull : bool, optional
Display a count of null values (default: True).
ddof : int, optional
Degrees of freedom for standard deviation calculations (default: 1).
labels : dict, optional
Dictionary of alternative labels for variables.
e.g. `labels = {'sex':'gender', 'trt':'treatment'}`
sort : bool, optional
Sort the rows alphabetically. Default (False) retains the input order
of columns.
limit : int, optional
Limit to the top N most frequent categories.
remarks : bool, optional
Add remarks on the appropriateness of the summary measures and the
statistical tests (default: True).
label_suffix : bool, optional
Append summary type (e.g. "mean (SD); median [Q1,Q3], n (%); ") to the
row label (default: False).
decimals : int or dict, optional
Number of decimal places to display. An integer applies the rule to all
variables (default: 1). A dictionary (e.g. `decimals = {'age': 0)`) applies
the rule per variable, defaulting to 1 place for unspecified variables.
For continuous variables, applies to all summary statistics (e.g. mean and
standard deviation). For categorical variables, applies to percentage only.
Attributes
----------
tableone : dataframe
Summary of the data (i.e., the "Table 1").
"""
def __init__(self, data, columns=None, categorical=None, groupby=None,
nonnormal=None, pval=False, pval_adjust=None, isnull=True,
ddof=1, labels=None, sort=False, limit=None, remarks=True,
label_suffix=False, decimals=1):
# check input arguments
if not groupby:
groupby = ''
elif groupby and type(groupby) == list:
groupby = groupby[0]
if not nonnormal:
nonnormal=[]
elif nonnormal and type(nonnormal) == str:
nonnormal = [nonnormal]
# if columns not specified, use all columns
if not columns:
columns = data.columns.get_values()
# check that the columns exist in the dataframe
if not set(columns).issubset(data.columns):
notfound = list(set(columns) - set(data.columns))
raise InputError('Columns not found in dataset: {}'.format(notfound))
# check for duplicate columns
dups = data[columns].columns[data[columns].columns.duplicated()].unique()
if not dups.empty:
raise InputError('Input contains duplicate columns: {}'.format(dups))
# if categorical not specified, try to identify categorical
if not categorical and type(categorical) != list:
categorical = self._detect_categorical_columns(data[columns])
if pval and not groupby:
raise InputError("If pval=True then the groupby must be specified.")
self._columns = list(columns)
self._isnull = isnull
self._continuous = [c for c in columns if c not in categorical + [groupby]]
self._categorical = categorical
self._nonnormal = nonnormal
self._pval = pval
self._pval_adjust = pval_adjust
self._sort = sort
self._groupby = groupby
self._ddof = ddof # degrees of freedom for standard deviation
self._alt_labels = labels
self._limit = limit
self._remarks = remarks
self._label_suffix = label_suffix
self._decimals = decimals
# output column names that cannot be contained in a groupby
self._reserved_columns = ['isnull', 'pval', 'ptest', 'pval (adjusted)']
if self._groupby:
self._groupbylvls = sorted(data.groupby(groupby).groups.keys())
# check that the group levels do not include reserved words
for level in self._groupbylvls:
if level in self._reserved_columns:
raise InputError('Group level contained "{}", a reserved keyword for tableone.'.format(level))
else:
self._groupbylvls = ['overall']
# forgive me jraffa
if self._pval:
self._significance_table = self._create_significance_table(data)
# correct for multiple testing
if self._pval and self._pval_adjust:
alpha=0.05
adjusted = multitest.multipletests(self._significance_table['pval'],
alpha=alpha, method=self._pval_adjust)
self._significance_table['pval (adjusted)'] = adjusted[1]
self._significance_table['adjust method'] = self._pval_adjust
# create descriptive tables
if self._categorical:
self.cat_describe = self._create_cat_describe(data)
self.cat_table = self._create_cat_table(data)
# create tables of continuous and categorical variables
if self._continuous:
self.cont_describe = self._create_cont_describe(data)
self.cont_table = self._create_cont_table(data)
# combine continuous variables and categorical variables into table 1
self.tableone = self._create_tableone(data)
# self._remarks_str = self._generate_remark_str()
# wrap dataframe methods
self.head = self.tableone.head
self.tail = self.tableone.tail
self.to_csv = self.tableone.to_csv
self.to_excel = self.tableone.to_excel
self.to_html = self.tableone.to_html
self.to_json = self.tableone.to_json
self.to_latex = self.tableone.to_latex
def __str__(self):
return self.tableone.to_string() + self._generate_remark_str('\n')
def __repr__(self):
return self.tableone.to_string() + self._generate_remark_str('\n')
def _repr_html_(self):
return self.tableone._repr_html_() + self._generate_remark_str('<br />')
def _generate_remark_str(self, end_of_line = '\n'):
"""
Generate a series of remarks that the user should consider
when interpreting the summary statistics.
"""
warnings = {}
msg = '{}'.format(end_of_line)
# generate warnings for continuous variables
if self._continuous:
# highlight far outliers
outlier_mask = self.cont_describe.far_outliers > 1
outlier_vars = list(self.cont_describe.far_outliers[outlier_mask].dropna(how='all').index)
if outlier_vars:
warnings["Warning, Tukey test indicates far outliers in"] = outlier_vars
# highlight possible multimodal distributions using hartigan's dip test
# -1 values indicate NaN
modal_mask = (self.cont_describe.diptest >= 0) & (self.cont_describe.diptest <= 0.05)
modal_vars = list(self.cont_describe.diptest[modal_mask].dropna(how='all').index)
if modal_vars:
warnings["Warning, Hartigan's Dip Test reports possible multimodal distributions for"] = modal_vars
# highlight non normal distributions
# -1 values indicate NaN
modal_mask = (self.cont_describe.normaltest >= 0) & (self.cont_describe.normaltest <= 0.001)
modal_vars = list(self.cont_describe.normaltest[modal_mask].dropna(how='all').index)
if modal_vars:
warnings["Warning, test for normality reports non-normal distributions for"] = modal_vars
# create the warning string
for n,k in enumerate(sorted(warnings)):
msg += '[{}] {}: {}.{}'.format(n+1,k,', '.join(warnings[k]), end_of_line)
return msg
def _detect_categorical_columns(self,data):
"""
Detect categorical columns if they are not specified.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
likely_cat : list
List of variables that appear to be categorical.
"""
# assume all non-numerical and date columns are categorical
numeric_cols = set(data._get_numeric_data().columns.values)
date_cols = set(data.select_dtypes(include=[np.datetime64]).columns)
likely_cat = set(data.columns) - numeric_cols
likely_cat = list(likely_cat - date_cols)
# check proportion of unique values if numerical
for var in data._get_numeric_data().columns:
likely_flag = 1.0 * data[var].nunique()/data[var].count() < 0.05
if likely_flag:
likely_cat.append(var)
return likely_cat
def _q25(self,x):
"""
Compute percentile (25th)
"""
return np.nanpercentile(x.values,25)
def _q75(self,x):
"""
Compute percentile (75th)
"""
return np.nanpercentile(x.values,75)
def _std(self,x):
"""
Compute standard deviation with ddof degrees of freedom
"""
return np.nanstd(x.values,ddof=self._ddof)
def _diptest(self,x):
"""
Compute Hartigan Dip Test for modality.
p < 0.05 suggests possible multimodality.
"""
p = modality.hartigan_diptest(x.values)
# dropna=False argument in pivot_table does not function as expected
# return -1 instead of None
if pd.isnull(p):
return -1
return p
def _normaltest(self,x):
"""
Compute test for normal distribution.
Null hypothesis: x comes from a normal distribution
p < alpha suggests the null hypothesis can be rejected.
"""
if len(x.values[~np.isnan(x.values)]) > 10:
stat,p = stats.normaltest(x.values, nan_policy='omit')
else:
p = None
# dropna=False argument in pivot_table does not function as expected
# return -1 instead of None
if pd.isnull(p):
return -1
return p
def _tukey(self,x,threshold):
"""
Count outliers according to Tukey's rule.
Where Q1 is the lower quartile and Q3 is the upper quartile,
an outlier is an observation outside of the range:
[Q1 - k(Q3 - Q1), Q3 + k(Q3 - Q1)]
k = 1.5 indicates an outlier
k = 3.0 indicates an outlier that is "far out"
"""
vals = x.values[~np.isnan(x.values)]
try:
q1, q3 = np.percentile(vals, [25, 75])
iqr = q3 - q1
low_bound = q1 - (iqr * threshold)
high_bound = q3 + (iqr * threshold)
outliers = np.where((vals > high_bound) | (vals < low_bound))
except:
outliers = []
return outliers
def _outliers(self, x):
    """Number of observations in ``x`` outside Tukey's fences (k = 1.5)."""
    return np.size(self._tukey(x, threshold=1.5))
def _far_outliers(self, x):
    """Number of "far out" observations in ``x`` (Tukey's rule, k = 3.0)."""
    return np.size(self._tukey(x, threshold=3.0))
def _t1_summary(self,x):
"""
Compute median [IQR] or mean (Std) for the input series.
Parameters
----------
x : pandas Series
Series of values to be summarised.
"""
# set decimal places
if isinstance(self._decimals,int):
n = self._decimals
elif isinstance(self._decimals,dict):
try:
n = self._decimals[x.name]
except:
n = 1
else:
n = 1
warnings.warn('The decimals arg must be an int or dict. Defaulting to {} d.p.'.format(n))
if x.name in self._nonnormal:
f = '{{:.{}f}} [{{:.{}f}},{{:.{}f}}]'.format(n,n,n)
return f.format(np.nanmedian(x.values),
np.nanpercentile(x.values,25), np.nanpercentile(x.values,75))
else:
f = '{{:.{}f}} ({{:.{}f}})'.format(n,n)
return f.format(np.nanmean(x.values),
np.nanstd(x.values,ddof=self._ddof))
def _create_cont_describe(self,data):
    """
    Describe the continuous data.

    Parameters
    ----------
    data : pandas DataFrame
        The input dataset.

    Returns
    ----------
    df_cont : pandas DataFrame
        Summarise the continuous variables.
    """
    # one aggregation per summary statistic; the private helpers
    # (_std, _q25, ...) become column names and are de-underscored below
    aggfuncs = [pd.Series.count,np.mean,np.median,self._std,
        self._q25,self._q75,min,max,self._t1_summary,self._diptest,
        self._outliers,self._far_outliers,self._normaltest]
    # coerce continuous data to numeric
    cont_data = data[self._continuous].apply(pd.to_numeric, errors='coerce')
    # check all data in each continuous column is numeric: coercion
    # reduces count() wherever a non-numeric value became NaN
    bad_cols = cont_data.count() != data[self._continuous].count()
    bad_cols = cont_data.columns[bad_cols]
    if len(bad_cols)>0:
        raise InputError("""The following continuous column(s) have non-numeric values: {}.
Either specify the column(s) as categorical or remove the non-numeric values.""".format(bad_cols.values))
    # check for coerced column containing all NaN to warn user
    for column in cont_data.columns[cont_data.count() == 0]:
        self._non_continuous_warning(column)
    if self._groupby:
        # add the groupby column back
        cont_data = cont_data.merge(data[[self._groupby]],
            left_index=True, right_index=True)
        # group and aggregate data
        df_cont = pd.pivot_table(cont_data,
            columns=[self._groupby],
            aggfunc=aggfuncs)
    else:
        # if no groupby, just add single group column
        df_cont = cont_data.apply(aggfuncs).T
        df_cont.columns.name = 'overall'
        df_cont.columns = pd.MultiIndex.from_product([df_cont.columns,
            ['overall']])
    df_cont.index.rename('variable',inplace=True)
    # remove prefix underscore from column names (e.g. _std -> std)
    agg_rename = df_cont.columns.levels[0]
    agg_rename = [x[1:] if x[0]=='_' else x for x in agg_rename]
    df_cont.columns.set_levels(agg_rename, level=0, inplace=True)
    return df_cont
def _format_cat(self,row):
var = row.name[0]
if var in self._decimals:
n = self._decimals[var]
else:
n = 1
f = '{{:.{}f}}'.format(n)
return f.format(row.percent)
def _create_cat_describe(self,data):
    """
    Describe the categorical data.

    Parameters
    ----------
    data : pandas DataFrame
        The input dataset.

    Returns
    ----------
    df_cat : pandas DataFrame
        Summarise the categorical variables.
    """
    group_dict = {}
    # build one frequency table per group level, then concatenate
    for g in self._groupbylvls:
        if self._groupby:
            d_slice = data.loc[data[self._groupby] == g, self._categorical]
        else:
            d_slice = data[self._categorical].copy()
        # create a dataframe with freq, proportion
        df = d_slice.copy()
        # convert type to string to avoid int converted to boolean, avoiding nans
        for column in df.columns:
            df[column] = [str(row) if not pd.isnull(row) else None for row in df[column].values]
        df = df.melt().groupby(['variable','value']).size().to_frame(name='freq')
        df.index.set_names('level', level=1, inplace=True)
        # percentage within each variable (level 0 of the row index)
        df['percent'] = df['freq'].div(df.freq.sum(level=0),level=0).astype(float)* 100
        # set number of decimal places for percent
        if isinstance(self._decimals,int):
            n = self._decimals
            f = '{{:.{}f}}'.format(n)
            df['percent'] = df['percent'].astype(float).map(f.format)
        elif isinstance(self._decimals,dict):
            # per-variable decimal places
            df.loc[:,'percent'] = df.apply(self._format_cat, axis=1)
        else:
            n = 1
            f = '{{:.{}f}}'.format(n)
            df['percent'] = df['percent'].astype(float).map(f.format)
        # add n column, listing total non-null values for each variable
        ct = d_slice.count().to_frame(name='n')
        ct.index.name = 'variable'
        df = df.join(ct)
        # add null count
        nulls = d_slice.isnull().sum().to_frame(name='isnull')
        nulls.index.name = 'variable'
        # only save null count to the first category for each variable
        # do this by extracting the first category from the df row index
        levels = df.reset_index()[['variable','level']].groupby('variable').first()
        # add this category to the nulls table
        nulls = nulls.join(levels)
        nulls.set_index('level', append=True, inplace=True)
        # join nulls to categorical
        df = df.join(nulls)
        # add summary column
        df['t1_summary'] = df.freq.map(str) + ' (' + df.percent.map(str) + ')'
        # add to dictionary
        group_dict[g] = df
    df_cat = pd.concat(group_dict,axis=1)
    # ensure the groups are the 2nd level of the column index
    if df_cat.columns.nlevels>1:
        df_cat = df_cat.swaplevel(0, 1, axis=1).sort_index(axis=1,level=0)
    return df_cat
def _p_test(self,v,grouped_data,is_continuous,is_categorical,
is_normal,min_observed,catlevels):
"""
Compute p-values.
Parameters
----------
v : str
Name of the variable to be tested.
grouped_data : list
List of lists of values to be tested.
is_continuous : bool
True if the variable is continuous.
is_categorical : bool
True if the variable is categorical.
is_normal : bool
True if the variable is normally distributed.
min_observed : int
Minimum number of values across groups for the variable.
catlevels : list
Sorted list of levels for categorical variables.
Returns
----------
pval : float
The computed p-value.
ptest : str
The name of the test used to compute the p-value.
"""
# no test by default
pval=np.nan
ptest='Not tested'
# do not test if the variable has no observations in a level
if min_observed == 0:
warnings.warn('No p-value was computed for {} due to the low number of observations.'.format(v))
return pval,ptest
# continuous
if is_continuous and is_normal and len(grouped_data)==2 :
ptest = 'Two Sample T-test'
test_stat, pval = stats.ttest_ind(*grouped_data,equal_var=False)
elif is_continuous and is_normal:
# normally distributed
ptest = 'One-way ANOVA'
test_stat, pval = stats.f_oneway(*grouped_data)
elif is_continuous and not is_normal:
# non-normally distributed
ptest = 'Kruskal-Wallis'
test_stat, pval = stats.kruskal(*grouped_data)
# categorical
elif is_categorical:
# default to chi-squared
ptest = 'Chi-squared'
chi2, pval, dof, expected = stats.chi2_contingency(grouped_data)
# if any expected cell counts are < 5, chi2 may not be valid
# if this is a 2x2, switch to fisher exact
if expected.min() < 5:
if grouped_data.shape == (2,2):
ptest = "Fisher's exact"
oddsratio, pval = stats.fisher_exact(grouped_data)
else:
ptest = 'Chi-squared (warning: expected count < 5)'
warnings.warn('No p-value was computed for {} due to the low number of observations.'.format(v))
return pval,ptest
def _create_cont_table(self,data):
    """
    Create tableone for continuous data.

    Parameters
    ----------
    data : pandas DataFrame
        The input dataset (used only for the null counts).

    Returns
    ----------
    table : pandas DataFrame
        A table summarising the continuous variables.
    """
    # remove the t1_summary level
    table = self.cont_describe[['t1_summary']].copy()
    table.columns = table.columns.droplevel(level=0)
    # add a column of null counts as 1-count() from previous function
    nulltable = data[self._continuous].isnull().sum().to_frame(name='isnull')
    try:
        table = table.join(nulltable)
    except TypeError: # if columns form a CategoricalIndex, need to convert to string first
        table.columns = table.columns.astype(str)
        table = table.join(nulltable)
    # add an empty level column, for joining with cat table
    table['level'] = ''
    table.set_index([table.index,'level'],inplace=True)
    # add pval column
    if self._pval and self._pval_adjust:
        table = table.join(self._significance_table[['pval (adjusted)','ptest']])
    elif self._pval:
        table = table.join(self._significance_table[['pval','ptest']])
    return table
def _create_cat_table(self,data):
    """
    Create table one for categorical data.

    Parameters
    ----------
    data : pandas DataFrame
        The input dataset (used only for the null counts).

    Returns
    ----------
    table : pandas DataFrame
        A table summarising the categorical variables.
    """
    table = self.cat_describe['t1_summary'].copy()
    # add the total count of null values across all levels
    isnull = data[self._categorical].isnull().sum().to_frame(name='isnull')
    isnull.index.rename('variable', inplace=True)
    try:
        table = table.join(isnull)
    except TypeError: # if columns form a CategoricalIndex, need to convert to string first
        table.columns = table.columns.astype(str)
        table = table.join(isnull)
    # add pval column
    if self._pval and self._pval_adjust:
        table = table.join(self._significance_table[['pval (adjusted)','ptest']])
    elif self._pval:
        table = table.join(self._significance_table[['pval','ptest']])
    return table
def _create_tableone(self,data):
    """
    Create table 1 by combining the continuous and categorical tables.

    Parameters
    ----------
    data : pandas DataFrame
        The input dataset (used for the "n" row and frequency ordering).

    Returns
    ----------
    table : pandas DataFrame
        The complete table one.
    """
    if self._continuous and self._categorical:
        # support pandas<=0.22
        try:
            table = pd.concat([self.cont_table,self.cat_table],sort=False)
        except:
            table = pd.concat([self.cont_table,self.cat_table])
    elif self._continuous:
        table = self.cont_table
    elif self._categorical:
        table = self.cat_table
    # round pval column and convert to string
    if self._pval and self._pval_adjust:
        table['pval (adjusted)'] = table['pval (adjusted)'].apply('{:.3f}'.format).astype(str)
        table.loc[table['pval (adjusted)'] == '0.000', 'pval (adjusted)'] = '<0.001'
    elif self._pval:
        table['pval'] = table['pval'].apply('{:.3f}'.format).astype(str)
        table.loc[table['pval'] == '0.000', 'pval'] = '<0.001'
    # sort the table rows
    # NOTE(review): the next line is a no-op — reset_index() returns a
    # new DataFrame and set_index(..., inplace=True) mutates only that
    # temporary copy, which is immediately discarded.
    table.reset_index().set_index(['variable','level'], inplace=True)
    if self._sort:
        # alphabetical
        new_index = sorted(table.index.values)
    else:
        # sort by the columns argument (original input order)
        new_index = sorted(table.index.values,key=lambda x: self._columns.index(x[0]))
    table = table.reindex(new_index)
    # if a limit has been set on the number of categorical variables
    # then re-order the variables by frequency
    if self._limit:
        levelcounts = data[self._categorical].nunique()
        levelcounts = levelcounts[levelcounts >= self._limit]
        for v,_ in levelcounts.iteritems():
            count = data[v].value_counts().sort_values(ascending=False)
            new_index = [(v, i) for i in count.index]
            # restructure to match orig_index
            new_index_array=np.empty((len(new_index),), dtype=object)
            new_index_array[:]=[tuple(i) for i in new_index]
            orig_index = table.index.values.copy()
            orig_index[table.index.get_loc(v)] = new_index_array
            table = table.reindex(orig_index)
    # inserts n row
    n_row = pd.DataFrame(columns = ['variable','level','isnull'])
    n_row.set_index(['variable','level'], inplace=True)
    n_row.loc['n', ''] = None
    # support pandas<=0.22
    try:
        table = pd.concat([n_row,table],sort=False)
    except:
        table = pd.concat([n_row,table])
    if self._groupbylvls == ['overall']:
        table.loc['n','overall'] = len(data.index)
    else:
        for g in self._groupbylvls:
            ct = data[self._groupby][data[self._groupby]==g].count()
            table.loc['n',g] = ct
    # only display data in first level row
    dupe_mask = table.groupby(level=[0]).cumcount().ne(0)
    dupe_columns = ['isnull']
    optional_columns = ['pval','pval (adjusted)','ptest']
    for col in optional_columns:
        if col in table.columns.values:
            dupe_columns.append(col)
    table[dupe_columns] = table[dupe_columns].mask(dupe_mask).fillna('')
    # remove empty column added above (created by the n_row insertion)
    table.drop([''], axis=1, inplace=True)
    # remove isnull column if not needed
    if not self._isnull:
        table.drop('isnull',axis=1,inplace=True)
    # replace nans with empty strings
    table.fillna('',inplace=True)
    # add column index
    if not self._groupbylvls == ['overall']:
        # rename groupby variable if requested
        c = self._groupby
        if self._alt_labels:
            if self._groupby in self._alt_labels:
                c = self._alt_labels[self._groupby]
        c = 'Grouped by {}'.format(c)
        table.columns = pd.MultiIndex.from_product([[c], table.columns])
    # display alternative labels if assigned
    table.rename(index=self._create_row_labels(), inplace=True, level=0)
    # if a limit has been set on the number of categorical variables
    # limit the number of categorical variables that are displayed
    if self._limit:
        table = table.groupby('variable').head(self._limit)
    # re-order the columns in a consistent fashion
    if self._groupby:
        cols = table.columns.levels[1].values
    else:
        cols = table.columns.values
    if 'isnull' in cols:
        cols = ['isnull'] + [x for x in cols if x != 'isnull']
    # iterate through each optional column
    # if they exist, put them at the end of the dataframe
    # ensures the last 3 columns will be in the same order as optional_columns
    for col in optional_columns:
        if col in cols:
            cols = [x for x in cols if x != col] + [col]
    if self._groupby:
        table = table.reindex(cols, axis=1, level=1)
    else:
        table = table.reindex(cols, axis=1)
    return table
def _create_row_labels(self):
"""
Take the original labels for rows. Rename if alternative labels are
provided. Append label suffix if label_suffix is True.
Returns
----------
labels : dictionary
Dictionary, keys are original column name, values are final label.
"""
# start with the original column names
labels = {}
for c in self._columns:
labels[c] = c
# replace column names with alternative names if provided
if self._alt_labels:
for k in self._alt_labels.keys():
labels[k] = self._alt_labels[k]
# append the label suffix
if self._label_suffix:
for k in labels.keys():
if k in self._nonnormal:
labels[k] = "{}, {}".format(labels[k],"median [Q1,Q3]")
elif k in self._categorical:
labels[k] = "{}, {}".format(labels[k],"n (%)")
else:
labels[k] = "{}, {}".format(labels[k],"mean (SD)")
return labels
# warnings
def _non_continuous_warning(self, c):
    # Warn that a column treated as continuous contains no numeric
    # data at all; the user should probably declare it categorical.
    warnings.warn('''"{}" has all non-numeric values. Consider including it in the
list of categorical variables.'''.format(c), RuntimeWarning, stacklevel=2)
|
tompollard/tableone
|
tableone.py
|
TableOne._create_cont_table
|
python
|
def _create_cont_table(self,data):
# remove the t1_summary level
table = self.cont_describe[['t1_summary']].copy()
table.columns = table.columns.droplevel(level=0)
# add a column of null counts as 1-count() from previous function
nulltable = data[self._continuous].isnull().sum().to_frame(name='isnull')
try:
table = table.join(nulltable)
except TypeError: # if columns form a CategoricalIndex, need to convert to string first
table.columns = table.columns.astype(str)
table = table.join(nulltable)
# add an empty level column, for joining with cat table
table['level'] = ''
table.set_index([table.index,'level'],inplace=True)
# add pval column
if self._pval and self._pval_adjust:
table = table.join(self._significance_table[['pval (adjusted)','ptest']])
elif self._pval:
table = table.join(self._significance_table[['pval','ptest']])
return table
|
Create tableone for continuous data.
Returns
----------
table : pandas DataFrame
A table summarising the continuous variables.
|
train
|
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L642-L673
| null |
class TableOne(object):
"""
If you use the tableone package, please cite:
Pollard TJ, Johnson AEW, Raffa JD, Mark RG (2018). tableone: An open source Python
package for producing summary statistics for research papers. JAMIA Open, Volume 1,
Issue 1, 1 July 2018, Pages 26-31. https://doi.org/10.1093/jamiaopen/ooy012
Create an instance of the tableone summary table.
Parameters
----------
data : pandas DataFrame
The dataset to be summarised. Rows are observations, columns are variables.
columns : list, optional
List of columns in the dataset to be included in the final table.
categorical : list, optional
List of columns that contain categorical variables.
groupby : str, optional
Optional column for stratifying the final table (default: None).
nonnormal : list, optional
List of columns that contain non-normal variables (default: None).
pval : bool, optional
Display computed p-values (default: False).
pval_adjust : str, optional
Method used to adjust p-values for multiple testing.
For a complete list, see documentation for statsmodels multipletests.
Available methods include ::
`None` : no correction applied.
`bonferroni` : one-step correction
`sidak` : one-step correction
`holm-sidak` : step down method using Sidak adjustments
`simes-hochberg` : step-up method (independent)
`hommel` : closed method based on Simes tests (non-negative)
isnull : bool, optional
Display a count of null values (default: True).
ddof : int, optional
Degrees of freedom for standard deviation calculations (default: 1).
labels : dict, optional
Dictionary of alternative labels for variables.
e.g. `labels = {'sex':'gender', 'trt':'treatment'}`
sort : bool, optional
Sort the rows alphabetically. Default (False) retains the input order
of columns.
limit : int, optional
Limit to the top N most frequent categories.
remarks : bool, optional
Add remarks on the appropriateness of the summary measures and the
statistical tests (default: True).
label_suffix : bool, optional
Append summary type (e.g. "mean (SD); median [Q1,Q3], n (%); ") to the
row label (default: False).
decimals : int or dict, optional
Number of decimal places to display. An integer applies the rule to all
variables (default: 1). A dictionary (e.g. `decimals = {'age': 0)`) applies
the rule per variable, defaulting to 1 place for unspecified variables.
For continuous variables, applies to all summary statistics (e.g. mean and
standard deviation). For categorical variables, applies to percentage only.
Attributes
----------
tableone : dataframe
Summary of the data (i.e., the "Table 1").
"""
def __init__(self, data, columns=None, categorical=None, groupby=None,
        nonnormal=None, pval=False, pval_adjust=None, isnull=True,
        ddof=1, labels=None, sort=False, limit=None, remarks=True,
        label_suffix=False, decimals=1):
    """Validate the inputs and build the summary table; see the class docstring for argument details."""
    # check input arguments
    # normalise groupby to a single column name ('' means no grouping)
    if not groupby:
        groupby = ''
    elif groupby and type(groupby) == list:
        groupby = groupby[0]
    # normalise nonnormal to a list of column names
    if not nonnormal:
        nonnormal=[]
    elif nonnormal and type(nonnormal) == str:
        nonnormal = [nonnormal]
    # if columns not specified, use all columns
    # NOTE(review): Index.get_values() is deprecated (removed in
    # pandas 1.0); list(data.columns) is the modern equivalent —
    # confirm the supported pandas version.
    if not columns:
        columns = data.columns.get_values()
    # check that the columns exist in the dataframe
    if not set(columns).issubset(data.columns):
        notfound = list(set(columns) - set(data.columns))
        raise InputError('Columns not found in dataset: {}'.format(notfound))
    # check for duplicate columns
    dups = data[columns].columns[data[columns].columns.duplicated()].unique()
    if not dups.empty:
        raise InputError('Input contains duplicate columns: {}'.format(dups))
    # if categorical not specified, try to identify categorical
    if not categorical and type(categorical) != list:
        categorical = self._detect_categorical_columns(data[columns])
    if pval and not groupby:
        raise InputError("If pval=True then the groupby must be specified.")
    self._columns = list(columns)
    self._isnull = isnull
    # continuous = everything that is neither categorical nor the groupby
    self._continuous = [c for c in columns if c not in categorical + [groupby]]
    self._categorical = categorical
    self._nonnormal = nonnormal
    self._pval = pval
    self._pval_adjust = pval_adjust
    self._sort = sort
    self._groupby = groupby
    self._ddof = ddof # degrees of freedom for standard deviation
    self._alt_labels = labels
    self._limit = limit
    self._remarks = remarks
    self._label_suffix = label_suffix
    self._decimals = decimals
    # output column names that cannot be contained in a groupby
    self._reserved_columns = ['isnull', 'pval', 'ptest', 'pval (adjusted)']
    if self._groupby:
        self._groupbylvls = sorted(data.groupby(groupby).groups.keys())
        # check that the group levels do not include reserved words
        for level in self._groupbylvls:
            if level in self._reserved_columns:
                raise InputError('Group level contained "{}", a reserved keyword for tableone.'.format(level))
    else:
        self._groupbylvls = ['overall']
    # forgive me jraffa
    if self._pval:
        self._significance_table = self._create_significance_table(data)
    # correct for multiple testing
    if self._pval and self._pval_adjust:
        alpha=0.05
        adjusted = multitest.multipletests(self._significance_table['pval'],
            alpha=alpha, method=self._pval_adjust)
        self._significance_table['pval (adjusted)'] = adjusted[1]
        self._significance_table['adjust method'] = self._pval_adjust
    # create descriptive tables
    if self._categorical:
        self.cat_describe = self._create_cat_describe(data)
        self.cat_table = self._create_cat_table(data)
    # create tables of continuous and categorical variables
    if self._continuous:
        self.cont_describe = self._create_cont_describe(data)
        self.cont_table = self._create_cont_table(data)
    # combine continuous variables and categorical variables into table 1
    self.tableone = self._create_tableone(data)
    # self._remarks_str = self._generate_remark_str()
    # wrap dataframe methods so the table can be exported directly
    self.head = self.tableone.head
    self.tail = self.tableone.tail
    self.to_csv = self.tableone.to_csv
    self.to_excel = self.tableone.to_excel
    self.to_html = self.tableone.to_html
    self.to_json = self.tableone.to_json
    self.to_latex = self.tableone.to_latex
def __str__(self):
    """Return the table plus interpretive remarks as plain text."""
    return self.tableone.to_string() + self._generate_remark_str('\n')
def __repr__(self):
    """Return the table plus interpretive remarks as plain text."""
    return self.tableone.to_string() + self._generate_remark_str('\n')
def _repr_html_(self):
    """Return the table plus remarks as HTML (for Jupyter display)."""
    return self.tableone._repr_html_() + self._generate_remark_str('<br />')
def _generate_remark_str(self, end_of_line = '\n'):
"""
Generate a series of remarks that the user should consider
when interpreting the summary statistics.
"""
warnings = {}
msg = '{}'.format(end_of_line)
# generate warnings for continuous variables
if self._continuous:
# highlight far outliers
outlier_mask = self.cont_describe.far_outliers > 1
outlier_vars = list(self.cont_describe.far_outliers[outlier_mask].dropna(how='all').index)
if outlier_vars:
warnings["Warning, Tukey test indicates far outliers in"] = outlier_vars
# highlight possible multimodal distributions using hartigan's dip test
# -1 values indicate NaN
modal_mask = (self.cont_describe.diptest >= 0) & (self.cont_describe.diptest <= 0.05)
modal_vars = list(self.cont_describe.diptest[modal_mask].dropna(how='all').index)
if modal_vars:
warnings["Warning, Hartigan's Dip Test reports possible multimodal distributions for"] = modal_vars
# highlight non normal distributions
# -1 values indicate NaN
modal_mask = (self.cont_describe.normaltest >= 0) & (self.cont_describe.normaltest <= 0.001)
modal_vars = list(self.cont_describe.normaltest[modal_mask].dropna(how='all').index)
if modal_vars:
warnings["Warning, test for normality reports non-normal distributions for"] = modal_vars
# create the warning string
for n,k in enumerate(sorted(warnings)):
msg += '[{}] {}: {}.{}'.format(n+1,k,', '.join(warnings[k]), end_of_line)
return msg
def _detect_categorical_columns(self,data):
    """
    Detect categorical columns if they are not specified.

    Parameters
    ----------
    data : pandas DataFrame
        The input dataset.

    Returns
    ----------
    likely_cat : list
        List of variables that appear to be categorical.
    """
    # assume all non-numerical and date columns are categorical
    numeric_cols = set(data._get_numeric_data().columns.values)
    date_cols = set(data.select_dtypes(include=[np.datetime64]).columns)
    likely_cat = set(data.columns) - numeric_cols
    likely_cat = list(likely_cat - date_cols)
    # check proportion of unique values if numerical: fewer than 5%
    # distinct values suggests coded levels rather than measurements
    for var in data._get_numeric_data().columns:
        likely_flag = 1.0 * data[var].nunique()/data[var].count() < 0.05
        if likely_flag:
            likely_cat.append(var)
    return likely_cat
def _q25(self,x):
    """
    Compute percentile (25th).

    NaN values are ignored (uses ``np.nanpercentile``).
    """
    return np.nanpercentile(x.values,25)
def _q75(self,x):
    """
    Compute percentile (75th).

    NaN values are ignored (uses ``np.nanpercentile``).
    """
    return np.nanpercentile(x.values,75)
def _std(self,x):
    """
    Compute standard deviation with ddof degrees of freedom.

    NaN values are ignored (uses ``np.nanstd``).
    """
    return np.nanstd(x.values,ddof=self._ddof)
def _diptest(self,x):
    """
    Compute Hartigan Dip Test for modality.

    p < 0.05 suggests possible multimodality.
    """
    p = modality.hartigan_diptest(x.values)
    # dropna=False argument in pivot_table does not function as expected
    # return -1 instead of None
    if pd.isnull(p):
        return -1
    return p
def _normaltest(self,x):
    """
    Compute test for normal distribution.

    Null hypothesis: x comes from a normal distribution
    p < alpha suggests the null hypothesis can be rejected.
    """
    # the test requires more than 10 non-NaN observations
    if len(x.values[~np.isnan(x.values)]) > 10:
        stat,p = stats.normaltest(x.values, nan_policy='omit')
    else:
        p = None
    # dropna=False argument in pivot_table does not function as expected
    # return -1 instead of None
    if pd.isnull(p):
        return -1
    return p
def _tukey(self,x,threshold):
    """
    Count outliers according to Tukey's rule.

    Where Q1 is the lower quartile and Q3 is the upper quartile,
    an outlier is an observation outside of the range:
    [Q1 - k(Q3 - Q1), Q3 + k(Q3 - Q1)]

    k = 1.5 indicates an outlier
    k = 3.0 indicates an outlier that is "far out"
    """
    # work on the NaN-free values only
    vals = x.values[~np.isnan(x.values)]
    try:
        q1, q3 = np.percentile(vals, [25, 75])
        iqr = q3 - q1
        low_bound = q1 - (iqr * threshold)
        high_bound = q3 + (iqr * threshold)
        outliers = np.where((vals > high_bound) | (vals < low_bound))
    # NOTE(review): bare except silently swallows *all* errors (not just
    # the empty-input case) and reports no outliers — consider narrowing
    # to (IndexError, ValueError).
    except:
        outliers = []
    return outliers
def _outliers(self,x):
    """
    Compute number of outliers (Tukey's rule with k = 1.5).
    """
    outliers = self._tukey(x, threshold = 1.5)
    return np.size(outliers)
def _far_outliers(self,x):
    """
    Compute number of "far out" outliers (Tukey's rule with k = 3.0).
    """
    outliers = self._tukey(x, threshold = 3.0)
    return np.size(outliers)
def _t1_summary(self,x):
    """
    Compute median [IQR] or mean (Std) for the input series.

    Parameters
    ----------
    x : pandas Series
        Series of values to be summarised.
    """
    # set decimal places
    if isinstance(self._decimals,int):
        n = self._decimals
    elif isinstance(self._decimals,dict):
        # NOTE(review): bare except hides errors other than a missing
        # key — consider narrowing to KeyError.
        try:
            n = self._decimals[x.name]
        except:
            n = 1
    else:
        n = 1
        warnings.warn('The decimals arg must be an int or dict. Defaulting to {} d.p.'.format(n))
    # non-normal variables are reported as median [Q1,Q3]
    if x.name in self._nonnormal:
        f = '{{:.{}f}} [{{:.{}f}},{{:.{}f}}]'.format(n,n,n)
        return f.format(np.nanmedian(x.values),
            np.nanpercentile(x.values,25), np.nanpercentile(x.values,75))
    # all other variables are reported as mean (SD)
    else:
        f = '{{:.{}f}} ({{:.{}f}})'.format(n,n)
        return f.format(np.nanmean(x.values),
            np.nanstd(x.values,ddof=self._ddof))
def _create_cont_describe(self,data):
    """
    Describe the continuous data.

    Parameters
    ----------
    data : pandas DataFrame
        The input dataset.

    Returns
    ----------
    df_cont : pandas DataFrame
        Summarise the continuous variables.
    """
    # one aggregation per summary statistic; the private helpers
    # (_std, _q25, ...) become column names and are de-underscored below
    aggfuncs = [pd.Series.count,np.mean,np.median,self._std,
        self._q25,self._q75,min,max,self._t1_summary,self._diptest,
        self._outliers,self._far_outliers,self._normaltest]
    # coerce continuous data to numeric
    cont_data = data[self._continuous].apply(pd.to_numeric, errors='coerce')
    # check all data in each continuous column is numeric: coercion
    # reduces count() wherever a non-numeric value became NaN
    bad_cols = cont_data.count() != data[self._continuous].count()
    bad_cols = cont_data.columns[bad_cols]
    if len(bad_cols)>0:
        raise InputError("""The following continuous column(s) have non-numeric values: {}.
Either specify the column(s) as categorical or remove the non-numeric values.""".format(bad_cols.values))
    # check for coerced column containing all NaN to warn user
    for column in cont_data.columns[cont_data.count() == 0]:
        self._non_continuous_warning(column)
    if self._groupby:
        # add the groupby column back
        cont_data = cont_data.merge(data[[self._groupby]],
            left_index=True, right_index=True)
        # group and aggregate data
        df_cont = pd.pivot_table(cont_data,
            columns=[self._groupby],
            aggfunc=aggfuncs)
    else:
        # if no groupby, just add single group column
        df_cont = cont_data.apply(aggfuncs).T
        df_cont.columns.name = 'overall'
        df_cont.columns = pd.MultiIndex.from_product([df_cont.columns,
            ['overall']])
    df_cont.index.rename('variable',inplace=True)
    # remove prefix underscore from column names (e.g. _std -> std)
    agg_rename = df_cont.columns.levels[0]
    agg_rename = [x[1:] if x[0]=='_' else x for x in agg_rename]
    df_cont.columns.set_levels(agg_rename, level=0, inplace=True)
    return df_cont
def _format_cat(self,row):
    """
    Format the ``percent`` value of a category row using the
    per-variable decimal places in ``self._decimals`` (default 1).
    """
    # row.name is a (variable, level) tuple
    var = row.name[0]
    if var in self._decimals:
        n = self._decimals[var]
    else:
        n = 1
    f = '{{:.{}f}}'.format(n)
    return f.format(row.percent)
def _create_cat_describe(self,data):
    """
    Describe the categorical data.

    Parameters
    ----------
    data : pandas DataFrame
        The input dataset.

    Returns
    ----------
    df_cat : pandas DataFrame
        Summarise the categorical variables.
    """
    group_dict = {}
    # build one frequency table per group level, then concatenate
    for g in self._groupbylvls:
        if self._groupby:
            d_slice = data.loc[data[self._groupby] == g, self._categorical]
        else:
            d_slice = data[self._categorical].copy()
        # create a dataframe with freq, proportion
        df = d_slice.copy()
        # convert type to string to avoid int converted to boolean, avoiding nans
        for column in df.columns:
            df[column] = [str(row) if not pd.isnull(row) else None for row in df[column].values]
        df = df.melt().groupby(['variable','value']).size().to_frame(name='freq')
        df.index.set_names('level', level=1, inplace=True)
        # percentage within each variable (level 0 of the row index)
        df['percent'] = df['freq'].div(df.freq.sum(level=0),level=0).astype(float)* 100
        # set number of decimal places for percent
        if isinstance(self._decimals,int):
            n = self._decimals
            f = '{{:.{}f}}'.format(n)
            df['percent'] = df['percent'].astype(float).map(f.format)
        elif isinstance(self._decimals,dict):
            # per-variable decimal places
            df.loc[:,'percent'] = df.apply(self._format_cat, axis=1)
        else:
            n = 1
            f = '{{:.{}f}}'.format(n)
            df['percent'] = df['percent'].astype(float).map(f.format)
        # add n column, listing total non-null values for each variable
        ct = d_slice.count().to_frame(name='n')
        ct.index.name = 'variable'
        df = df.join(ct)
        # add null count
        nulls = d_slice.isnull().sum().to_frame(name='isnull')
        nulls.index.name = 'variable'
        # only save null count to the first category for each variable
        # do this by extracting the first category from the df row index
        levels = df.reset_index()[['variable','level']].groupby('variable').first()
        # add this category to the nulls table
        nulls = nulls.join(levels)
        nulls.set_index('level', append=True, inplace=True)
        # join nulls to categorical
        df = df.join(nulls)
        # add summary column
        df['t1_summary'] = df.freq.map(str) + ' (' + df.percent.map(str) + ')'
        # add to dictionary
        group_dict[g] = df
    df_cat = pd.concat(group_dict,axis=1)
    # ensure the groups are the 2nd level of the column index
    if df_cat.columns.nlevels>1:
        df_cat = df_cat.swaplevel(0, 1, axis=1).sort_index(axis=1,level=0)
    return df_cat
def _create_significance_table(self,data):
    """
    Create a table containing p-values for significance tests. Add features of
    the distributions and the p-values to the dataframe.

    Parameters
    ----------
    data : pandas DataFrame
        The input dataset.

    Returns
    ----------
    df : pandas DataFrame
        A table containing the p-values, test name, etc.
    """
    # list features of the variable e.g. matched, paired, n_expected
    df=pd.DataFrame(index=self._continuous+self._categorical,
        columns=['continuous','nonnormal','min_observed','pval','ptest'])
    df.index.rename('variable', inplace=True)
    df['continuous'] = np.where(df.index.isin(self._continuous),True,False)
    df['nonnormal'] = np.where(df.index.isin(self._nonnormal),True,False)
    # list values for each variable, grouped by groupby levels
    for v in df.index:
        is_continuous = df.loc[v]['continuous']
        # ~ negates the numpy booleans stored by np.where above
        is_categorical = ~df.loc[v]['continuous']
        is_normal = ~df.loc[v]['nonnormal']
        # if continuous, group data into list of lists
        if is_continuous:
            catlevels = None
            grouped_data = []
            for s in self._groupbylvls:
                lvl_data = data.loc[data[self._groupby]==s, v]
                # coerce to numeric and drop non-numeric data
                lvl_data = lvl_data.apply(pd.to_numeric, errors='coerce').dropna()
                # append to overall group data
                grouped_data.append(lvl_data.values)
            min_observed = len(min(grouped_data,key=len))
        # if categorical, create contingency table
        elif is_categorical:
            catlevels = sorted(data[v].astype('category').cat.categories)
            grouped_data = pd.crosstab(data[self._groupby].rename('_groupby_var_'),data[v])
            min_observed = grouped_data.sum(axis=1).min()
        # minimum number of observations across all levels
        df.loc[v,'min_observed'] = min_observed
        # compute pvalues
        df.loc[v,'pval'],df.loc[v,'ptest'] = self._p_test(v,
            grouped_data,is_continuous,is_categorical,
            is_normal,min_observed,catlevels)
    return df
def _p_test(self,v,grouped_data,is_continuous,is_categorical,
is_normal,min_observed,catlevels):
"""
Compute p-values.
Parameters
----------
v : str
Name of the variable to be tested.
grouped_data : list
List of lists of values to be tested.
is_continuous : bool
True if the variable is continuous.
is_categorical : bool
True if the variable is categorical.
is_normal : bool
True if the variable is normally distributed.
min_observed : int
Minimum number of values across groups for the variable.
catlevels : list
Sorted list of levels for categorical variables.
Returns
----------
pval : float
The computed p-value.
ptest : str
The name of the test used to compute the p-value.
"""
# no test by default
pval=np.nan
ptest='Not tested'
# do not test if the variable has no observations in a level
if min_observed == 0:
warnings.warn('No p-value was computed for {} due to the low number of observations.'.format(v))
return pval,ptest
# continuous
if is_continuous and is_normal and len(grouped_data)==2 :
ptest = 'Two Sample T-test'
test_stat, pval = stats.ttest_ind(*grouped_data,equal_var=False)
elif is_continuous and is_normal:
# normally distributed
ptest = 'One-way ANOVA'
test_stat, pval = stats.f_oneway(*grouped_data)
elif is_continuous and not is_normal:
# non-normally distributed
ptest = 'Kruskal-Wallis'
test_stat, pval = stats.kruskal(*grouped_data)
# categorical
elif is_categorical:
# default to chi-squared
ptest = 'Chi-squared'
chi2, pval, dof, expected = stats.chi2_contingency(grouped_data)
# if any expected cell counts are < 5, chi2 may not be valid
# if this is a 2x2, switch to fisher exact
if expected.min() < 5:
if grouped_data.shape == (2,2):
ptest = "Fisher's exact"
oddsratio, pval = stats.fisher_exact(grouped_data)
else:
ptest = 'Chi-squared (warning: expected count < 5)'
warnings.warn('No p-value was computed for {} due to the low number of observations.'.format(v))
return pval,ptest
def _create_cat_table(self,data):
"""
Create table one for categorical data.
Returns
----------
table : pandas DataFrame
A table summarising the categorical variables.
"""
table = self.cat_describe['t1_summary'].copy()
# add the total count of null values across all levels
isnull = data[self._categorical].isnull().sum().to_frame(name='isnull')
isnull.index.rename('variable', inplace=True)
try:
table = table.join(isnull)
except TypeError: # if columns form a CategoricalIndex, need to convert to string first
table.columns = table.columns.astype(str)
table = table.join(isnull)
# add pval column
if self._pval and self._pval_adjust:
table = table.join(self._significance_table[['pval (adjusted)','ptest']])
elif self._pval:
table = table.join(self._significance_table[['pval','ptest']])
return table
    def _create_tableone(self,data):
        """
        Create table 1 by combining the continuous and categorical tables.
        Parameters
        ----------
        data : pandas DataFrame
            The input dataset.
        Returns
        ----------
        table : pandas DataFrame
            The complete table one.
        """
        # stack the continuous table on top of the categorical table
        if self._continuous and self._categorical:
            # support pandas<=0.22 (no `sort` argument on concat)
            try:
                table = pd.concat([self.cont_table,self.cat_table],sort=False)
            except:
                table = pd.concat([self.cont_table,self.cat_table])
        elif self._continuous:
            table = self.cont_table
        elif self._categorical:
            table = self.cat_table
        # round pval column and convert to string
        if self._pval and self._pval_adjust:
            table['pval (adjusted)'] = table['pval (adjusted)'].apply('{:.3f}'.format).astype(str)
            table.loc[table['pval (adjusted)'] == '0.000', 'pval (adjusted)'] = '<0.001'
        elif self._pval:
            table['pval'] = table['pval'].apply('{:.3f}'.format).astype(str)
            table.loc[table['pval'] == '0.000', 'pval'] = '<0.001'
        # sort the table rows
        # NOTE(review): reset_index() returns a new frame, so this inplace
        # set_index modifies a temporary and leaves `table` unchanged. It is
        # harmless only because the index is already (variable, level).
        # Likely intended: table = table.reset_index().set_index([...]) --
        # confirm before changing.
        table.reset_index().set_index(['variable','level'], inplace=True)
        if self._sort:
            # alphabetical
            new_index = sorted(table.index.values)
        else:
            # sort by the columns argument
            new_index = sorted(table.index.values,key=lambda x: self._columns.index(x[0]))
        table = table.reindex(new_index)
        # if a limit has been set on the number of categorical variables
        # then re-order the variables by frequency
        if self._limit:
            levelcounts = data[self._categorical].nunique()
            levelcounts = levelcounts[levelcounts >= self._limit]
            # NOTE(review): Series.iteritems() is removed in pandas>=2.0
            # (use .items()); confirm the supported pandas range.
            for v,_ in levelcounts.iteritems():
                count = data[v].value_counts().sort_values(ascending=False)
                new_index = [(v, i) for i in count.index]
                # restructure to match orig_index
                new_index_array=np.empty((len(new_index),), dtype=object)
                new_index_array[:]=[tuple(i) for i in new_index]
                orig_index = table.index.values.copy()
                orig_index[table.index.get_loc(v)] = new_index_array
                table = table.reindex(orig_index)
        # inserts n row
        n_row = pd.DataFrame(columns = ['variable','level','isnull'])
        n_row.set_index(['variable','level'], inplace=True)
        # this creates an empty-string column which is dropped again below
        n_row.loc['n', ''] = None
        # support pandas<=0.22 (no `sort` argument on concat)
        try:
            table = pd.concat([n_row,table],sort=False)
        except:
            table = pd.concat([n_row,table])
        if self._groupbylvls == ['overall']:
            table.loc['n','overall'] = len(data.index)
        else:
            for g in self._groupbylvls:
                ct = data[self._groupby][data[self._groupby]==g].count()
                table.loc['n',g] = ct
        # only display data in first level row
        dupe_mask = table.groupby(level=[0]).cumcount().ne(0)
        dupe_columns = ['isnull']
        optional_columns = ['pval','pval (adjusted)','ptest']
        for col in optional_columns:
            if col in table.columns.values:
                dupe_columns.append(col)
        table[dupe_columns] = table[dupe_columns].mask(dupe_mask).fillna('')
        # remove empty column added above
        table.drop([''], axis=1, inplace=True)
        # remove isnull column if not needed
        if not self._isnull:
            table.drop('isnull',axis=1,inplace=True)
        # replace nans with empty strings
        table.fillna('',inplace=True)
        # add column index
        if not self._groupbylvls == ['overall']:
            # rename groupby variable if requested
            c = self._groupby
            if self._alt_labels:
                if self._groupby in self._alt_labels:
                    c = self._alt_labels[self._groupby]
            c = 'Grouped by {}'.format(c)
            table.columns = pd.MultiIndex.from_product([[c], table.columns])
        # display alternative labels if assigned
        table.rename(index=self._create_row_labels(), inplace=True, level=0)
        # if a limit has been set on the number of categorical variables
        # limit the number of categorical variables that are displayed
        if self._limit:
            table = table.groupby('variable').head(self._limit)
        # re-order the columns in a consistent fashion
        if self._groupby:
            cols = table.columns.levels[1].values
        else:
            cols = table.columns.values
        # isnull always goes first
        if 'isnull' in cols:
            cols = ['isnull'] + [x for x in cols if x != 'isnull']
        # iterate through each optional column
        # if they exist, put them at the end of the dataframe
        # ensures the last 3 columns will be in the same order as optional_columns
        for col in optional_columns:
            if col in cols:
                cols = [x for x in cols if x != col] + [col]
        if self._groupby:
            table = table.reindex(cols, axis=1, level=1)
        else:
            table = table.reindex(cols, axis=1)
        return table
def _create_row_labels(self):
"""
Take the original labels for rows. Rename if alternative labels are
provided. Append label suffix if label_suffix is True.
Returns
----------
labels : dictionary
Dictionary, keys are original column name, values are final label.
"""
# start with the original column names
labels = {}
for c in self._columns:
labels[c] = c
# replace column names with alternative names if provided
if self._alt_labels:
for k in self._alt_labels.keys():
labels[k] = self._alt_labels[k]
# append the label suffix
if self._label_suffix:
for k in labels.keys():
if k in self._nonnormal:
labels[k] = "{}, {}".format(labels[k],"median [Q1,Q3]")
elif k in self._categorical:
labels[k] = "{}, {}".format(labels[k],"n (%)")
else:
labels[k] = "{}, {}".format(labels[k],"mean (SD)")
return labels
    # warnings
    def _non_continuous_warning(self, c):
        # Warn that column *c* could not be summarised as continuous because
        # every value failed numeric coercion.
        warnings.warn('''"{}" has all non-numeric values. Consider including it in the
list of categorical variables.'''.format(c), RuntimeWarning, stacklevel=2)
|
tompollard/tableone
|
tableone.py
|
TableOne._create_cat_table
|
python
|
def _create_cat_table(self,data):
table = self.cat_describe['t1_summary'].copy()
# add the total count of null values across all levels
isnull = data[self._categorical].isnull().sum().to_frame(name='isnull')
isnull.index.rename('variable', inplace=True)
try:
table = table.join(isnull)
except TypeError: # if columns form a CategoricalIndex, need to convert to string first
table.columns = table.columns.astype(str)
table = table.join(isnull)
# add pval column
if self._pval and self._pval_adjust:
table = table.join(self._significance_table[['pval (adjusted)','ptest']])
elif self._pval:
table = table.join(self._significance_table[['pval','ptest']])
return table
|
Create table one for categorical data.
Returns
----------
table : pandas DataFrame
A table summarising the categorical variables.
|
train
|
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L675-L700
| null |
class TableOne(object):
"""
If you use the tableone package, please cite:
Pollard TJ, Johnson AEW, Raffa JD, Mark RG (2018). tableone: An open source Python
package for producing summary statistics for research papers. JAMIA Open, Volume 1,
Issue 1, 1 July 2018, Pages 26-31. https://doi.org/10.1093/jamiaopen/ooy012
Create an instance of the tableone summary table.
Parameters
----------
data : pandas DataFrame
The dataset to be summarised. Rows are observations, columns are variables.
columns : list, optional
List of columns in the dataset to be included in the final table.
categorical : list, optional
List of columns that contain categorical variables.
groupby : str, optional
Optional column for stratifying the final table (default: None).
nonnormal : list, optional
List of columns that contain non-normal variables (default: None).
pval : bool, optional
Display computed p-values (default: False).
pval_adjust : str, optional
Method used to adjust p-values for multiple testing.
For a complete list, see documentation for statsmodels multipletests.
Available methods include ::
`None` : no correction applied.
`bonferroni` : one-step correction
`sidak` : one-step correction
`holm-sidak` : step down method using Sidak adjustments
`simes-hochberg` : step-up method (independent)
`hommel` : closed method based on Simes tests (non-negative)
isnull : bool, optional
Display a count of null values (default: True).
ddof : int, optional
Degrees of freedom for standard deviation calculations (default: 1).
labels : dict, optional
Dictionary of alternative labels for variables.
e.g. `labels = {'sex':'gender', 'trt':'treatment'}`
sort : bool, optional
Sort the rows alphabetically. Default (False) retains the input order
of columns.
limit : int, optional
Limit to the top N most frequent categories.
remarks : bool, optional
Add remarks on the appropriateness of the summary measures and the
statistical tests (default: True).
label_suffix : bool, optional
Append summary type (e.g. "mean (SD); median [Q1,Q3], n (%); ") to the
row label (default: False).
decimals : int or dict, optional
Number of decimal places to display. An integer applies the rule to all
variables (default: 1). A dictionary (e.g. `decimals = {'age': 0)`) applies
the rule per variable, defaulting to 1 place for unspecified variables.
For continuous variables, applies to all summary statistics (e.g. mean and
standard deviation). For categorical variables, applies to percentage only.
Attributes
----------
tableone : dataframe
Summary of the data (i.e., the "Table 1").
"""
    def __init__(self, data, columns=None, categorical=None, groupby=None,
            nonnormal=None, pval=False, pval_adjust=None, isnull=True,
            ddof=1, labels=None, sort=False, limit=None, remarks=True,
            label_suffix=False, decimals=1):
        """
        Validate the inputs, compute the descriptive and significance tables,
        and build the final table one. See the class docstring for parameter
        descriptions. Raises InputError for missing/duplicate columns, for
        pval without groupby, and for reserved group level names.
        """
        # normalise groupby to a single column name ('' means no grouping)
        if not groupby:
            groupby = ''
        elif groupby and type(groupby) == list:
            groupby = groupby[0]
        # normalise nonnormal to a list of column names
        if not nonnormal:
            nonnormal=[]
        elif nonnormal and type(nonnormal) == str:
            nonnormal = [nonnormal]
        # if columns not specified, use all columns
        # NOTE(review): Index.get_values() is deprecated and removed in
        # pandas>=1.0; confirm the supported pandas range before changing.
        if not columns:
            columns = data.columns.get_values()
        # check that the columns exist in the dataframe
        if not set(columns).issubset(data.columns):
            notfound = list(set(columns) - set(data.columns))
            raise InputError('Columns not found in dataset: {}'.format(notfound))
        # check for duplicate columns
        dups = data[columns].columns[data[columns].columns.duplicated()].unique()
        if not dups.empty:
            raise InputError('Input contains duplicate columns: {}'.format(dups))
        # if categorical not specified, try to identify categorical
        # (an explicitly passed empty list disables auto-detection)
        if not categorical and type(categorical) != list:
            categorical = self._detect_categorical_columns(data[columns])
        if pval and not groupby:
            raise InputError("If pval=True then the groupby must be specified.")
        self._columns = list(columns)
        self._isnull = isnull
        # continuous = everything that is neither categorical nor the groupby
        self._continuous = [c for c in columns if c not in categorical + [groupby]]
        self._categorical = categorical
        self._nonnormal = nonnormal
        self._pval = pval
        self._pval_adjust = pval_adjust
        self._sort = sort
        self._groupby = groupby
        self._ddof = ddof # degrees of freedom for standard deviation
        self._alt_labels = labels
        self._limit = limit
        self._remarks = remarks
        self._label_suffix = label_suffix
        self._decimals = decimals
        # output column names that cannot be contained in a groupby
        self._reserved_columns = ['isnull', 'pval', 'ptest', 'pval (adjusted)']
        if self._groupby:
            self._groupbylvls = sorted(data.groupby(groupby).groups.keys())
            # check that the group levels do not include reserved words
            for level in self._groupbylvls:
                if level in self._reserved_columns:
                    raise InputError('Group level contained "{}", a reserved keyword for tableone.'.format(level))
        else:
            self._groupbylvls = ['overall']
        # forgive me jraffa
        if self._pval:
            self._significance_table = self._create_significance_table(data)
        # correct for multiple testing
        if self._pval and self._pval_adjust:
            alpha=0.05
            adjusted = multitest.multipletests(self._significance_table['pval'],
                alpha=alpha, method=self._pval_adjust)
            self._significance_table['pval (adjusted)'] = adjusted[1]
            self._significance_table['adjust method'] = self._pval_adjust
        # create descriptive tables
        if self._categorical:
            self.cat_describe = self._create_cat_describe(data)
            self.cat_table = self._create_cat_table(data)
        # create tables of continuous and categorical variables
        if self._continuous:
            self.cont_describe = self._create_cont_describe(data)
            self.cont_table = self._create_cont_table(data)
        # combine continuous variables and categorical variables into table 1
        self.tableone = self._create_tableone(data)
        # self._remarks_str = self._generate_remark_str()
        # wrap dataframe methods so they can be called on the instance
        self.head = self.tableone.head
        self.tail = self.tableone.tail
        self.to_csv = self.tableone.to_csv
        self.to_excel = self.tableone.to_excel
        self.to_html = self.tableone.to_html
        self.to_json = self.tableone.to_json
        self.to_latex = self.tableone.to_latex
    def __str__(self):
        # plain-text table followed by the interpretation remarks
        return self.tableone.to_string() + self._generate_remark_str('\n')
    def __repr__(self):
        # same representation as __str__: table plus remarks
        return self.tableone.to_string() + self._generate_remark_str('\n')
    def _repr_html_(self):
        # HTML rendering for Jupyter, with remarks separated by <br />
        return self.tableone._repr_html_() + self._generate_remark_str('<br />')
    def _generate_remark_str(self, end_of_line = '\n'):
        """
        Generate a series of remarks that the user should consider
        when interpreting the summary statistics.
        end_of_line is the separator placed between remarks (newline for
        plain text, an HTML break for notebook rendering).
        """
        # NOTE: this local dict shadows the `warnings` module within this
        # method; the module itself is not used here.
        warnings = {}
        msg = '{}'.format(end_of_line)
        # generate warnings for continuous variables
        if self._continuous:
            # highlight far outliers
            outlier_mask = self.cont_describe.far_outliers > 1
            outlier_vars = list(self.cont_describe.far_outliers[outlier_mask].dropna(how='all').index)
            if outlier_vars:
                warnings["Warning, Tukey test indicates far outliers in"] = outlier_vars
            # highlight possible multimodal distributions using hartigan's dip test
            # -1 values indicate NaN
            modal_mask = (self.cont_describe.diptest >= 0) & (self.cont_describe.diptest <= 0.05)
            modal_vars = list(self.cont_describe.diptest[modal_mask].dropna(how='all').index)
            if modal_vars:
                warnings["Warning, Hartigan's Dip Test reports possible multimodal distributions for"] = modal_vars
            # highlight non normal distributions
            # -1 values indicate NaN
            modal_mask = (self.cont_describe.normaltest >= 0) & (self.cont_describe.normaltest <= 0.001)
            modal_vars = list(self.cont_describe.normaltest[modal_mask].dropna(how='all').index)
            if modal_vars:
                warnings["Warning, test for normality reports non-normal distributions for"] = modal_vars
        # create the warning string
        for n,k in enumerate(sorted(warnings)):
            msg += '[{}] {}: {}.{}'.format(n+1,k,', '.join(warnings[k]), end_of_line)
        return msg
def _detect_categorical_columns(self,data):
"""
Detect categorical columns if they are not specified.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
likely_cat : list
List of variables that appear to be categorical.
"""
# assume all non-numerical and date columns are categorical
numeric_cols = set(data._get_numeric_data().columns.values)
date_cols = set(data.select_dtypes(include=[np.datetime64]).columns)
likely_cat = set(data.columns) - numeric_cols
likely_cat = list(likely_cat - date_cols)
# check proportion of unique values if numerical
for var in data._get_numeric_data().columns:
likely_flag = 1.0 * data[var].nunique()/data[var].count() < 0.05
if likely_flag:
likely_cat.append(var)
return likely_cat
def _q25(self,x):
"""
Compute percentile (25th)
"""
return np.nanpercentile(x.values,25)
def _q75(self,x):
"""
Compute percentile (75th)
"""
return np.nanpercentile(x.values,75)
def _std(self,x):
"""
Compute standard deviation with ddof degrees of freedom
"""
return np.nanstd(x.values,ddof=self._ddof)
def _diptest(self,x):
"""
Compute Hartigan Dip Test for modality.
p < 0.05 suggests possible multimodality.
"""
p = modality.hartigan_diptest(x.values)
# dropna=False argument in pivot_table does not function as expected
# return -1 instead of None
if pd.isnull(p):
return -1
return p
def _normaltest(self,x):
"""
Compute test for normal distribution.
Null hypothesis: x comes from a normal distribution
p < alpha suggests the null hypothesis can be rejected.
"""
if len(x.values[~np.isnan(x.values)]) > 10:
stat,p = stats.normaltest(x.values, nan_policy='omit')
else:
p = None
# dropna=False argument in pivot_table does not function as expected
# return -1 instead of None
if pd.isnull(p):
return -1
return p
def _tukey(self,x,threshold):
"""
Count outliers according to Tukey's rule.
Where Q1 is the lower quartile and Q3 is the upper quartile,
an outlier is an observation outside of the range:
[Q1 - k(Q3 - Q1), Q3 + k(Q3 - Q1)]
k = 1.5 indicates an outlier
k = 3.0 indicates an outlier that is "far out"
"""
vals = x.values[~np.isnan(x.values)]
try:
q1, q3 = np.percentile(vals, [25, 75])
iqr = q3 - q1
low_bound = q1 - (iqr * threshold)
high_bound = q3 + (iqr * threshold)
outliers = np.where((vals > high_bound) | (vals < low_bound))
except:
outliers = []
return outliers
def _outliers(self,x):
"""
Compute number of outliers
"""
outliers = self._tukey(x, threshold = 1.5)
return np.size(outliers)
def _far_outliers(self,x):
"""
Compute number of "far out" outliers
"""
outliers = self._tukey(x, threshold = 3.0)
return np.size(outliers)
def _t1_summary(self,x):
"""
Compute median [IQR] or mean (Std) for the input series.
Parameters
----------
x : pandas Series
Series of values to be summarised.
"""
# set decimal places
if isinstance(self._decimals,int):
n = self._decimals
elif isinstance(self._decimals,dict):
try:
n = self._decimals[x.name]
except:
n = 1
else:
n = 1
warnings.warn('The decimals arg must be an int or dict. Defaulting to {} d.p.'.format(n))
if x.name in self._nonnormal:
f = '{{:.{}f}} [{{:.{}f}},{{:.{}f}}]'.format(n,n,n)
return f.format(np.nanmedian(x.values),
np.nanpercentile(x.values,25), np.nanpercentile(x.values,75))
else:
f = '{{:.{}f}} ({{:.{}f}})'.format(n,n)
return f.format(np.nanmean(x.values),
np.nanstd(x.values,ddof=self._ddof))
    def _create_cont_describe(self,data):
        """
        Describe the continuous data.
        Parameters
        ----------
        data : pandas DataFrame
            The input dataset.
        Returns
        ----------
        df_cont : pandas DataFrame
            Summarise the continuous variables.
        """
        # aggregations applied to every continuous column
        aggfuncs = [pd.Series.count,np.mean,np.median,self._std,
            self._q25,self._q75,min,max,self._t1_summary,self._diptest,
            self._outliers,self._far_outliers,self._normaltest]
        # coerce continuous data to numeric
        cont_data = data[self._continuous].apply(pd.to_numeric, errors='coerce')
        # check all data in each continuous column is numeric: coercion drops
        # non-numeric values, so the counts differ if any were present
        bad_cols = cont_data.count() != data[self._continuous].count()
        bad_cols = cont_data.columns[bad_cols]
        if len(bad_cols)>0:
            raise InputError("""The following continuous column(s) have non-numeric values: {}.
Either specify the column(s) as categorical or remove the non-numeric values.""".format(bad_cols.values))
        # check for coerced column containing all NaN to warn user
        for column in cont_data.columns[cont_data.count() == 0]:
            self._non_continuous_warning(column)
        if self._groupby:
            # add the groupby column back
            cont_data = cont_data.merge(data[[self._groupby]],
                left_index=True, right_index=True)
            # group and aggregate data
            df_cont = pd.pivot_table(cont_data,
                columns=[self._groupby],
                aggfunc=aggfuncs)
        else:
            # if no groupby, just add single group column
            df_cont = cont_data.apply(aggfuncs).T
            df_cont.columns.name = 'overall'
            df_cont.columns = pd.MultiIndex.from_product([df_cont.columns,
                ['overall']])
        df_cont.index.rename('variable',inplace=True)
        # remove prefix underscore from column names (e.g. _std -> std)
        agg_rename = df_cont.columns.levels[0]
        agg_rename = [x[1:] if x[0]=='_' else x for x in agg_rename]
        # NOTE(review): MultiIndex.set_levels(..., inplace=True) is deprecated
        # in newer pandas; confirm the supported pandas range before changing.
        df_cont.columns.set_levels(agg_rename, level=0, inplace=True)
        return df_cont
def _format_cat(self,row):
var = row.name[0]
if var in self._decimals:
n = self._decimals[var]
else:
n = 1
f = '{{:.{}f}}'.format(n)
return f.format(row.percent)
    def _create_cat_describe(self,data):
        """
        Describe the categorical data.
        Parameters
        ----------
        data : pandas DataFrame
            The input dataset.
        Returns
        ----------
        df_cat : pandas DataFrame
            Summarise the categorical variables.
        """
        group_dict = {}
        # build one summary frame per group level, then concatenate
        for g in self._groupbylvls:
            if self._groupby:
                d_slice = data.loc[data[self._groupby] == g, self._categorical]
            else:
                d_slice = data[self._categorical].copy()
            # create a dataframe with freq, proportion
            df = d_slice.copy()
            # convert type to string to avoid int converted to boolean, avoiding nans
            for column in df.columns:
                df[column] = [str(row) if not pd.isnull(row) else None for row in df[column].values]
            # long format: one row per (variable, level) with its frequency
            df = df.melt().groupby(['variable','value']).size().to_frame(name='freq')
            df.index.set_names('level', level=1, inplace=True)
            # percent within each variable
            # NOTE(review): Series.sum(level=0) is deprecated in newer pandas
            # (use groupby(level=0).transform/sum); confirm supported range.
            df['percent'] = df['freq'].div(df.freq.sum(level=0),level=0).astype(float)* 100
            # set number of decimal places for percent
            if isinstance(self._decimals,int):
                n = self._decimals
                f = '{{:.{}f}}'.format(n)
                df['percent'] = df['percent'].astype(float).map(f.format)
            elif isinstance(self._decimals,dict):
                # per-variable decimal places
                df.loc[:,'percent'] = df.apply(self._format_cat, axis=1)
            else:
                # fall back to one decimal place
                n = 1
                f = '{{:.{}f}}'.format(n)
                df['percent'] = df['percent'].astype(float).map(f.format)
            # add n column, listing total non-null values for each variable
            ct = d_slice.count().to_frame(name='n')
            ct.index.name = 'variable'
            df = df.join(ct)
            # add null count
            nulls = d_slice.isnull().sum().to_frame(name='isnull')
            nulls.index.name = 'variable'
            # only save null count to the first category for each variable
            # do this by extracting the first category from the df row index
            levels = df.reset_index()[['variable','level']].groupby('variable').first()
            # add this category to the nulls table
            nulls = nulls.join(levels)
            nulls.set_index('level', append=True, inplace=True)
            # join nulls to categorical
            df = df.join(nulls)
            # add summary column, e.g. "12 (25.0)"
            df['t1_summary'] = df.freq.map(str) + ' (' + df.percent.map(str) + ')'
            # add to dictionary
            group_dict[g] = df
        df_cat = pd.concat(group_dict,axis=1)
        # ensure the groups are the 2nd level of the column index
        if df_cat.columns.nlevels>1:
            df_cat = df_cat.swaplevel(0, 1, axis=1).sort_index(axis=1,level=0)
        return df_cat
    def _create_significance_table(self,data):
        """
        Create a table containing p-values for significance tests. Add features of
        the distributions and the p-values to the dataframe.
        Parameters
        ----------
        data : pandas DataFrame
            The input dataset.
        Returns
        ----------
        df : pandas DataFrame
            A table containing the p-values, test name, etc.
        """
        # one row per variable; columns record distribution features + results
        df=pd.DataFrame(index=self._continuous+self._categorical,
            columns=['continuous','nonnormal','min_observed','pval','ptest'])
        df.index.rename('variable', inplace=True)
        # np.where yields numpy bools, which makes the `~` negation below safe
        df['continuous'] = np.where(df.index.isin(self._continuous),True,False)
        df['nonnormal'] = np.where(df.index.isin(self._nonnormal),True,False)
        # list values for each variable, grouped by groupby levels
        for v in df.index:
            is_continuous = df.loc[v]['continuous']
            # NOTE: `~` relies on these being numpy bools (set via np.where
            # above); with plain Python bools `~True` would evaluate to -2.
            is_categorical = ~df.loc[v]['continuous']
            is_normal = ~df.loc[v]['nonnormal']
            # if continuous, group data into list of lists
            if is_continuous:
                catlevels = None
                grouped_data = []
                for s in self._groupbylvls:
                    lvl_data = data.loc[data[self._groupby]==s, v]
                    # coerce to numeric and drop non-numeric data
                    lvl_data = lvl_data.apply(pd.to_numeric, errors='coerce').dropna()
                    # append to overall group data
                    grouped_data.append(lvl_data.values)
                # size of the smallest group
                min_observed = len(min(grouped_data,key=len))
            # if categorical, create contingency table
            elif is_categorical:
                catlevels = sorted(data[v].astype('category').cat.categories)
                # rename guards against the groupby column clashing with v
                grouped_data = pd.crosstab(data[self._groupby].rename('_groupby_var_'),data[v])
                min_observed = grouped_data.sum(axis=1).min()
            # minimum number of observations across all levels
            df.loc[v,'min_observed'] = min_observed
            # compute pvalues
            df.loc[v,'pval'],df.loc[v,'ptest'] = self._p_test(v,
                grouped_data,is_continuous,is_categorical,
                is_normal,min_observed,catlevels)
        return df
def _p_test(self,v,grouped_data,is_continuous,is_categorical,
is_normal,min_observed,catlevels):
"""
Compute p-values.
Parameters
----------
v : str
Name of the variable to be tested.
grouped_data : list
List of lists of values to be tested.
is_continuous : bool
True if the variable is continuous.
is_categorical : bool
True if the variable is categorical.
is_normal : bool
True if the variable is normally distributed.
min_observed : int
Minimum number of values across groups for the variable.
catlevels : list
Sorted list of levels for categorical variables.
Returns
----------
pval : float
The computed p-value.
ptest : str
The name of the test used to compute the p-value.
"""
# no test by default
pval=np.nan
ptest='Not tested'
# do not test if the variable has no observations in a level
if min_observed == 0:
warnings.warn('No p-value was computed for {} due to the low number of observations.'.format(v))
return pval,ptest
# continuous
if is_continuous and is_normal and len(grouped_data)==2 :
ptest = 'Two Sample T-test'
test_stat, pval = stats.ttest_ind(*grouped_data,equal_var=False)
elif is_continuous and is_normal:
# normally distributed
ptest = 'One-way ANOVA'
test_stat, pval = stats.f_oneway(*grouped_data)
elif is_continuous and not is_normal:
# non-normally distributed
ptest = 'Kruskal-Wallis'
test_stat, pval = stats.kruskal(*grouped_data)
# categorical
elif is_categorical:
# default to chi-squared
ptest = 'Chi-squared'
chi2, pval, dof, expected = stats.chi2_contingency(grouped_data)
# if any expected cell counts are < 5, chi2 may not be valid
# if this is a 2x2, switch to fisher exact
if expected.min() < 5:
if grouped_data.shape == (2,2):
ptest = "Fisher's exact"
oddsratio, pval = stats.fisher_exact(grouped_data)
else:
ptest = 'Chi-squared (warning: expected count < 5)'
warnings.warn('No p-value was computed for {} due to the low number of observations.'.format(v))
return pval,ptest
def _create_cont_table(self,data):
"""
Create tableone for continuous data.
Returns
----------
table : pandas DataFrame
A table summarising the continuous variables.
"""
# remove the t1_summary level
table = self.cont_describe[['t1_summary']].copy()
table.columns = table.columns.droplevel(level=0)
# add a column of null counts as 1-count() from previous function
nulltable = data[self._continuous].isnull().sum().to_frame(name='isnull')
try:
table = table.join(nulltable)
except TypeError: # if columns form a CategoricalIndex, need to convert to string first
table.columns = table.columns.astype(str)
table = table.join(nulltable)
# add an empty level column, for joining with cat table
table['level'] = ''
table.set_index([table.index,'level'],inplace=True)
# add pval column
if self._pval and self._pval_adjust:
table = table.join(self._significance_table[['pval (adjusted)','ptest']])
elif self._pval:
table = table.join(self._significance_table[['pval','ptest']])
return table
    def _create_tableone(self,data):
        """
        Create table 1 by combining the continuous and categorical tables.
        Parameters
        ----------
        data : pandas DataFrame
            The input dataset.
        Returns
        ----------
        table : pandas DataFrame
            The complete table one.
        """
        # stack the continuous table on top of the categorical table
        if self._continuous and self._categorical:
            # support pandas<=0.22 (no `sort` argument on concat)
            try:
                table = pd.concat([self.cont_table,self.cat_table],sort=False)
            except:
                table = pd.concat([self.cont_table,self.cat_table])
        elif self._continuous:
            table = self.cont_table
        elif self._categorical:
            table = self.cat_table
        # round pval column and convert to string
        if self._pval and self._pval_adjust:
            table['pval (adjusted)'] = table['pval (adjusted)'].apply('{:.3f}'.format).astype(str)
            table.loc[table['pval (adjusted)'] == '0.000', 'pval (adjusted)'] = '<0.001'
        elif self._pval:
            table['pval'] = table['pval'].apply('{:.3f}'.format).astype(str)
            table.loc[table['pval'] == '0.000', 'pval'] = '<0.001'
        # sort the table rows
        # NOTE(review): reset_index() returns a new frame, so this inplace
        # set_index modifies a temporary and leaves `table` unchanged. It is
        # harmless only because the index is already (variable, level).
        # Likely intended: table = table.reset_index().set_index([...]) --
        # confirm before changing.
        table.reset_index().set_index(['variable','level'], inplace=True)
        if self._sort:
            # alphabetical
            new_index = sorted(table.index.values)
        else:
            # sort by the columns argument
            new_index = sorted(table.index.values,key=lambda x: self._columns.index(x[0]))
        table = table.reindex(new_index)
        # if a limit has been set on the number of categorical variables
        # then re-order the variables by frequency
        if self._limit:
            levelcounts = data[self._categorical].nunique()
            levelcounts = levelcounts[levelcounts >= self._limit]
            # NOTE(review): Series.iteritems() is removed in pandas>=2.0
            # (use .items()); confirm the supported pandas range.
            for v,_ in levelcounts.iteritems():
                count = data[v].value_counts().sort_values(ascending=False)
                new_index = [(v, i) for i in count.index]
                # restructure to match orig_index
                new_index_array=np.empty((len(new_index),), dtype=object)
                new_index_array[:]=[tuple(i) for i in new_index]
                orig_index = table.index.values.copy()
                orig_index[table.index.get_loc(v)] = new_index_array
                table = table.reindex(orig_index)
        # inserts n row
        n_row = pd.DataFrame(columns = ['variable','level','isnull'])
        n_row.set_index(['variable','level'], inplace=True)
        # this creates an empty-string column which is dropped again below
        n_row.loc['n', ''] = None
        # support pandas<=0.22 (no `sort` argument on concat)
        try:
            table = pd.concat([n_row,table],sort=False)
        except:
            table = pd.concat([n_row,table])
        if self._groupbylvls == ['overall']:
            table.loc['n','overall'] = len(data.index)
        else:
            for g in self._groupbylvls:
                ct = data[self._groupby][data[self._groupby]==g].count()
                table.loc['n',g] = ct
        # only display data in first level row
        dupe_mask = table.groupby(level=[0]).cumcount().ne(0)
        dupe_columns = ['isnull']
        optional_columns = ['pval','pval (adjusted)','ptest']
        for col in optional_columns:
            if col in table.columns.values:
                dupe_columns.append(col)
        table[dupe_columns] = table[dupe_columns].mask(dupe_mask).fillna('')
        # remove empty column added above
        table.drop([''], axis=1, inplace=True)
        # remove isnull column if not needed
        if not self._isnull:
            table.drop('isnull',axis=1,inplace=True)
        # replace nans with empty strings
        table.fillna('',inplace=True)
        # add column index
        if not self._groupbylvls == ['overall']:
            # rename groupby variable if requested
            c = self._groupby
            if self._alt_labels:
                if self._groupby in self._alt_labels:
                    c = self._alt_labels[self._groupby]
            c = 'Grouped by {}'.format(c)
            table.columns = pd.MultiIndex.from_product([[c], table.columns])
        # display alternative labels if assigned
        table.rename(index=self._create_row_labels(), inplace=True, level=0)
        # if a limit has been set on the number of categorical variables
        # limit the number of categorical variables that are displayed
        if self._limit:
            table = table.groupby('variable').head(self._limit)
        # re-order the columns in a consistent fashion
        if self._groupby:
            cols = table.columns.levels[1].values
        else:
            cols = table.columns.values
        # isnull always goes first
        if 'isnull' in cols:
            cols = ['isnull'] + [x for x in cols if x != 'isnull']
        # iterate through each optional column
        # if they exist, put them at the end of the dataframe
        # ensures the last 3 columns will be in the same order as optional_columns
        for col in optional_columns:
            if col in cols:
                cols = [x for x in cols if x != col] + [col]
        if self._groupby:
            table = table.reindex(cols, axis=1, level=1)
        else:
            table = table.reindex(cols, axis=1)
        return table
def _create_row_labels(self):
"""
Take the original labels for rows. Rename if alternative labels are
provided. Append label suffix if label_suffix is True.
Returns
----------
labels : dictionary
Dictionary, keys are original column name, values are final label.
"""
# start with the original column names
labels = {}
for c in self._columns:
labels[c] = c
# replace column names with alternative names if provided
if self._alt_labels:
for k in self._alt_labels.keys():
labels[k] = self._alt_labels[k]
# append the label suffix
if self._label_suffix:
for k in labels.keys():
if k in self._nonnormal:
labels[k] = "{}, {}".format(labels[k],"median [Q1,Q3]")
elif k in self._categorical:
labels[k] = "{}, {}".format(labels[k],"n (%)")
else:
labels[k] = "{}, {}".format(labels[k],"mean (SD)")
return labels
# warnings
def _non_continuous_warning(self, c):
warnings.warn('''"{}" has all non-numeric values. Consider including it in the
list of categorical variables.'''.format(c), RuntimeWarning, stacklevel=2)
|
tompollard/tableone
|
tableone.py
|
TableOne._create_tableone
|
python
|
def _create_tableone(self,data):
if self._continuous and self._categorical:
# support pandas<=0.22
try:
table = pd.concat([self.cont_table,self.cat_table],sort=False)
except:
table = pd.concat([self.cont_table,self.cat_table])
elif self._continuous:
table = self.cont_table
elif self._categorical:
table = self.cat_table
# round pval column and convert to string
if self._pval and self._pval_adjust:
table['pval (adjusted)'] = table['pval (adjusted)'].apply('{:.3f}'.format).astype(str)
table.loc[table['pval (adjusted)'] == '0.000', 'pval (adjusted)'] = '<0.001'
elif self._pval:
table['pval'] = table['pval'].apply('{:.3f}'.format).astype(str)
table.loc[table['pval'] == '0.000', 'pval'] = '<0.001'
# sort the table rows
table.reset_index().set_index(['variable','level'], inplace=True)
if self._sort:
# alphabetical
new_index = sorted(table.index.values)
else:
# sort by the columns argument
new_index = sorted(table.index.values,key=lambda x: self._columns.index(x[0]))
table = table.reindex(new_index)
# if a limit has been set on the number of categorical variables
# then re-order the variables by frequency
if self._limit:
levelcounts = data[self._categorical].nunique()
levelcounts = levelcounts[levelcounts >= self._limit]
for v,_ in levelcounts.iteritems():
count = data[v].value_counts().sort_values(ascending=False)
new_index = [(v, i) for i in count.index]
# restructure to match orig_index
new_index_array=np.empty((len(new_index),), dtype=object)
new_index_array[:]=[tuple(i) for i in new_index]
orig_index = table.index.values.copy()
orig_index[table.index.get_loc(v)] = new_index_array
table = table.reindex(orig_index)
# inserts n row
n_row = pd.DataFrame(columns = ['variable','level','isnull'])
n_row.set_index(['variable','level'], inplace=True)
n_row.loc['n', ''] = None
# support pandas<=0.22
try:
table = pd.concat([n_row,table],sort=False)
except:
table = pd.concat([n_row,table])
if self._groupbylvls == ['overall']:
table.loc['n','overall'] = len(data.index)
else:
for g in self._groupbylvls:
ct = data[self._groupby][data[self._groupby]==g].count()
table.loc['n',g] = ct
# only display data in first level row
dupe_mask = table.groupby(level=[0]).cumcount().ne(0)
dupe_columns = ['isnull']
optional_columns = ['pval','pval (adjusted)','ptest']
for col in optional_columns:
if col in table.columns.values:
dupe_columns.append(col)
table[dupe_columns] = table[dupe_columns].mask(dupe_mask).fillna('')
# remove empty column added above
table.drop([''], axis=1, inplace=True)
# remove isnull column if not needed
if not self._isnull:
table.drop('isnull',axis=1,inplace=True)
# replace nans with empty strings
table.fillna('',inplace=True)
# add column index
if not self._groupbylvls == ['overall']:
# rename groupby variable if requested
c = self._groupby
if self._alt_labels:
if self._groupby in self._alt_labels:
c = self._alt_labels[self._groupby]
c = 'Grouped by {}'.format(c)
table.columns = pd.MultiIndex.from_product([[c], table.columns])
# display alternative labels if assigned
table.rename(index=self._create_row_labels(), inplace=True, level=0)
# if a limit has been set on the number of categorical variables
# limit the number of categorical variables that are displayed
if self._limit:
table = table.groupby('variable').head(self._limit)
# re-order the columns in a consistent fashion
if self._groupby:
cols = table.columns.levels[1].values
else:
cols = table.columns.values
if 'isnull' in cols:
cols = ['isnull'] + [x for x in cols if x != 'isnull']
# iterate through each optional column
# if they exist, put them at the end of the dataframe
# ensures the last 3 columns will be in the same order as optional_columns
for col in optional_columns:
if col in cols:
cols = [x for x in cols if x != col] + [col]
if self._groupby:
table = table.reindex(cols, axis=1, level=1)
else:
table = table.reindex(cols, axis=1)
return table
|
Create table 1 by combining the continuous and categorical tables.
Returns
----------
table : pandas DataFrame
The complete table one.
|
train
|
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L702-L834
| null |
class TableOne(object):
"""
If you use the tableone package, please cite:
Pollard TJ, Johnson AEW, Raffa JD, Mark RG (2018). tableone: An open source Python
package for producing summary statistics for research papers. JAMIA Open, Volume 1,
Issue 1, 1 July 2018, Pages 26-31. https://doi.org/10.1093/jamiaopen/ooy012
Create an instance of the tableone summary table.
Parameters
----------
data : pandas DataFrame
The dataset to be summarised. Rows are observations, columns are variables.
columns : list, optional
List of columns in the dataset to be included in the final table.
categorical : list, optional
List of columns that contain categorical variables.
groupby : str, optional
Optional column for stratifying the final table (default: None).
nonnormal : list, optional
List of columns that contain non-normal variables (default: None).
pval : bool, optional
Display computed p-values (default: False).
pval_adjust : str, optional
Method used to adjust p-values for multiple testing.
For a complete list, see documentation for statsmodels multipletests.
Available methods include ::
`None` : no correction applied.
`bonferroni` : one-step correction
`sidak` : one-step correction
`holm-sidak` : step down method using Sidak adjustments
`simes-hochberg` : step-up method (independent)
`hommel` : closed method based on Simes tests (non-negative)
isnull : bool, optional
Display a count of null values (default: True).
ddof : int, optional
Degrees of freedom for standard deviation calculations (default: 1).
labels : dict, optional
Dictionary of alternative labels for variables.
e.g. `labels = {'sex':'gender', 'trt':'treatment'}`
sort : bool, optional
Sort the rows alphabetically. Default (False) retains the input order
of columns.
limit : int, optional
Limit to the top N most frequent categories.
remarks : bool, optional
Add remarks on the appropriateness of the summary measures and the
statistical tests (default: True).
label_suffix : bool, optional
Append summary type (e.g. "mean (SD); median [Q1,Q3], n (%); ") to the
row label (default: False).
decimals : int or dict, optional
Number of decimal places to display. An integer applies the rule to all
variables (default: 1). A dictionary (e.g. `decimals = {'age': 0)`) applies
the rule per variable, defaulting to 1 place for unspecified variables.
For continuous variables, applies to all summary statistics (e.g. mean and
standard deviation). For categorical variables, applies to percentage only.
Attributes
----------
tableone : dataframe
Summary of the data (i.e., the "Table 1").
"""
def __init__(self, data, columns=None, categorical=None, groupby=None,
nonnormal=None, pval=False, pval_adjust=None, isnull=True,
ddof=1, labels=None, sort=False, limit=None, remarks=True,
label_suffix=False, decimals=1):
# check input arguments
if not groupby:
groupby = ''
elif groupby and type(groupby) == list:
groupby = groupby[0]
if not nonnormal:
nonnormal=[]
elif nonnormal and type(nonnormal) == str:
nonnormal = [nonnormal]
# if columns not specified, use all columns
if not columns:
columns = data.columns.get_values()
# check that the columns exist in the dataframe
if not set(columns).issubset(data.columns):
notfound = list(set(columns) - set(data.columns))
raise InputError('Columns not found in dataset: {}'.format(notfound))
# check for duplicate columns
dups = data[columns].columns[data[columns].columns.duplicated()].unique()
if not dups.empty:
raise InputError('Input contains duplicate columns: {}'.format(dups))
# if categorical not specified, try to identify categorical
if not categorical and type(categorical) != list:
categorical = self._detect_categorical_columns(data[columns])
if pval and not groupby:
raise InputError("If pval=True then the groupby must be specified.")
self._columns = list(columns)
self._isnull = isnull
self._continuous = [c for c in columns if c not in categorical + [groupby]]
self._categorical = categorical
self._nonnormal = nonnormal
self._pval = pval
self._pval_adjust = pval_adjust
self._sort = sort
self._groupby = groupby
self._ddof = ddof # degrees of freedom for standard deviation
self._alt_labels = labels
self._limit = limit
self._remarks = remarks
self._label_suffix = label_suffix
self._decimals = decimals
# output column names that cannot be contained in a groupby
self._reserved_columns = ['isnull', 'pval', 'ptest', 'pval (adjusted)']
if self._groupby:
self._groupbylvls = sorted(data.groupby(groupby).groups.keys())
# check that the group levels do not include reserved words
for level in self._groupbylvls:
if level in self._reserved_columns:
raise InputError('Group level contained "{}", a reserved keyword for tableone.'.format(level))
else:
self._groupbylvls = ['overall']
# forgive me jraffa
if self._pval:
self._significance_table = self._create_significance_table(data)
# correct for multiple testing
if self._pval and self._pval_adjust:
alpha=0.05
adjusted = multitest.multipletests(self._significance_table['pval'],
alpha=alpha, method=self._pval_adjust)
self._significance_table['pval (adjusted)'] = adjusted[1]
self._significance_table['adjust method'] = self._pval_adjust
# create descriptive tables
if self._categorical:
self.cat_describe = self._create_cat_describe(data)
self.cat_table = self._create_cat_table(data)
# create tables of continuous and categorical variables
if self._continuous:
self.cont_describe = self._create_cont_describe(data)
self.cont_table = self._create_cont_table(data)
# combine continuous variables and categorical variables into table 1
self.tableone = self._create_tableone(data)
# self._remarks_str = self._generate_remark_str()
# wrap dataframe methods
self.head = self.tableone.head
self.tail = self.tableone.tail
self.to_csv = self.tableone.to_csv
self.to_excel = self.tableone.to_excel
self.to_html = self.tableone.to_html
self.to_json = self.tableone.to_json
self.to_latex = self.tableone.to_latex
def __str__(self):
return self.tableone.to_string() + self._generate_remark_str('\n')
def __repr__(self):
return self.tableone.to_string() + self._generate_remark_str('\n')
def _repr_html_(self):
return self.tableone._repr_html_() + self._generate_remark_str('<br />')
def _generate_remark_str(self, end_of_line = '\n'):
"""
Generate a series of remarks that the user should consider
when interpreting the summary statistics.
"""
warnings = {}
msg = '{}'.format(end_of_line)
# generate warnings for continuous variables
if self._continuous:
# highlight far outliers
outlier_mask = self.cont_describe.far_outliers > 1
outlier_vars = list(self.cont_describe.far_outliers[outlier_mask].dropna(how='all').index)
if outlier_vars:
warnings["Warning, Tukey test indicates far outliers in"] = outlier_vars
# highlight possible multimodal distributions using hartigan's dip test
# -1 values indicate NaN
modal_mask = (self.cont_describe.diptest >= 0) & (self.cont_describe.diptest <= 0.05)
modal_vars = list(self.cont_describe.diptest[modal_mask].dropna(how='all').index)
if modal_vars:
warnings["Warning, Hartigan's Dip Test reports possible multimodal distributions for"] = modal_vars
# highlight non normal distributions
# -1 values indicate NaN
modal_mask = (self.cont_describe.normaltest >= 0) & (self.cont_describe.normaltest <= 0.001)
modal_vars = list(self.cont_describe.normaltest[modal_mask].dropna(how='all').index)
if modal_vars:
warnings["Warning, test for normality reports non-normal distributions for"] = modal_vars
# create the warning string
for n,k in enumerate(sorted(warnings)):
msg += '[{}] {}: {}.{}'.format(n+1,k,', '.join(warnings[k]), end_of_line)
return msg
def _detect_categorical_columns(self,data):
"""
Detect categorical columns if they are not specified.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
likely_cat : list
List of variables that appear to be categorical.
"""
# assume all non-numerical and date columns are categorical
numeric_cols = set(data._get_numeric_data().columns.values)
date_cols = set(data.select_dtypes(include=[np.datetime64]).columns)
likely_cat = set(data.columns) - numeric_cols
likely_cat = list(likely_cat - date_cols)
# check proportion of unique values if numerical
for var in data._get_numeric_data().columns:
likely_flag = 1.0 * data[var].nunique()/data[var].count() < 0.05
if likely_flag:
likely_cat.append(var)
return likely_cat
def _q25(self,x):
"""
Compute percentile (25th)
"""
return np.nanpercentile(x.values,25)
def _q75(self,x):
"""
Compute percentile (75th)
"""
return np.nanpercentile(x.values,75)
def _std(self,x):
"""
Compute standard deviation with ddof degrees of freedom
"""
return np.nanstd(x.values,ddof=self._ddof)
def _diptest(self,x):
"""
Compute Hartigan Dip Test for modality.
p < 0.05 suggests possible multimodality.
"""
p = modality.hartigan_diptest(x.values)
# dropna=False argument in pivot_table does not function as expected
# return -1 instead of None
if pd.isnull(p):
return -1
return p
def _normaltest(self,x):
"""
Compute test for normal distribution.
Null hypothesis: x comes from a normal distribution
p < alpha suggests the null hypothesis can be rejected.
"""
if len(x.values[~np.isnan(x.values)]) > 10:
stat,p = stats.normaltest(x.values, nan_policy='omit')
else:
p = None
# dropna=False argument in pivot_table does not function as expected
# return -1 instead of None
if pd.isnull(p):
return -1
return p
def _tukey(self,x,threshold):
"""
Count outliers according to Tukey's rule.
Where Q1 is the lower quartile and Q3 is the upper quartile,
an outlier is an observation outside of the range:
[Q1 - k(Q3 - Q1), Q3 + k(Q3 - Q1)]
k = 1.5 indicates an outlier
k = 3.0 indicates an outlier that is "far out"
"""
vals = x.values[~np.isnan(x.values)]
try:
q1, q3 = np.percentile(vals, [25, 75])
iqr = q3 - q1
low_bound = q1 - (iqr * threshold)
high_bound = q3 + (iqr * threshold)
outliers = np.where((vals > high_bound) | (vals < low_bound))
except:
outliers = []
return outliers
def _outliers(self,x):
"""
Compute number of outliers
"""
outliers = self._tukey(x, threshold = 1.5)
return np.size(outliers)
def _far_outliers(self,x):
"""
Compute number of "far out" outliers
"""
outliers = self._tukey(x, threshold = 3.0)
return np.size(outliers)
def _t1_summary(self,x):
"""
Compute median [IQR] or mean (Std) for the input series.
Parameters
----------
x : pandas Series
Series of values to be summarised.
"""
# set decimal places
if isinstance(self._decimals,int):
n = self._decimals
elif isinstance(self._decimals,dict):
try:
n = self._decimals[x.name]
except:
n = 1
else:
n = 1
warnings.warn('The decimals arg must be an int or dict. Defaulting to {} d.p.'.format(n))
if x.name in self._nonnormal:
f = '{{:.{}f}} [{{:.{}f}},{{:.{}f}}]'.format(n,n,n)
return f.format(np.nanmedian(x.values),
np.nanpercentile(x.values,25), np.nanpercentile(x.values,75))
else:
f = '{{:.{}f}} ({{:.{}f}})'.format(n,n)
return f.format(np.nanmean(x.values),
np.nanstd(x.values,ddof=self._ddof))
def _create_cont_describe(self,data):
"""
Describe the continuous data.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
df_cont : pandas DataFrame
Summarise the continuous variables.
"""
aggfuncs = [pd.Series.count,np.mean,np.median,self._std,
self._q25,self._q75,min,max,self._t1_summary,self._diptest,
self._outliers,self._far_outliers,self._normaltest]
# coerce continuous data to numeric
cont_data = data[self._continuous].apply(pd.to_numeric, errors='coerce')
# check all data in each continuous column is numeric
bad_cols = cont_data.count() != data[self._continuous].count()
bad_cols = cont_data.columns[bad_cols]
if len(bad_cols)>0:
raise InputError("""The following continuous column(s) have non-numeric values: {}.
Either specify the column(s) as categorical or remove the non-numeric values.""".format(bad_cols.values))
# check for coerced column containing all NaN to warn user
for column in cont_data.columns[cont_data.count() == 0]:
self._non_continuous_warning(column)
if self._groupby:
# add the groupby column back
cont_data = cont_data.merge(data[[self._groupby]],
left_index=True, right_index=True)
# group and aggregate data
df_cont = pd.pivot_table(cont_data,
columns=[self._groupby],
aggfunc=aggfuncs)
else:
# if no groupby, just add single group column
df_cont = cont_data.apply(aggfuncs).T
df_cont.columns.name = 'overall'
df_cont.columns = pd.MultiIndex.from_product([df_cont.columns,
['overall']])
df_cont.index.rename('variable',inplace=True)
# remove prefix underscore from column names (e.g. _std -> std)
agg_rename = df_cont.columns.levels[0]
agg_rename = [x[1:] if x[0]=='_' else x for x in agg_rename]
df_cont.columns.set_levels(agg_rename, level=0, inplace=True)
return df_cont
def _format_cat(self,row):
var = row.name[0]
if var in self._decimals:
n = self._decimals[var]
else:
n = 1
f = '{{:.{}f}}'.format(n)
return f.format(row.percent)
def _create_cat_describe(self,data):
"""
Describe the categorical data.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
df_cat : pandas DataFrame
Summarise the categorical variables.
"""
group_dict = {}
for g in self._groupbylvls:
if self._groupby:
d_slice = data.loc[data[self._groupby] == g, self._categorical]
else:
d_slice = data[self._categorical].copy()
# create a dataframe with freq, proportion
df = d_slice.copy()
# convert type to string to avoid int converted to boolean, avoiding nans
for column in df.columns:
df[column] = [str(row) if not pd.isnull(row) else None for row in df[column].values]
df = df.melt().groupby(['variable','value']).size().to_frame(name='freq')
df.index.set_names('level', level=1, inplace=True)
df['percent'] = df['freq'].div(df.freq.sum(level=0),level=0).astype(float)* 100
# set number of decimal places for percent
if isinstance(self._decimals,int):
n = self._decimals
f = '{{:.{}f}}'.format(n)
df['percent'] = df['percent'].astype(float).map(f.format)
elif isinstance(self._decimals,dict):
df.loc[:,'percent'] = df.apply(self._format_cat, axis=1)
else:
n = 1
f = '{{:.{}f}}'.format(n)
df['percent'] = df['percent'].astype(float).map(f.format)
# add n column, listing total non-null values for each variable
ct = d_slice.count().to_frame(name='n')
ct.index.name = 'variable'
df = df.join(ct)
# add null count
nulls = d_slice.isnull().sum().to_frame(name='isnull')
nulls.index.name = 'variable'
# only save null count to the first category for each variable
# do this by extracting the first category from the df row index
levels = df.reset_index()[['variable','level']].groupby('variable').first()
# add this category to the nulls table
nulls = nulls.join(levels)
nulls.set_index('level', append=True, inplace=True)
# join nulls to categorical
df = df.join(nulls)
# add summary column
df['t1_summary'] = df.freq.map(str) + ' (' + df.percent.map(str) + ')'
# add to dictionary
group_dict[g] = df
df_cat = pd.concat(group_dict,axis=1)
# ensure the groups are the 2nd level of the column index
if df_cat.columns.nlevels>1:
df_cat = df_cat.swaplevel(0, 1, axis=1).sort_index(axis=1,level=0)
return df_cat
def _create_significance_table(self,data):
"""
Create a table containing p-values for significance tests. Add features of
the distributions and the p-values to the dataframe.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
df : pandas DataFrame
A table containing the p-values, test name, etc.
"""
# list features of the variable e.g. matched, paired, n_expected
df=pd.DataFrame(index=self._continuous+self._categorical,
columns=['continuous','nonnormal','min_observed','pval','ptest'])
df.index.rename('variable', inplace=True)
df['continuous'] = np.where(df.index.isin(self._continuous),True,False)
df['nonnormal'] = np.where(df.index.isin(self._nonnormal),True,False)
# list values for each variable, grouped by groupby levels
for v in df.index:
is_continuous = df.loc[v]['continuous']
is_categorical = ~df.loc[v]['continuous']
is_normal = ~df.loc[v]['nonnormal']
# if continuous, group data into list of lists
if is_continuous:
catlevels = None
grouped_data = []
for s in self._groupbylvls:
lvl_data = data.loc[data[self._groupby]==s, v]
# coerce to numeric and drop non-numeric data
lvl_data = lvl_data.apply(pd.to_numeric, errors='coerce').dropna()
# append to overall group data
grouped_data.append(lvl_data.values)
min_observed = len(min(grouped_data,key=len))
# if categorical, create contingency table
elif is_categorical:
catlevels = sorted(data[v].astype('category').cat.categories)
grouped_data = pd.crosstab(data[self._groupby].rename('_groupby_var_'),data[v])
min_observed = grouped_data.sum(axis=1).min()
# minimum number of observations across all levels
df.loc[v,'min_observed'] = min_observed
# compute pvalues
df.loc[v,'pval'],df.loc[v,'ptest'] = self._p_test(v,
grouped_data,is_continuous,is_categorical,
is_normal,min_observed,catlevels)
return df
def _p_test(self,v,grouped_data,is_continuous,is_categorical,
is_normal,min_observed,catlevels):
"""
Compute p-values.
Parameters
----------
v : str
Name of the variable to be tested.
grouped_data : list
List of lists of values to be tested.
is_continuous : bool
True if the variable is continuous.
is_categorical : bool
True if the variable is categorical.
is_normal : bool
True if the variable is normally distributed.
min_observed : int
Minimum number of values across groups for the variable.
catlevels : list
Sorted list of levels for categorical variables.
Returns
----------
pval : float
The computed p-value.
ptest : str
The name of the test used to compute the p-value.
"""
# no test by default
pval=np.nan
ptest='Not tested'
# do not test if the variable has no observations in a level
if min_observed == 0:
warnings.warn('No p-value was computed for {} due to the low number of observations.'.format(v))
return pval,ptest
# continuous
if is_continuous and is_normal and len(grouped_data)==2 :
ptest = 'Two Sample T-test'
test_stat, pval = stats.ttest_ind(*grouped_data,equal_var=False)
elif is_continuous and is_normal:
# normally distributed
ptest = 'One-way ANOVA'
test_stat, pval = stats.f_oneway(*grouped_data)
elif is_continuous and not is_normal:
# non-normally distributed
ptest = 'Kruskal-Wallis'
test_stat, pval = stats.kruskal(*grouped_data)
# categorical
elif is_categorical:
# default to chi-squared
ptest = 'Chi-squared'
chi2, pval, dof, expected = stats.chi2_contingency(grouped_data)
# if any expected cell counts are < 5, chi2 may not be valid
# if this is a 2x2, switch to fisher exact
if expected.min() < 5:
if grouped_data.shape == (2,2):
ptest = "Fisher's exact"
oddsratio, pval = stats.fisher_exact(grouped_data)
else:
ptest = 'Chi-squared (warning: expected count < 5)'
warnings.warn('No p-value was computed for {} due to the low number of observations.'.format(v))
return pval,ptest
def _create_cont_table(self,data):
"""
Create tableone for continuous data.
Returns
----------
table : pandas DataFrame
A table summarising the continuous variables.
"""
# remove the t1_summary level
table = self.cont_describe[['t1_summary']].copy()
table.columns = table.columns.droplevel(level=0)
# add a column of null counts as 1-count() from previous function
nulltable = data[self._continuous].isnull().sum().to_frame(name='isnull')
try:
table = table.join(nulltable)
except TypeError: # if columns form a CategoricalIndex, need to convert to string first
table.columns = table.columns.astype(str)
table = table.join(nulltable)
# add an empty level column, for joining with cat table
table['level'] = ''
table.set_index([table.index,'level'],inplace=True)
# add pval column
if self._pval and self._pval_adjust:
table = table.join(self._significance_table[['pval (adjusted)','ptest']])
elif self._pval:
table = table.join(self._significance_table[['pval','ptest']])
return table
def _create_cat_table(self,data):
"""
Create table one for categorical data.
Returns
----------
table : pandas DataFrame
A table summarising the categorical variables.
"""
table = self.cat_describe['t1_summary'].copy()
# add the total count of null values across all levels
isnull = data[self._categorical].isnull().sum().to_frame(name='isnull')
isnull.index.rename('variable', inplace=True)
try:
table = table.join(isnull)
except TypeError: # if columns form a CategoricalIndex, need to convert to string first
table.columns = table.columns.astype(str)
table = table.join(isnull)
# add pval column
if self._pval and self._pval_adjust:
table = table.join(self._significance_table[['pval (adjusted)','ptest']])
elif self._pval:
table = table.join(self._significance_table[['pval','ptest']])
return table
def _create_row_labels(self):
"""
Take the original labels for rows. Rename if alternative labels are
provided. Append label suffix if label_suffix is True.
Returns
----------
labels : dictionary
Dictionary, keys are original column name, values are final label.
"""
# start with the original column names
labels = {}
for c in self._columns:
labels[c] = c
# replace column names with alternative names if provided
if self._alt_labels:
for k in self._alt_labels.keys():
labels[k] = self._alt_labels[k]
# append the label suffix
if self._label_suffix:
for k in labels.keys():
if k in self._nonnormal:
labels[k] = "{}, {}".format(labels[k],"median [Q1,Q3]")
elif k in self._categorical:
labels[k] = "{}, {}".format(labels[k],"n (%)")
else:
labels[k] = "{}, {}".format(labels[k],"mean (SD)")
return labels
# warnings
def _non_continuous_warning(self, c):
warnings.warn('''"{}" has all non-numeric values. Consider including it in the
list of categorical variables.'''.format(c), RuntimeWarning, stacklevel=2)
|
tompollard/tableone
|
tableone.py
|
TableOne._create_row_labels
|
python
|
def _create_row_labels(self):
# start with the original column names
labels = {}
for c in self._columns:
labels[c] = c
# replace column names with alternative names if provided
if self._alt_labels:
for k in self._alt_labels.keys():
labels[k] = self._alt_labels[k]
# append the label suffix
if self._label_suffix:
for k in labels.keys():
if k in self._nonnormal:
labels[k] = "{}, {}".format(labels[k],"median [Q1,Q3]")
elif k in self._categorical:
labels[k] = "{}, {}".format(labels[k],"n (%)")
else:
labels[k] = "{}, {}".format(labels[k],"mean (SD)")
return labels
|
Take the original labels for rows. Rename if alternative labels are
provided. Append label suffix if label_suffix is True.
Returns
----------
labels : dictionary
Dictionary, keys are original column name, values are final label.
|
train
|
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L836-L867
| null |
class TableOne(object):
"""
If you use the tableone package, please cite:
Pollard TJ, Johnson AEW, Raffa JD, Mark RG (2018). tableone: An open source Python
package for producing summary statistics for research papers. JAMIA Open, Volume 1,
Issue 1, 1 July 2018, Pages 26-31. https://doi.org/10.1093/jamiaopen/ooy012
Create an instance of the tableone summary table.
Parameters
----------
data : pandas DataFrame
The dataset to be summarised. Rows are observations, columns are variables.
columns : list, optional
List of columns in the dataset to be included in the final table.
categorical : list, optional
List of columns that contain categorical variables.
groupby : str, optional
Optional column for stratifying the final table (default: None).
nonnormal : list, optional
List of columns that contain non-normal variables (default: None).
pval : bool, optional
Display computed p-values (default: False).
pval_adjust : str, optional
Method used to adjust p-values for multiple testing.
For a complete list, see documentation for statsmodels multipletests.
Available methods include ::
`None` : no correction applied.
`bonferroni` : one-step correction
`sidak` : one-step correction
`holm-sidak` : step down method using Sidak adjustments
`simes-hochberg` : step-up method (independent)
`hommel` : closed method based on Simes tests (non-negative)
isnull : bool, optional
Display a count of null values (default: True).
ddof : int, optional
Degrees of freedom for standard deviation calculations (default: 1).
labels : dict, optional
Dictionary of alternative labels for variables.
e.g. `labels = {'sex':'gender', 'trt':'treatment'}`
sort : bool, optional
Sort the rows alphabetically. Default (False) retains the input order
of columns.
limit : int, optional
Limit to the top N most frequent categories.
remarks : bool, optional
Add remarks on the appropriateness of the summary measures and the
statistical tests (default: True).
label_suffix : bool, optional
Append summary type (e.g. "mean (SD); median [Q1,Q3], n (%); ") to the
row label (default: False).
decimals : int or dict, optional
Number of decimal places to display. An integer applies the rule to all
variables (default: 1). A dictionary (e.g. `decimals = {'age': 0)`) applies
the rule per variable, defaulting to 1 place for unspecified variables.
For continuous variables, applies to all summary statistics (e.g. mean and
standard deviation). For categorical variables, applies to percentage only.
Attributes
----------
tableone : dataframe
Summary of the data (i.e., the "Table 1").
"""
def __init__(self, data, columns=None, categorical=None, groupby=None,
nonnormal=None, pval=False, pval_adjust=None, isnull=True,
ddof=1, labels=None, sort=False, limit=None, remarks=True,
label_suffix=False, decimals=1):
# check input arguments
if not groupby:
groupby = ''
elif groupby and type(groupby) == list:
groupby = groupby[0]
if not nonnormal:
nonnormal=[]
elif nonnormal and type(nonnormal) == str:
nonnormal = [nonnormal]
# if columns not specified, use all columns
if not columns:
columns = data.columns.get_values()
# check that the columns exist in the dataframe
if not set(columns).issubset(data.columns):
notfound = list(set(columns) - set(data.columns))
raise InputError('Columns not found in dataset: {}'.format(notfound))
# check for duplicate columns
dups = data[columns].columns[data[columns].columns.duplicated()].unique()
if not dups.empty:
raise InputError('Input contains duplicate columns: {}'.format(dups))
# if categorical not specified, try to identify categorical
if not categorical and type(categorical) != list:
categorical = self._detect_categorical_columns(data[columns])
if pval and not groupby:
raise InputError("If pval=True then the groupby must be specified.")
self._columns = list(columns)
self._isnull = isnull
self._continuous = [c for c in columns if c not in categorical + [groupby]]
self._categorical = categorical
self._nonnormal = nonnormal
self._pval = pval
self._pval_adjust = pval_adjust
self._sort = sort
self._groupby = groupby
self._ddof = ddof # degrees of freedom for standard deviation
self._alt_labels = labels
self._limit = limit
self._remarks = remarks
self._label_suffix = label_suffix
self._decimals = decimals
# output column names that cannot be contained in a groupby
self._reserved_columns = ['isnull', 'pval', 'ptest', 'pval (adjusted)']
if self._groupby:
self._groupbylvls = sorted(data.groupby(groupby).groups.keys())
# check that the group levels do not include reserved words
for level in self._groupbylvls:
if level in self._reserved_columns:
raise InputError('Group level contained "{}", a reserved keyword for tableone.'.format(level))
else:
self._groupbylvls = ['overall']
# forgive me jraffa
if self._pval:
self._significance_table = self._create_significance_table(data)
# correct for multiple testing
if self._pval and self._pval_adjust:
alpha=0.05
adjusted = multitest.multipletests(self._significance_table['pval'],
alpha=alpha, method=self._pval_adjust)
self._significance_table['pval (adjusted)'] = adjusted[1]
self._significance_table['adjust method'] = self._pval_adjust
# create descriptive tables
if self._categorical:
self.cat_describe = self._create_cat_describe(data)
self.cat_table = self._create_cat_table(data)
# create tables of continuous and categorical variables
if self._continuous:
self.cont_describe = self._create_cont_describe(data)
self.cont_table = self._create_cont_table(data)
# combine continuous variables and categorical variables into table 1
self.tableone = self._create_tableone(data)
# self._remarks_str = self._generate_remark_str()
# wrap dataframe methods
self.head = self.tableone.head
self.tail = self.tableone.tail
self.to_csv = self.tableone.to_csv
self.to_excel = self.tableone.to_excel
self.to_html = self.tableone.to_html
self.to_json = self.tableone.to_json
self.to_latex = self.tableone.to_latex
def __str__(self):
    """Return the table rendered as plain text, followed by any remarks."""
    rendered = self.tableone.to_string()
    return rendered + self._generate_remark_str('\n')
def __repr__(self):
    """Use the same plain-text rendering as ``__str__``."""
    rendered = self.tableone.to_string()
    return rendered + self._generate_remark_str('\n')
def _repr_html_(self):
    """HTML rendering for Jupyter; remarks are separated with <br /> tags."""
    rendered = self.tableone._repr_html_()
    return rendered + self._generate_remark_str('<br />')
def _generate_remark_str(self, end_of_line = '\n'):
"""
Generate a series of remarks that the user should consider
when interpreting the summary statistics.
"""
warnings = {}
msg = '{}'.format(end_of_line)
# generate warnings for continuous variables
if self._continuous:
# highlight far outliers
outlier_mask = self.cont_describe.far_outliers > 1
outlier_vars = list(self.cont_describe.far_outliers[outlier_mask].dropna(how='all').index)
if outlier_vars:
warnings["Warning, Tukey test indicates far outliers in"] = outlier_vars
# highlight possible multimodal distributions using hartigan's dip test
# -1 values indicate NaN
modal_mask = (self.cont_describe.diptest >= 0) & (self.cont_describe.diptest <= 0.05)
modal_vars = list(self.cont_describe.diptest[modal_mask].dropna(how='all').index)
if modal_vars:
warnings["Warning, Hartigan's Dip Test reports possible multimodal distributions for"] = modal_vars
# highlight non normal distributions
# -1 values indicate NaN
modal_mask = (self.cont_describe.normaltest >= 0) & (self.cont_describe.normaltest <= 0.001)
modal_vars = list(self.cont_describe.normaltest[modal_mask].dropna(how='all').index)
if modal_vars:
warnings["Warning, test for normality reports non-normal distributions for"] = modal_vars
# create the warning string
for n,k in enumerate(sorted(warnings)):
msg += '[{}] {}: {}.{}'.format(n+1,k,', '.join(warnings[k]), end_of_line)
return msg
def _detect_categorical_columns(self,data):
"""
Detect categorical columns if they are not specified.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
likely_cat : list
List of variables that appear to be categorical.
"""
# assume all non-numerical and date columns are categorical
numeric_cols = set(data._get_numeric_data().columns.values)
date_cols = set(data.select_dtypes(include=[np.datetime64]).columns)
likely_cat = set(data.columns) - numeric_cols
likely_cat = list(likely_cat - date_cols)
# check proportion of unique values if numerical
for var in data._get_numeric_data().columns:
likely_flag = 1.0 * data[var].nunique()/data[var].count() < 0.05
if likely_flag:
likely_cat.append(var)
return likely_cat
def _q25(self,x):
"""
Compute percentile (25th)
"""
return np.nanpercentile(x.values,25)
def _q75(self,x):
"""
Compute percentile (75th)
"""
return np.nanpercentile(x.values,75)
def _std(self,x):
"""
Compute standard deviation with ddof degrees of freedom
"""
return np.nanstd(x.values,ddof=self._ddof)
def _diptest(self,x):
    """
    Hartigan Dip Test p-value for modality; p < 0.05 suggests possible
    multimodality. Returns -1 in place of NaN, because the dropna=False
    argument in pivot_table does not function as expected.
    """
    p = modality.hartigan_diptest(x.values)
    return -1 if pd.isnull(p) else p
def _normaltest(self,x):
"""
Compute test for normal distribution.
Null hypothesis: x comes from a normal distribution
p < alpha suggests the null hypothesis can be rejected.
"""
if len(x.values[~np.isnan(x.values)]) > 10:
stat,p = stats.normaltest(x.values, nan_policy='omit')
else:
p = None
# dropna=False argument in pivot_table does not function as expected
# return -1 instead of None
if pd.isnull(p):
return -1
return p
def _tukey(self,x,threshold):
"""
Count outliers according to Tukey's rule.
Where Q1 is the lower quartile and Q3 is the upper quartile,
an outlier is an observation outside of the range:
[Q1 - k(Q3 - Q1), Q3 + k(Q3 - Q1)]
k = 1.5 indicates an outlier
k = 3.0 indicates an outlier that is "far out"
"""
vals = x.values[~np.isnan(x.values)]
try:
q1, q3 = np.percentile(vals, [25, 75])
iqr = q3 - q1
low_bound = q1 - (iqr * threshold)
high_bound = q3 + (iqr * threshold)
outliers = np.where((vals > high_bound) | (vals < low_bound))
except:
outliers = []
return outliers
def _outliers(self,x):
    """Count outliers in x (Tukey rule with k = 1.5)."""
    flagged = self._tukey(x, threshold = 1.5)
    return np.size(flagged)
def _far_outliers(self,x):
    """Count "far out" outliers in x (Tukey rule with k = 3.0)."""
    flagged = self._tukey(x, threshold = 3.0)
    return np.size(flagged)
def _t1_summary(self,x):
"""
Compute median [IQR] or mean (Std) for the input series.
Parameters
----------
x : pandas Series
Series of values to be summarised.
"""
# set decimal places
if isinstance(self._decimals,int):
n = self._decimals
elif isinstance(self._decimals,dict):
try:
n = self._decimals[x.name]
except:
n = 1
else:
n = 1
warnings.warn('The decimals arg must be an int or dict. Defaulting to {} d.p.'.format(n))
if x.name in self._nonnormal:
f = '{{:.{}f}} [{{:.{}f}},{{:.{}f}}]'.format(n,n,n)
return f.format(np.nanmedian(x.values),
np.nanpercentile(x.values,25), np.nanpercentile(x.values,75))
else:
f = '{{:.{}f}} ({{:.{}f}})'.format(n,n)
return f.format(np.nanmean(x.values),
np.nanstd(x.values,ddof=self._ddof))
def _create_cont_describe(self,data):
    """
    Describe the continuous data.

    Aggregates each continuous column with count/mean/median/std/quartiles,
    the t1 summary string, and the dip/outlier/normality diagnostics,
    grouped by the groupby column when one is set.

    Parameters
    ----------
    data : pandas DataFrame
        The input dataset.

    Returns
    ----------
    df_cont : pandas DataFrame
        Summarise the continuous variables. Columns are a MultiIndex of
        (aggregation name, group level); index is named 'variable'.
    """
    aggfuncs = [pd.Series.count,np.mean,np.median,self._std,
        self._q25,self._q75,min,max,self._t1_summary,self._diptest,
        self._outliers,self._far_outliers,self._normaltest]
    # coerce continuous data to numeric
    cont_data = data[self._continuous].apply(pd.to_numeric, errors='coerce')
    # check all data in each continuous column is numeric
    # (a lower post-coercion count means some values were non-numeric)
    bad_cols = cont_data.count() != data[self._continuous].count()
    bad_cols = cont_data.columns[bad_cols]
    if len(bad_cols)>0:
        raise InputError("""The following continuous column(s) have non-numeric values: {}.
        Either specify the column(s) as categorical or remove the non-numeric values.""".format(bad_cols.values))
    # check for coerced column containing all NaN to warn user
    for column in cont_data.columns[cont_data.count() == 0]:
        self._non_continuous_warning(column)
    if self._groupby:
        # add the groupby column back
        cont_data = cont_data.merge(data[[self._groupby]],
            left_index=True, right_index=True)
        # group and aggregate data
        df_cont = pd.pivot_table(cont_data,
            columns=[self._groupby],
            aggfunc=aggfuncs)
    else:
        # if no groupby, just add single group column named 'overall'
        df_cont = cont_data.apply(aggfuncs).T
        df_cont.columns.name = 'overall'
        df_cont.columns = pd.MultiIndex.from_product([df_cont.columns,
            ['overall']])
    df_cont.index.rename('variable',inplace=True)
    # remove prefix underscore from column names (e.g. _std -> std)
    agg_rename = df_cont.columns.levels[0]
    agg_rename = [x[1:] if x[0]=='_' else x for x in agg_rename]
    df_cont.columns.set_levels(agg_rename, level=0, inplace=True)
    return df_cont
def _format_cat(self,row):
var = row.name[0]
if var in self._decimals:
n = self._decimals[var]
else:
n = 1
f = '{{:.{}f}}'.format(n)
return f.format(row.percent)
def _create_cat_describe(self,data):
    """
    Describe the categorical data.

    For each groupby level (or 'overall'), computes per (variable, level)
    frequency, percent, non-null count and null count, plus a combined
    't1_summary' string 'freq (percent)'.

    Parameters
    ----------
    data : pandas DataFrame
        The input dataset.

    Returns
    ----------
    df_cat : pandas DataFrame
        Summarise the categorical variables. Index is (variable, level);
        columns are a MultiIndex with the group as the second level.
    """
    group_dict = {}
    for g in self._groupbylvls:
        if self._groupby:
            d_slice = data.loc[data[self._groupby] == g, self._categorical]
        else:
            d_slice = data[self._categorical].copy()
        # create a dataframe with freq, proportion
        df = d_slice.copy()
        # convert type to string to avoid int converted to boolean, avoiding nans
        for column in df.columns:
            df[column] = [str(row) if not pd.isnull(row) else None for row in df[column].values]
        # long format: one row per (variable, level) with its frequency
        df = df.melt().groupby(['variable','value']).size().to_frame(name='freq')
        df.index.set_names('level', level=1, inplace=True)
        # percent within each variable (level 0 of the index)
        df['percent'] = df['freq'].div(df.freq.sum(level=0),level=0).astype(float)* 100
        # set number of decimal places for percent
        if isinstance(self._decimals,int):
            n = self._decimals
            f = '{{:.{}f}}'.format(n)
            df['percent'] = df['percent'].astype(float).map(f.format)
        elif isinstance(self._decimals,dict):
            # per-variable decimal places
            df.loc[:,'percent'] = df.apply(self._format_cat, axis=1)
        else:
            # fall back to 1 d.p. for any other decimals type
            n = 1
            f = '{{:.{}f}}'.format(n)
            df['percent'] = df['percent'].astype(float).map(f.format)
        # add n column, listing total non-null values for each variable
        ct = d_slice.count().to_frame(name='n')
        ct.index.name = 'variable'
        df = df.join(ct)
        # add null count
        nulls = d_slice.isnull().sum().to_frame(name='isnull')
        nulls.index.name = 'variable'
        # only save null count to the first category for each variable
        # do this by extracting the first category from the df row index
        levels = df.reset_index()[['variable','level']].groupby('variable').first()
        # add this category to the nulls table
        nulls = nulls.join(levels)
        nulls.set_index('level', append=True, inplace=True)
        # join nulls to categorical
        df = df.join(nulls)
        # add summary column
        df['t1_summary'] = df.freq.map(str) + ' (' + df.percent.map(str) + ')'
        # add to dictionary
        group_dict[g] = df
    df_cat = pd.concat(group_dict,axis=1)
    # ensure the groups are the 2nd level of the column index
    if df_cat.columns.nlevels>1:
        df_cat = df_cat.swaplevel(0, 1, axis=1).sort_index(axis=1,level=0)
    return df_cat
def _create_significance_table(self,data):
    """
    Create a table containing p-values for significance tests. Add features of
    the distributions and the p-values to the dataframe.

    Parameters
    ----------
    data : pandas DataFrame
        The input dataset.

    Returns
    ----------
    df : pandas DataFrame
        A table containing the p-values, test name, etc., indexed by
        'variable' with columns continuous/nonnormal/min_observed/pval/ptest.
    """
    # list features of the variable e.g. matched, paired, n_expected
    df=pd.DataFrame(index=self._continuous+self._categorical,
        columns=['continuous','nonnormal','min_observed','pval','ptest'])
    df.index.rename('variable', inplace=True)
    df['continuous'] = np.where(df.index.isin(self._continuous),True,False)
    df['nonnormal'] = np.where(df.index.isin(self._nonnormal),True,False)
    # list values for each variable, grouped by groupby levels
    for v in df.index:
        is_continuous = df.loc[v]['continuous']
        is_categorical = ~df.loc[v]['continuous']
        is_normal = ~df.loc[v]['nonnormal']
        # if continuous, group data into list of lists
        if is_continuous:
            catlevels = None
            grouped_data = []
            for s in self._groupbylvls:
                lvl_data = data.loc[data[self._groupby]==s, v]
                # coerce to numeric and drop non-numeric data
                lvl_data = lvl_data.apply(pd.to_numeric, errors='coerce').dropna()
                # append to overall group data
                grouped_data.append(lvl_data.values)
            min_observed = len(min(grouped_data,key=len))
        # if categorical, create contingency table
        elif is_categorical:
            catlevels = sorted(data[v].astype('category').cat.categories)
            # rename avoids a clash when the tested variable is the groupby column
            grouped_data = pd.crosstab(data[self._groupby].rename('_groupby_var_'),data[v])
            min_observed = grouped_data.sum(axis=1).min()
        # minimum number of observations across all levels
        df.loc[v,'min_observed'] = min_observed
        # compute pvalues
        df.loc[v,'pval'],df.loc[v,'ptest'] = self._p_test(v,
            grouped_data,is_continuous,is_categorical,
            is_normal,min_observed,catlevels)
    return df
def _p_test(self,v,grouped_data,is_continuous,is_categorical,
is_normal,min_observed,catlevels):
"""
Compute p-values.
Parameters
----------
v : str
Name of the variable to be tested.
grouped_data : list
List of lists of values to be tested.
is_continuous : bool
True if the variable is continuous.
is_categorical : bool
True if the variable is categorical.
is_normal : bool
True if the variable is normally distributed.
min_observed : int
Minimum number of values across groups for the variable.
catlevels : list
Sorted list of levels for categorical variables.
Returns
----------
pval : float
The computed p-value.
ptest : str
The name of the test used to compute the p-value.
"""
# no test by default
pval=np.nan
ptest='Not tested'
# do not test if the variable has no observations in a level
if min_observed == 0:
warnings.warn('No p-value was computed for {} due to the low number of observations.'.format(v))
return pval,ptest
# continuous
if is_continuous and is_normal and len(grouped_data)==2 :
ptest = 'Two Sample T-test'
test_stat, pval = stats.ttest_ind(*grouped_data,equal_var=False)
elif is_continuous and is_normal:
# normally distributed
ptest = 'One-way ANOVA'
test_stat, pval = stats.f_oneway(*grouped_data)
elif is_continuous and not is_normal:
# non-normally distributed
ptest = 'Kruskal-Wallis'
test_stat, pval = stats.kruskal(*grouped_data)
# categorical
elif is_categorical:
# default to chi-squared
ptest = 'Chi-squared'
chi2, pval, dof, expected = stats.chi2_contingency(grouped_data)
# if any expected cell counts are < 5, chi2 may not be valid
# if this is a 2x2, switch to fisher exact
if expected.min() < 5:
if grouped_data.shape == (2,2):
ptest = "Fisher's exact"
oddsratio, pval = stats.fisher_exact(grouped_data)
else:
ptest = 'Chi-squared (warning: expected count < 5)'
warnings.warn('No p-value was computed for {} due to the low number of observations.'.format(v))
return pval,ptest
def _create_cont_table(self,data):
    """
    Create tableone for continuous data.

    Parameters
    ----------
    data : pandas DataFrame
        The input dataset (used only to compute null counts).

    Returns
    ----------
    table : pandas DataFrame
        A table summarising the continuous variables, indexed by
        (variable, level) where level is always the empty string.
    """
    # remove the t1_summary level
    table = self.cont_describe[['t1_summary']].copy()
    table.columns = table.columns.droplevel(level=0)
    # add a column of null counts as 1-count() from previous function
    nulltable = data[self._continuous].isnull().sum().to_frame(name='isnull')
    try:
        table = table.join(nulltable)
    except TypeError: # if columns form a CategoricalIndex, need to convert to string first
        table.columns = table.columns.astype(str)
        table = table.join(nulltable)
    # add an empty level column, for joining with cat table
    table['level'] = ''
    table.set_index([table.index,'level'],inplace=True)
    # add pval column (adjusted takes precedence when both are requested)
    if self._pval and self._pval_adjust:
        table = table.join(self._significance_table[['pval (adjusted)','ptest']])
    elif self._pval:
        table = table.join(self._significance_table[['pval','ptest']])
    return table
def _create_cat_table(self,data):
    """
    Create table one for categorical data.

    Parameters
    ----------
    data : pandas DataFrame
        The input dataset (used only to compute null counts).

    Returns
    ----------
    table : pandas DataFrame
        A table summarising the categorical variables, indexed by
        (variable, level).
    """
    table = self.cat_describe['t1_summary'].copy()
    # add the total count of null values across all levels
    isnull = data[self._categorical].isnull().sum().to_frame(name='isnull')
    isnull.index.rename('variable', inplace=True)
    try:
        table = table.join(isnull)
    except TypeError: # if columns form a CategoricalIndex, need to convert to string first
        table.columns = table.columns.astype(str)
        table = table.join(isnull)
    # add pval column (adjusted takes precedence when both are requested)
    if self._pval and self._pval_adjust:
        table = table.join(self._significance_table[['pval (adjusted)','ptest']])
    elif self._pval:
        table = table.join(self._significance_table[['pval','ptest']])
    return table
def _create_tableone(self,data):
    """
    Create table 1 by combining the continuous and categorical tables.

    Parameters
    ----------
    data : pandas DataFrame
        The input dataset (used for group counts and level frequencies).

    Returns
    ----------
    table : pandas DataFrame
        The complete table one.
    """
    if self._continuous and self._categorical:
        # support pandas<=0.22 (no `sort` kwarg on concat)
        try:
            table = pd.concat([self.cont_table,self.cat_table],sort=False)
        except:
            table = pd.concat([self.cont_table,self.cat_table])
    elif self._continuous:
        table = self.cont_table
    elif self._categorical:
        table = self.cat_table
    # round pval column and convert to string
    if self._pval and self._pval_adjust:
        table['pval (adjusted)'] = table['pval (adjusted)'].apply('{:.3f}'.format).astype(str)
        table.loc[table['pval (adjusted)'] == '0.000', 'pval (adjusted)'] = '<0.001'
    elif self._pval:
        table['pval'] = table['pval'].apply('{:.3f}'.format).astype(str)
        table.loc[table['pval'] == '0.000', 'pval'] = '<0.001'
    # sort the table rows
    # NOTE(review): reset_index() returns a copy, so the inplace set_index
    # below modifies a temporary and leaves `table` unchanged -- confirm
    # whether this line can be removed or should rebind `table`.
    table.reset_index().set_index(['variable','level'], inplace=True)
    if self._sort:
        # alphabetical
        new_index = sorted(table.index.values)
    else:
        # sort by the columns argument
        new_index = sorted(table.index.values,key=lambda x: self._columns.index(x[0]))
    table = table.reindex(new_index)
    # if a limit has been set on the number of categorical variables
    # then re-order the variables by frequency
    if self._limit:
        levelcounts = data[self._categorical].nunique()
        levelcounts = levelcounts[levelcounts >= self._limit]
        for v,_ in levelcounts.iteritems():
            count = data[v].value_counts().sort_values(ascending=False)
            new_index = [(v, i) for i in count.index]
            # restructure to match orig_index
            new_index_array=np.empty((len(new_index),), dtype=object)
            new_index_array[:]=[tuple(i) for i in new_index]
            orig_index = table.index.values.copy()
            orig_index[table.index.get_loc(v)] = new_index_array
            table = table.reindex(orig_index)
    # inserts n row
    n_row = pd.DataFrame(columns = ['variable','level','isnull'])
    n_row.set_index(['variable','level'], inplace=True)
    n_row.loc['n', ''] = None
    # support pandas<=0.22 (no `sort` kwarg on concat)
    try:
        table = pd.concat([n_row,table],sort=False)
    except:
        table = pd.concat([n_row,table])
    if self._groupbylvls == ['overall']:
        table.loc['n','overall'] = len(data.index)
    else:
        for g in self._groupbylvls:
            ct = data[self._groupby][data[self._groupby]==g].count()
            table.loc['n',g] = ct
    # only display data in first level row
    dupe_mask = table.groupby(level=[0]).cumcount().ne(0)
    dupe_columns = ['isnull']
    optional_columns = ['pval','pval (adjusted)','ptest']
    for col in optional_columns:
        if col in table.columns.values:
            dupe_columns.append(col)
    table[dupe_columns] = table[dupe_columns].mask(dupe_mask).fillna('')
    # remove empty column added above
    table.drop([''], axis=1, inplace=True)
    # remove isnull column if not needed
    if not self._isnull:
        table.drop('isnull',axis=1,inplace=True)
    # replace nans with empty strings
    table.fillna('',inplace=True)
    # add column index
    if not self._groupbylvls == ['overall']:
        # rename groupby variable if requested
        c = self._groupby
        if self._alt_labels:
            if self._groupby in self._alt_labels:
                c = self._alt_labels[self._groupby]
        c = 'Grouped by {}'.format(c)
        table.columns = pd.MultiIndex.from_product([[c], table.columns])
    # display alternative labels if assigned
    table.rename(index=self._create_row_labels(), inplace=True, level=0)
    # if a limit has been set on the number of categorical variables
    # limit the number of categorical variables that are displayed
    if self._limit:
        table = table.groupby('variable').head(self._limit)
    # re-order the columns in a consistent fashion
    if self._groupby:
        cols = table.columns.levels[1].values
    else:
        cols = table.columns.values
    if 'isnull' in cols:
        cols = ['isnull'] + [x for x in cols if x != 'isnull']
    # iterate through each optional column
    # if they exist, put them at the end of the dataframe
    # ensures the last 3 columns will be in the same order as optional_columns
    for col in optional_columns:
        if col in cols:
            cols = [x for x in cols if x != col] + [col]
    if self._groupby:
        table = table.reindex(cols, axis=1, level=1)
    else:
        table = table.reindex(cols, axis=1)
    return table
def _create_row_labels(self):
"""
Take the original labels for rows. Rename if alternative labels are
provided. Append label suffix if label_suffix is True.
Returns
----------
labels : dictionary
Dictionary, keys are original column name, values are final label.
"""
# start with the original column names
labels = {}
for c in self._columns:
labels[c] = c
# replace column names with alternative names if provided
if self._alt_labels:
for k in self._alt_labels.keys():
labels[k] = self._alt_labels[k]
# append the label suffix
if self._label_suffix:
for k in labels.keys():
if k in self._nonnormal:
labels[k] = "{}, {}".format(labels[k],"median [Q1,Q3]")
elif k in self._categorical:
labels[k] = "{}, {}".format(labels[k],"n (%)")
else:
labels[k] = "{}, {}".format(labels[k],"mean (SD)")
return labels
# warnings
def _non_continuous_warning(self, c):
    """Warn that column ``c`` has no numeric values after coercion."""
    # stacklevel=2 points the warning at the caller rather than this helper
    warnings.warn('''"{}" has all non-numeric values. Consider including it in the
        list of categorical variables.'''.format(c), RuntimeWarning, stacklevel=2)
|
tompollard/tableone
|
modality.py
|
dip_pval_tabinterpol
|
python
|
def dip_pval_tabinterpol(dip, N):
    '''
    Interpolate a p-value for Hartigan's dip test from tabulated quantiles.

    dip - dip value computed from dip_from_cdf
    N - number of observations

    The table below holds dip quantiles (rows: sample sizes, columns:
    cumulative probabilities; per the module's own references, imported
    from the R package diptest). The p-value is obtained by linear
    interpolation of sqrt(N)*dip between the two nearest tabulated
    sample sizes, then between the two nearest tabulated probabilities.
    Returns NaN when N is NaN or too small for the table.
    '''
    # if qDiptab_df is None:
    #     raise DataError("Tabulated p-values not available. See installation instructions.")
    if np.isnan(N) or N < 10:
        return np.nan
    # tabulated dip quantiles: outer keys are probabilities, inner keys
    # are sample sizes (data copied verbatim -- do not edit by hand)
    qDiptab_dict = {'0': {4: 0.125,
                          5: 0.1,
                          6: 0.0833333333333333,
                          7: 0.0714285714285714,
                          8: 0.0625,
                          9: 0.0555555555555556,
                          10: 0.05,
                          15: 0.0341378172277919,
                          20: 0.033718563622065004,
                          30: 0.0262674485075642,
                          50: 0.0218544781364545,
                          100: 0.0164852597438403,
                          200: 0.0111236388849688,
                          500: 0.007554885975761959,
                          1000: 0.00541658127872122,
                          2000: 0.0039043999745055702,
                          5000: 0.00245657785440433,
                          10000: 0.00174954269199566,
                          20000: 0.00119458814106091,
                          40000: 0.000852415648011777,
                          72000: 0.000644400053256997},
                    '0.01': {4: 0.125,
                             5: 0.1,
                             6: 0.0833333333333333,
                             7: 0.0714285714285714,
                             8: 0.0625,
                             9: 0.0613018090298924,
                             10: 0.0610132555623269,
                             15: 0.0546284208048975,
                             20: 0.0474333740698401,
                             30: 0.0395871890405749,
                             50: 0.0314400501999916,
                             100: 0.022831985803043,
                             200: 0.0165017735429825,
                             500: 0.0106403461127515,
                             1000: 0.0076028674530018705,
                             2000: 0.0054166418179658294,
                             5000: 0.0034480928223332603,
                             10000: 0.00244595133885302,
                             20000: 0.00173435346896287,
                             40000: 0.00122883479310665,
                             72000: 0.000916872204484283},
                    '0.02': {4: 0.125,
                             5: 0.1,
                             6: 0.0833333333333333,
                             7: 0.0714285714285714,
                             8: 0.0656911994503283,
                             9: 0.0658615858179315,
                             10: 0.0651627333214016,
                             15: 0.0572191260231815,
                             20: 0.0490891387627092,
                             30: 0.0414574606741673,
                             50: 0.0329008160470834,
                             100: 0.0238917486442849,
                             200: 0.0172594157992489,
                             500: 0.0111255573208294,
                             1000: 0.00794987834644799,
                             2000: 0.0056617138625232296,
                             5000: 0.00360473943713036,
                             10000: 0.00255710802275612,
                             20000: 0.0018119443458468102,
                             40000: 0.0012846930445701802,
                             72000: 0.0009579329467655321},
                    '0.05': {4: 0.125,
                             5: 0.1,
                             6: 0.0833333333333333,
                             7: 0.0725717816250742,
                             8: 0.0738651136071762,
                             9: 0.0732651142535317,
                             10: 0.0718321619656165,
                             15: 0.0610087367689692,
                             20: 0.052719998201553,
                             30: 0.0444462614069956,
                             50: 0.0353023819040016,
                             100: 0.0256559537977579,
                             200: 0.0185259426032926,
                             500: 0.0119353655328931,
                             1000: 0.0085216518343594,
                             2000: 0.00607120971135229,
                             5000: 0.0038632654801084897,
                             10000: 0.00273990955227265,
                             20000: 0.00194259470485893,
                             40000: 0.0013761765052555301,
                             72000: 0.00102641863872347},
                    '0.1': {4: 0.125,
                            5: 0.1,
                            6: 0.0833333333333333,
                            7: 0.0817315478539489,
                            8: 0.0820045917762512,
                            9: 0.0803941629593475,
                            10: 0.077966212182459,
                            15: 0.0642657137330444,
                            20: 0.0567795509056742,
                            30: 0.0473998525042686,
                            50: 0.0377279973102482,
                            100: 0.0273987414570948,
                            200: 0.0197917612637521,
                            500: 0.0127411306411808,
                            1000: 0.00909775605533253,
                            2000: 0.0064762535755248,
                            5000: 0.00412089506752692,
                            10000: 0.0029225480567908,
                            20000: 0.00207173719623868,
                            40000: 0.0014675150200632301,
                            72000: 0.0010949515421800199},
                    '0.2': {4: 0.125,
                            5: 0.1,
                            6: 0.0924514470941933,
                            7: 0.0940590181922527,
                            8: 0.0922700601131892,
                            9: 0.0890432420913848,
                            10: 0.0852835359834564,
                            15: 0.0692234107989591,
                            20: 0.0620134674468181,
                            30: 0.0516677370374349,
                            50: 0.0410699984399582,
                            100: 0.0298109370830153,
                            200: 0.0215233745778454,
                            500: 0.0138524542751814,
                            1000: 0.00988924521014078,
                            2000: 0.00703573098590029,
                            5000: 0.00447640050137479,
                            10000: 0.00317374638422465,
                            20000: 0.00224993202086955,
                            40000: 0.00159376453672466,
                            72000: 0.00118904090369415},
                    '0.3': {4: 0.125,
                            5: 0.1,
                            6: 0.103913431059949,
                            7: 0.10324449080087102,
                            8: 0.0996737189599363,
                            9: 0.0950811420297928,
                            10: 0.0903204173707099,
                            15: 0.0745462114365167,
                            20: 0.0660163872069048,
                            30: 0.0551037519001622,
                            50: 0.0437704598622665,
                            100: 0.0317771496530253,
                            200: 0.0229259769870428,
                            500: 0.0147536004288476,
                            1000: 0.0105309297090482,
                            2000: 0.007494212545892991,
                            5000: 0.00476555693102276,
                            10000: 0.00338072258533527,
                            20000: 0.00239520831473419,
                            40000: 0.00169668445506151,
                            72000: 0.00126575197699874},
                    '0.4': {4: 0.125,
                            5: 0.10872059357632902,
                            6: 0.113885220640212,
                            7: 0.110964599995697,
                            8: 0.10573353180273701,
                            9: 0.0999380897811046,
                            10: 0.0943334983745117,
                            15: 0.0792030878981762,
                            20: 0.0696506075066401,
                            30: 0.058265005347492994,
                            50: 0.0462925642671299,
                            100: 0.0336073821590387,
                            200: 0.024243848341112,
                            500: 0.0155963185751048,
                            1000: 0.0111322726797384,
                            2000: 0.007920878896017329,
                            5000: 0.005037040297500721,
                            10000: 0.0035724387653598205,
                            20000: 0.00253036792824665,
                            40000: 0.0017925341833790601,
                            72000: 0.00133750966361506},
                    '0.5': {4: 0.125,
                            5: 0.12156379802641401,
                            6: 0.123071187137781,
                            7: 0.11780784650433501,
                            8: 0.11103512984770501,
                            9: 0.10415356007586801,
                            10: 0.0977817630384725,
                            15: 0.083621033469191,
                            20: 0.0733437740592714,
                            30: 0.0614510857304343,
                            50: 0.048851155289608,
                            100: 0.0354621760592113,
                            200: 0.025584358256487003,
                            500: 0.0164519238025286,
                            1000: 0.0117439009052552,
                            2000: 0.008355737247680059,
                            5000: 0.0053123924740821294,
                            10000: 0.00376734715752209,
                            20000: 0.00266863168718114,
                            40000: 0.00189061261635977,
                            72000: 0.00141049709228472},
                    '0.6': {4: 0.125,
                            5: 0.134318918697053,
                            6: 0.13186973390253,
                            7: 0.124216086833531,
                            8: 0.11592005574998801,
                            9: 0.10800780236193198,
                            10: 0.102180866696628,
                            15: 0.0881198482202905,
                            20: 0.0776460662880254,
                            30: 0.0649164408053978,
                            50: 0.0516145897865757,
                            100: 0.0374805844550272,
                            200: 0.0270252129816288,
                            500: 0.017383057902553,
                            1000: 0.012405033293814,
                            2000: 0.00882439333812351,
                            5000: 0.00560929919359959,
                            10000: 0.00397885007249132,
                            20000: 0.0028181999035216,
                            40000: 0.00199645471886179,
                            72000: 0.00148936709298802},
                    '0.7': {4: 0.13255954878268902,
                            5: 0.14729879897625198,
                            6: 0.140564796497941,
                            7: 0.130409013968317,
                            8: 0.120561479262465,
                            9: 0.112512617124951,
                            10: 0.10996094814295099,
                            15: 0.093124666680253,
                            20: 0.0824558407118372,
                            30: 0.0689178762425442,
                            50: 0.0548121932066019,
                            100: 0.0398046179116599,
                            200: 0.0286920262150517,
                            500: 0.0184503949887735,
                            1000: 0.0131684179320803,
                            2000: 0.009367858207170609,
                            5000: 0.00595352728377949,
                            10000: 0.00422430013176233,
                            20000: 0.00299137548142077,
                            40000: 0.00211929748381704,
                            72000: 0.00158027541945626},
                    '0.8': {4: 0.15749736904023498,
                            5: 0.161085025702604,
                            6: 0.14941924112913002,
                            7: 0.136639642123068,
                            8: 0.125558759034845,
                            9: 0.12291503348081699,
                            10: 0.11884476721158699,
                            15: 0.0996694393390689,
                            20: 0.08834462700173701,
                            30: 0.0739249074078291,
                            50: 0.0588230482851366,
                            100: 0.0427283846799166,
                            200: 0.0308006766341406,
                            500: 0.0198162679782071,
                            1000: 0.0141377942603047,
                            2000: 0.01005604603884,
                            5000: 0.00639092280563517,
                            10000: 0.00453437508148542,
                            20000: 0.00321024899920135,
                            40000: 0.0022745769870358102,
                            72000: 0.00169651643860074},
                    '0.9': {4: 0.18740187880755899,
                            5: 0.176811998476076,
                            6: 0.159137064572627,
                            7: 0.144240669035124,
                            8: 0.141841067033899,
                            9: 0.136412639387084,
                            10: 0.130462149644819,
                            15: 0.11008749690090598,
                            20: 0.0972346018122903,
                            30: 0.0814791379390127,
                            50: 0.0649136324046767,
                            100: 0.047152783315718,
                            200: 0.0339967814293504,
                            500: 0.0218781313182203,
                            1000: 0.0156148055023058,
                            2000: 0.0111019116837591,
                            5000: 0.00705566126234625,
                            10000: 0.00500178808402368,
                            20000: 0.00354362220314155,
                            40000: 0.00250999080890397,
                            72000: 0.0018730618472582602},
                    '0.95': {4: 0.20726978858735998,
                             5: 0.18639179602794398,
                             6: 0.164769608513302,
                             7: 0.159903395678336,
                             8: 0.153978303998561,
                             9: 0.14660378495401902,
                             10: 0.139611395137099,
                             15: 0.118760769203664,
                             20: 0.105130218270636,
                             30: 0.0881689143126666,
                             50: 0.0702737877191269,
                             100: 0.0511279442868827,
                             200: 0.0368418413878307,
                             500: 0.0237294742633411,
                             1000: 0.0169343970067564,
                             2000: 0.0120380990328341,
                             5000: 0.0076506368153935,
                             10000: 0.00542372242836395,
                             20000: 0.00384330190244679,
                             40000: 0.00272375073486223,
                             72000: 0.00203178401610555},
                    '0.98': {4: 0.22375580462922195,
                             5: 0.19361253363045,
                             6: 0.17917654739278197,
                             7: 0.17519655327122302,
                             8: 0.16597856724751,
                             9: 0.157084065653166,
                             10: 0.150961728882481,
                             15: 0.128890475210055,
                             20: 0.11430970428125302,
                             30: 0.0960564383013644,
                             50: 0.0767095886079179,
                             100: 0.0558022052195208,
                             200: 0.0402729850316397,
                             500: 0.025919578977657003,
                             1000: 0.018513067368104,
                             2000: 0.0131721010552576,
                             5000: 0.00836821687047215,
                             10000: 0.00592656681022859,
                             20000: 0.00420258799378253,
                             40000: 0.00298072958568387,
                             72000: 0.00222356097506054},
                    '0.99': {4: 0.231796258864192,
                             5: 0.19650913979884502,
                             6: 0.191862827995563,
                             7: 0.184118659121501,
                             8: 0.172988528276759,
                             9: 0.164164643657217,
                             10: 0.159684158858235,
                             15: 0.13598356863636,
                             20: 0.120624043335821,
                             30: 0.101478558893837,
                             50: 0.0811998415355918,
                             100: 0.059024132304226,
                             200: 0.0426864799777448,
                             500: 0.0274518022761997,
                             1000: 0.0196080260483234,
                             2000: 0.0139655122281969,
                             5000: 0.00886357892854914,
                             10000: 0.00628034732880374,
                             20000: 0.00445774902155711,
                             40000: 0.00315942194040388,
                             72000: 0.00235782814777627},
                    '0.995': {4: 0.23726374382677898,
                              5: 0.198159967287576,
                              6: 0.20210197104296804,
                              7: 0.19101439617430602,
                              8: 0.179010413496374,
                              9: 0.172821674582338,
                              10: 0.16719524735674,
                              15: 0.14245248368127697,
                              20: 0.126552378036739,
                              30: 0.10650487144103,
                              50: 0.0852854646662134,
                              100: 0.0620425065165146,
                              200: 0.044958959158761,
                              500: 0.0288986369564301,
                              1000: 0.0206489568587364,
                              2000: 0.0146889122204488,
                              5000: 0.00934162787186159,
                              10000: 0.00661030641550873,
                              20000: 0.00469461513212743,
                              40000: 0.0033273652798148,
                              72000: 0.00248343580127067},
                    '0.998': {4: 0.241992892688593,
                              5: 0.19924427936243302,
                              6: 0.213015781111186,
                              7: 0.198216795232182,
                              8: 0.186504388711178,
                              9: 0.182555283567818,
                              10: 0.175419540856082,
                              15: 0.15017281653074202,
                              20: 0.13360135382395,
                              30: 0.112724636524262,
                              50: 0.0904847827490294,
                              100: 0.0658016011466099,
                              200: 0.0477643873749449,
                              500: 0.0306813505050163,
                              1000: 0.0219285176765082,
                              2000: 0.0156076779647454,
                              5000: 0.009932186363240291,
                              10000: 0.00702254699967648,
                              20000: 0.004994160691291679,
                              40000: 0.00353988965698579,
                              72000: 0.00264210826339498},
                    '0.999': {4: 0.244369839049632,
                              5: 0.199617527406166,
                              6: 0.219518627282415,
                              7: 0.20234101074826102,
                              8: 0.19448404115794,
                              9: 0.188658833121906,
                              10: 0.180611195797351,
                              15: 0.15545613369632802,
                              20: 0.138569903791767,
                              30: 0.117164140184417,
                              50: 0.0940930106666244,
                              100: 0.0684479731118028,
                              200: 0.0497198001867437,
                              500: 0.0320170996823189,
                              1000: 0.0228689168972669,
                              2000: 0.0162685615996248,
                              5000: 0.0103498795291629,
                              10000: 0.0073182262815645795,
                              20000: 0.00520917757743218,
                              40000: 0.00369400045486625,
                              72000: 0.0027524322157581},
                    '0.9995': {4: 0.245966625504691,
                               5: 0.19980094149902802,
                               6: 0.22433904739444602,
                               7: 0.205377566346832,
                               8: 0.200864297005026,
                               9: 0.19408912076824603,
                               10: 0.18528641605039603,
                               15: 0.160896499106958,
                               20: 0.14336916123968,
                               30: 0.12142585990898701,
                               50: 0.0974904344916743,
                               100: 0.0709169443994193,
                               200: 0.0516114611801451,
                               500: 0.0332452747332959,
                               1000: 0.023738710122235003,
                               2000: 0.0168874937789415,
                               5000: 0.0107780907076862,
                               10000: 0.0076065423418208,
                               20000: 0.005403962359243721,
                               40000: 0.00383345715372182,
                               72000: 0.0028608570740143},
                    '0.9998': {4: 0.24743959723326198,
                               5: 0.19991708183427104,
                               6: 0.22944933215424101,
                               7: 0.208306562526874,
                               8: 0.20884999705022897,
                               9: 0.19915700809389003,
                               10: 0.19120308390504398,
                               15: 0.16697940794624802,
                               20: 0.148940116394883,
                               30: 0.126733051889401,
                               50: 0.10228420428399698,
                               100: 0.0741183486081263,
                               200: 0.0540543978864652,
                               500: 0.0348335698576168,
                               1000: 0.0248334158891432,
                               2000: 0.0176505093388153,
                               5000: 0.0113184316868283,
                               10000: 0.00795640367207482,
                               20000: 0.00564540201704594,
                               40000: 0.0040079346963469605,
                               72000: 0.00298695044508003},
                    '0.9999': {4: 0.24823065965663801,
                               5: 0.19995902909307503,
                               6: 0.232714530449602,
                               7: 0.209866047852379,
                               8: 0.212556040406219,
                               9: 0.20288159843655804,
                               10: 0.19580515933918397,
                               15: 0.17111793515551002,
                               20: 0.152832538183622,
                               30: 0.131198578897542,
                               50: 0.104680624334611,
                               100: 0.0762579402903838,
                               200: 0.0558704526182638,
                               500: 0.0359832389317461,
                               1000: 0.0256126573433596,
                               2000: 0.0181944265400504,
                               5000: 0.0117329446468571,
                               10000: 0.0082270524584354,
                               20000: 0.00580460792299214,
                               40000: 0.00414892737222885,
                               72000: 0.00309340092038059},
                    '0.99995': {4: 0.248754269146416,
                                5: 0.19997839537608197,
                                6: 0.236548128358969,
                                7: 0.21096757693345103,
                                8: 0.21714917413729898,
                                9: 0.205979795735129,
                                10: 0.20029398089673,
                                15: 0.17590050570443203,
                                20: 0.15601016361897102,
                                30: 0.133691739483444,
                                50: 0.107496694235039,
                                100: 0.0785735967934979,
                                200: 0.0573877056330228,
                                500: 0.0369051995840645,
                                1000: 0.0265491336936829,
                                2000: 0.0186226037818523,
                                5000: 0.0119995948968375,
                                10000: 0.00852240989786251,
                                20000: 0.00599774739593151,
                                40000: 0.0042839159079761,
                                72000: 0.00319932767198801},
                    '0.99998': {4: 0.24930203997425898,
                                5: 0.199993151405815,
                                6: 0.2390887911995,
                                7: 0.212233348558702,
                                8: 0.22170007640450304,
                                9: 0.21054115498898,
                                10: 0.20565108964621898,
                                15: 0.18185667601316602,
                                20: 0.16131922583934502,
                                30: 0.137831637950694,
                                50: 0.11140887547015,
                                100: 0.0813458356889133,
                                200: 0.0593365901653878,
                                500: 0.0387221159256424,
                                1000: 0.027578430100535997,
                                2000: 0.0193001796565433,
                                5000: 0.0124410052027886,
                                10000: 0.00892863905540303,
                                20000: 0.00633099254378114,
                                40000: 0.0044187010443287895,
                                72000: 0.00332688234611187},
                    '0.99999': {4: 0.24945965232322498,
                                5: 0.199995525025673,
                                6: 0.24010356643629502,
                                7: 0.21266103831250602,
                                8: 0.225000835357532,
                                9: 0.21180033095039003,
                                10: 0.209682048785853,
                                15: 0.185743454151004,
                                20: 0.165568255916749,
                                30: 0.14155750962435099,
                                50: 0.113536607717411,
                                100: 0.0832963013755522,
                                200: 0.0607646310473911,
                                500: 0.039930259057650005,
                                1000: 0.0284430733108,
                                2000: 0.0196241518040617,
                                5000: 0.0129467396733128,
                                10000: 0.009138539330002129,
                                20000: 0.00656987109386762,
                                40000: 0.00450818604569179,
                                72000: 0.00339316094477355},
                    '1': {4: 0.24974836247845,
                          5: 0.199999835639211,
                          6: 0.24467288361776798,
                          7: 0.21353618608817,
                          8: 0.23377291968768302,
                          9: 0.21537991431762502,
                          10: 0.221530282182963,
                          15: 0.19224056333056197,
                          20: 0.175834459522789,
                          30: 0.163833046059817,
                          50: 0.11788671686531199,
                          100: 0.0926780423096737,
                          200: 0.0705309107882395,
                          500: 0.0431448163617178,
                          1000: 0.0313640941982108,
                          2000: 0.0213081254074584,
                          5000: 0.014396063834027,
                          10000: 0.00952234579566773,
                          20000: 0.006858294480462271,
                          40000: 0.00513477467565583,
                          72000: 0.00376331697005859}}
    qDiptab_df = pd.DataFrame(qDiptab_dict)
    diptable = np.array(qDiptab_df)
    ps = np.array(qDiptab_df.columns).astype(float)
    Ns = np.array(qDiptab_df.index)
    # above the largest tabulated N, rescale the dip to the table's top N
    if N >= Ns[-1]:
        dip = transform_dip_to_other_nbr_pts(dip, N, Ns[-1]-0.1)
        N = Ns[-1]-0.1
    # interpolate between the two nearest tabulated sample sizes
    iNlow = np.nonzero(Ns < N)[0][-1]
    qN = (N-Ns[iNlow])/(Ns[iNlow+1]-Ns[iNlow])
    dip_sqrtN = np.sqrt(N)*dip
    dip_interpol_sqrtN = (
        np.sqrt(Ns[iNlow])*diptable[iNlow, :] + qN*(
            np.sqrt(Ns[iNlow+1])*diptable[iNlow+1, :]-np.sqrt(Ns[iNlow])*diptable[iNlow, :]))
    # dip below every tabulated quantile -> p-value of 1
    if not (dip_interpol_sqrtN < dip_sqrtN).any():
        return 1
    iplow = np.nonzero(dip_interpol_sqrtN < dip_sqrtN)[0][-1]
    # dip above every tabulated quantile -> p-value of 0
    if iplow == len(dip_interpol_sqrtN) - 1:
        return 0
    # interpolate between the two bracketing probabilities
    qp = (dip_sqrtN-dip_interpol_sqrtN[iplow])/(dip_interpol_sqrtN[iplow+1]-dip_interpol_sqrtN[iplow])
    p_interpol = ps[iplow] + qp*(ps[iplow+1]-ps[iplow])
    return 1 - p_interpol
|
dip - dip value computed from dip_from_cdf
N - number of observations
|
train
|
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/modality.py#L127-L713
| null |
# ###################################### #
# #
# Updated by: Tom Pollard (2018.03.19) #
# Author: Kerstin Johnsson #
# License: MIT License #
# Available from: #
# https://github.com/kjohnsson/modality #
# #
# ###################################### #
import numpy as np
from scipy.special import beta as betafun
# import matplotlib.pyplot as plt
from scipy.optimize import brentq
import os
import pandas as pd
np.random.seed(1337)
# import pickle
def generate_data(peaks=2, n=None, mu=None, std=None):
    """Draw samples from a mixture of Gaussians, one component per peak.

    peaks - number of mixture components.
    n     - list of per-component sample counts (default 5000 each).
    mu    - list of component means (default: random integers in [0, 30)).
    std   - list of component standard deviations (default 1.0 each).
    """
    # Fill in defaults for any parameter the caller omitted.
    if not n:
        n = [5000] * peaks
    if not mu:
        mu = np.random.randint(0, 30, peaks)
    if not std:
        std = [1.0] * peaks
    # Sample each component, then join them into a single data set.
    samples = [np.random.normal(loc=mu[k], scale=std[k], size=n[k])
               for k in range(peaks)]
    return np.concatenate(samples)
def hartigan_diptest(data):
    '''
    P-value according to Hartigan's dip test for unimodality.
    The dip is computed using the function
    dip_and_closest_unimodal_from_cdf. From this the p-value is
    interpolated using a table imported from the R package diptest.
    References:
        Hartigan and Hartigan (1985): The dip test of unimodality.
        The Annals of Statistics. 13(1).
    Input:
        data - one-dimensional data set.
    Value:
        p-value for the test (np.nan if the test could not be computed).
    '''
    try:
        # Drop NaNs before computing the dip; the downstream sort-based
        # CDF construction cannot handle them.
        p = pval_hartigan(data[~np.isnan(data)])
    except Exception:
        # Best-effort: any computational failure (e.g. too few points)
        # yields NaN.  Catch Exception rather than a bare except so that
        # KeyboardInterrupt/SystemExit still propagate.
        p = np.nan
    return p
def pval_hartigan(data):
    """Hartigan dip-test p-value for ``data`` via table interpolation."""
    x_cdf, y_cdf = cum_distr(data)
    dip_value = dip_from_cdf(x_cdf, y_cdf)
    return dip_pval_tabinterpol(dip_value, len(data))
def cum_distr(data, w=None):
    """Weighted empirical distribution function (EDF) of ``data``.

    Input:
        data - one-dimensional data set.
        w    - observation weights; defaults to uniform 1/len(data).
    Value:
        (x, y) step-function coordinates of the EDF; each unique data
        value appears twice in x so the vertical jumps are traced, and
        y is normalized to end at exactly 1.
    """
    if w is None:
        w = np.ones(len(data))*1./len(data)
    eps = 1e-10
    data_ord = np.argsort(data)
    data_sort = data[data_ord]
    w_sort = w[data_ord]
    # Merge data points that are closer than eps; their weights are
    # summed below so the total mass is preserved.
    data_sort, indices = unique(data_sort, return_index=True, eps=eps, is_sorted=True)
    if len(indices) < len(data_ord):
        w_unique = np.zeros(len(indices))
        for i in range(len(indices)-1):
            w_unique[i] = np.sum(w_sort[indices[i]:indices[i+1]])
        w_unique[-1] = np.sum(w_sort[indices[-1]:])
        w_sort = w_unique
    wcum = np.cumsum(w_sort)
    wcum /= wcum[-1]  # normalize so the EDF ends at exactly 1
    # Duplicate each x-coordinate so the step function's jumps are
    # explicit: at each data point y rises from the previous cumulative
    # weight to the new one.
    N = len(data_sort)
    x = np.empty(2*N)
    x[2*np.arange(N)] = data_sort
    x[2*np.arange(N)+1] = data_sort
    y = np.empty(2*N)
    y[0] = 0
    y[2*np.arange(N)+1] = wcum
    y[2*np.arange(N-1)+2] = wcum[:-1]
    return x, y
def unique(data, return_index, eps, is_sorted=True):
    """Collapse values of ``data`` lying within ``eps`` of each other.

    Keeps the first representative of every eps-separated cluster.  When
    ``return_index`` is true, also returns the indices (into ``data``)
    of the kept values.
    """
    if is_sorted:
        data_sort = data
    else:
        order = np.argsort(data)
        rank = np.argsort(order)
        data_sort = data[order]
    # Mark the first element of every eps-separated group as unique.
    keep_sorted = np.ones(len(data_sort), dtype='bool')
    last_kept = 0
    for pos in range(1, len(data_sort)):
        if data_sort[pos] - data_sort[last_kept] < eps:
            keep_sorted[pos] = False
        else:
            last_kept = pos
    if is_sorted:
        data_unique = data[keep_sorted]
    else:
        # Map the sorted-order mask back to the original ordering.
        keep = keep_sorted[rank]
        data_unique = data[keep]
    if not return_index:
        return data_unique
    if is_sorted:
        ind_unique = np.nonzero(keep_sorted)[0]
    else:
        ind_unique = np.nonzero(keep)[0]
    return data_unique, ind_unique
def dip_from_cdf(xF, yF, plotting=False, verbose=False, eps=1e-12):
    """Return only the dip statistic for the empirical CDF (xF, yF)."""
    dip_value, _unimodal = dip_and_closest_unimodal_from_cdf(
        xF, yF, plotting, verbose, eps)
    return dip_value
def transform_dip_to_other_nbr_pts(dip_n, n, m):
    """Rescale a dip computed on n points to the scale of m points.

    The dip statistic shrinks like 1/sqrt(N), hence the sqrt(n/m) factor.
    """
    return np.sqrt(n/m) * dip_n
def dip_and_closest_unimodal_from_cdf(xF, yF, plotting=False, verbose=False, eps=1e-12):
    '''
    Dip computed as distance between empirical distribution function (EDF) and
    cumulative distribution function for the unimodal distribution with
    smallest such distance. The optimal unimodal distribution is found by
    the algorithm presented in
        Hartigan (1985): Computation of the dip statistic to test for
        unimodality. Applied Statistics, vol. 34, no. 3
    If the plotting option is enabled the optimal unimodal distribution
    function is plotted along with (xF, yF-dip) and (xF, yF+dip).
    (Note: all plotting code is currently commented out, so ``plotting``
    has no effect.)
    Input:
        xF - x-coordinates for EDF
        yF - y-coordinates for EDF
    Value:
        (dip, (xU, yU)) - the dip statistic and the coordinates of the
        closest unimodal distribution function.
    '''
    ## TODO! Preprocess xF and yF so that yF increasing and xF does
    ## not have more than two copies of each x-value.
    if (xF[1:]-xF[:-1] < -eps).any():
        raise ValueError('Need sorted x-values to compute dip')
    if (yF[1:]-yF[:-1] < -eps).any():
        raise ValueError('Need sorted y-values to compute dip')
    D = 0  # lower bound for dip*2
    # [L, U] is interval where we still need to find unimodal function,
    # the modal interval
    L = 0
    U = len(xF) - 1
    # iGfin are the indices of xF where the optimal unimodal distribution is greatest
    # convex minorant to (xF, yF+dip)
    # iHfin are the indices of xF where the optimal unimodal distribution is least
    # concave majorant to (xF, yF-dip)
    iGfin = L
    iHfin = U
    while 1:
        iGG = greatest_convex_minorant_sorted(xF[L:(U+1)], yF[L:(U+1)])
        iHH = least_concave_majorant_sorted(xF[L:(U+1)], yF[L:(U+1)])
        iG = np.arange(L, U+1)[iGG]
        iH = np.arange(L, U+1)[iHH]
        # Interpolate. First and last point are in both and does not need
        # interpolation. Might cause trouble if included due to possibility
        # of infinity slope at beginning or end of interval.
        if iG[0] != iH[0] or iG[-1] != iH[-1]:
            raise ValueError('Convex minorant and concave majorant should start and end at same points.')
        hipl = np.interp(xF[iG[1:-1]], xF[iH], yF[iH])
        gipl = np.interp(xF[iH[1:-1]], xF[iG], yF[iG])
        hipl = np.hstack([yF[iH[0]], hipl, yF[iH[-1]]])
        gipl = np.hstack([yF[iG[0]], gipl, yF[iG[-1]]])
        # Find largest difference between GCM and LCM.
        gdiff = hipl - yF[iG]
        hdiff = yF[iH] - gipl
        imaxdiffg = np.argmax(gdiff)
        imaxdiffh = np.argmax(hdiff)
        d = max(gdiff[imaxdiffg], hdiff[imaxdiffh])
        # Find new modal interval so that largest difference is at endpoint
        # and set d to largest distance between current GCM and LCM.
        if gdiff[imaxdiffg] > hdiff[imaxdiffh]:
            L0 = iG[imaxdiffg]
            U0 = iH[iH >= L0][0]
        else:
            U0 = iH[imaxdiffh]
            L0 = iG[iG <= U0][-1]
        # Add points outside the modal interval to the final GCM and LCM.
        iGfin = np.hstack([iGfin, iG[(iG <= L0)*(iG > L)]])
        iHfin = np.hstack([iH[(iH >= U0)*(iH < U)], iHfin])
        # Compute new lower bound for dip*2
        # i.e. largest difference outside modal interval
        gipl = np.interp(xF[L:(L0+1)], xF[iG], yF[iG])
        D = max(D, np.amax(yF[L:(L0+1)] - gipl))
        hipl = np.interp(xF[U0:(U+1)], xF[iH], yF[iH])
        D = max(D, np.amax(hipl - yF[U0:(U+1)]))
        if xF[U0]-xF[L0] < eps:
            if verbose:
                print("Modal interval zero length")
            break
        # Change modal interval
        L = L0
        U = U0
        if d <= D:
            if verbose:
                print("Difference in modal interval smaller than new dip")
            break
    # Find string position in modal interval.
    # (The original code executed this entire block twice in a row with
    # identical inputs; the first pass was pure dead code and has been
    # removed.)
    iM = np.arange(iGfin[-1], iHfin[0]+1)
    yM_lower = yF[iM]-D/2
    yM_lower[0] = yF[iM[0]]+D/2
    iMM_concave = least_concave_majorant_sorted(xF[iM], yM_lower)
    iM_concave = iM[iMM_concave]
    lcm_ipl = np.interp(xF[iM], xF[iM_concave], yM_lower[iMM_concave])
    try:
        mode = iM[np.nonzero(lcm_ipl > yF[iM]+D/2)[0][-1]]
    except IndexError:
        # No crossing found: the unimodal curve has no separate convex part.
        iM_convex = np.zeros(0, dtype='i')
    else:
        after_mode = iM_concave > mode
        iM_concave = iM_concave[after_mode]
        iMM_concave = iMM_concave[after_mode]
        iM = iM[iM <= mode]
        iM_convex = iM[greatest_convex_minorant_sorted(xF[iM], yF[iM])]
    # Closest unimodal curve: convex part shifted up by D/2, concave part
    # from the string construction, tails from the final GCM/LCM.
    xU = xF[np.hstack([iGfin[:-1], iM_convex, iM_concave, iHfin[1:]])]
    yU = np.hstack([yF[iGfin[:-1]] + D/2, yF[iM_convex] + D/2,
                    yM_lower[iMM_concave], yF[iHfin[1:]] - D/2])
    # Add points so unimodal curve goes from 0 to 1
    k_start = (yU[1]-yU[0])/(xU[1]-xU[0]+1e-5)
    xU_start = xU[0] - yU[0]/(k_start+1e-5)
    k_end = (yU[-1]-yU[-2])/(xU[-1]-xU[-2]+1e-5)
    xU_end = xU[-1] + (1-yU[-1])/(k_end+1e-5)
    xU = np.hstack([xU_start, xU, xU_end])
    yU = np.hstack([0, yU, 1])
    return D/2, (xU, yU)
def greatest_convex_minorant_sorted(x, y):
    """GCM indices of (x, y): the convex minorant is the reflection of
    the least concave majorant of (x, -y)."""
    return least_concave_majorant_sorted(x, -y)
def least_concave_majorant_sorted(x, y, eps=1e-12):
    """Indices of the points of (x, y) lying on the least concave majorant.

    Assumes x is sorted.  Walks left to right, always jumping to the
    remaining point with the steepest slope from the current vertex,
    which traces the upper concave hull.  At most two copies of each
    x-value (within eps) are supported; more raise ValueError.
    """
    i = [0]
    icurr = 0
    while icurr < len(x) - 1:
        if np.abs(x[icurr+1]-x[icurr]) > eps:
            # Distinct next x: jump to the point of maximal slope.
            q = (y[(icurr+1):]-y[icurr])/(x[(icurr+1):]-x[icurr])
            icurr += 1 + np.argmax(q)
            i.append(icurr)
        elif y[icurr+1] > y[icurr] or icurr == len(x)-2:
            # Duplicate x with a larger y (vertical jump) or end of the
            # array: simply step forward.
            icurr += 1
            i.append(icurr)
        elif np.abs(x[icurr+2]-x[icurr]) > eps:
            # Duplicate x with a smaller/equal y: skip over it and jump to
            # the steepest slope among the points past the duplicate.
            q = (y[(icurr+2):]-y[icurr])/(x[(icurr+2):]-x[icurr])
            icurr += 2 + np.argmax(q)
            i.append(icurr)
        else:
            print("x[icurr] = {}, x[icurr+1] = {}, x[icurr+2] = {}".format(x[icurr],
                  x[icurr+1], x[icurr+2]))
            raise ValueError('Maximum two copies of each x-value allowed')
    return np.array(i)
class KernelDensityDerivative(object):
    """Gaussian kernel estimate of a density (deriv_order=0) or of its
    second derivative (deriv_order=2), using the Silverman bandwidth."""

    def __init__(self, data, deriv_order):
        if deriv_order == 0:
            self.kernel = lambda u: np.exp(-u**2/2)
        elif deriv_order == 2:
            self.kernel = lambda u: (u**2-1)*np.exp(-u**2/2)
        else:
            raise ValueError('Not implemented for derivative of order {}'.format(deriv_order))
        self.deriv_order = deriv_order
        self.h = silverman_bandwidth(data, deriv_order)
        self.datah = data/self.h

    def evaluate(self, x):
        """Evaluate the estimate at the points ``x``."""
        xh = np.array(x).reshape(-1)/self.h
        res = np.zeros(len(xh))
        # Loop over whichever array is shorter to keep temporaries small.
        if len(xh) > len(self.datah):
            for d in self.datah:
                res += self.kernel(d - xh)
        else:
            for k, x_ in enumerate(xh):
                res[k] = np.sum(self.kernel(self.datah - x_))
        return res*1./(np.sqrt(2*np.pi)*self.h**(1+self.deriv_order)*len(self.datah))

    def score_samples(self, x):
        """scikit-learn-style alias for :meth:`evaluate`."""
        return self.evaluate(x)
def silverman_bandwidth(data, deriv_order=0):
    """Silverman plug-in bandwidth: sample std times an n-dependent factor."""
    return np.std(data, ddof=1) * bandwidth_factor(data.shape[0], deriv_order)
def bandwidth_factor(nbr_data_pts, deriv_order=0):
    '''
    Scale factor for one-dimensional plug-in bandwidth selection.
    '''
    # (coefficient, exponent) for each supported derivative order.
    params = {0: (3.0/4, -1.0/5), 2: (7.0/4, -1.0/9)}
    if deriv_order not in params:
        raise ValueError('Not implemented for derivative of order {}'.format(deriv_order))
    coef, power = params[deriv_order]
    return (coef*nbr_data_pts)**power
def calibrated_dip_test(data, N_bootstrap=1000):
    """Bootstrap-calibrated dip test p-value.

    Estimates a shape statistic d_hat at the kernel-density mode, selects
    a reference distribution with matching d_hat, and returns the fraction
    of bootstrap dips from that reference that exceed the observed dip.
    """
    x_edf, y_edf = cum_distr(data)
    observed_dip = dip_from_cdf(x_edf, y_edf)
    n_eval = 512
    density = KernelDensityDerivative(data, 0)
    density_bis = KernelDensityDerivative(data, 2)
    grid = np.linspace(np.min(data), np.max(data), n_eval)
    density_on_grid = density.evaluate(grid)
    i_mode = np.argmax(density_on_grid)
    d_hat = np.abs(density_bis.evaluate(grid[i_mode]))/density_on_grid[i_mode]**3
    ref_distr = select_calibration_distribution(d_hat)
    ref_dips = np.zeros(N_bootstrap)
    for b in range(N_bootstrap):
        resample = ref_distr.sample(len(data))
        x_b, y_b = cum_distr(resample)
        ref_dips[b] = dip_from_cdf(x_b, y_b)
    return np.mean(ref_dips > observed_dip)
def select_calibration_distribution(d_hat):
    """Pick a reference unimodal distribution matching the shape statistic.

    Chooses (by d_hat) among a standard normal, a symmetric Beta(beta, beta)
    and a scaled Student-t, solving for the beta parameter by bracketed
    root-finding over a precomputed table of (beta, gamma(beta)) pairs.
    """
    # data_dir = os.path.join('.', 'data')
    # print(data_dir)
    # with open(os.path.join(data_dir, 'gammaval.pkl'), 'r') as f:
    #     savedat = pickle.load(f)
    # Precomputed table (inlined instead of loaded from gammaval.pkl):
    # beta values are exp(0..19) resp. exp(0..19)/2; gamma arrays hold the
    # corresponding shape statistic for the beta / Student-t families.
    savedat = {'beta_betadistr': np.array([1.0,
                                           2.718281828459045,
                                           7.38905609893065,
                                           20.085536923187668,
                                           54.598150033144236,
                                           148.4131591025766,
                                           403.4287934927351,
                                           1096.6331584284585,
                                           2980.9579870417283,
                                           8103.083927575384,
                                           22026.465794806718,
                                           59874.14171519782,
                                           162754.79141900392,
                                           442413.3920089205,
                                           1202604.2841647768,
                                           3269017.3724721107,
                                           8886110.520507872,
                                           24154952.7535753,
                                           65659969.13733051,
                                           178482300.96318725]),
               'beta_studentt': np.array([0.5,
                                          1.3591409142295225,
                                          3.694528049465325,
                                          10.042768461593834,
                                          27.299075016572118,
                                          74.2065795512883,
                                          201.71439674636756,
                                          548.3165792142293,
                                          1490.4789935208642,
                                          4051.541963787692,
                                          11013.232897403359,
                                          29937.07085759891,
                                          81377.39570950196,
                                          221206.69600446025,
                                          601302.1420823884,
                                          1634508.6862360553,
                                          4443055.260253936,
                                          12077476.37678765,
                                          32829984.568665255,
                                          89241150.48159362]),
               'gamma_betadistr': np.array([0.0,
                                            4.3521604788918555,
                                            5.619663288128619,
                                            6.045132289787511,
                                            6.196412312629769,
                                            6.251371005194619,
                                            6.271496014102775,
                                            6.2788870215785195,
                                            6.281604322090273,
                                            6.282603731161307,
                                            6.282971362173459,
                                            6.283106602190213,
                                            6.283156350612787,
                                            6.283174653445515,
                                            6.2831813886918635,
                                            6.283183865648734,
                                            6.283184776870057,
                                            6.283185112089616,
                                            6.283185235410011,
                                            6.2831852807770385]),
               'gamma_studentt': np.array([np.inf,
                                           13.130440672051542,
                                           7.855693794235218,
                                           6.787835735957803,
                                           6.46039623388715,
                                           6.3473005818376755,
                                           6.306629302123698,
                                           6.291790708027913,
                                           6.286348471156239,
                                           6.284348620590986,
                                           6.283613218820035,
                                           6.283342721628752,
                                           6.283243215161844,
                                           6.28320661564662,
                                           6.283193150917383,
                                           6.283188190242287,
                                           6.283186367798792,
                                           6.28318569735954,
                                           6.283185450718775,
                                           6.283185359984703])}
    # d_hat == pi (within tolerance) corresponds to the Gaussian case.
    if np.abs(d_hat-np.pi) < 1e-4:
        return RefGaussian()
    if d_hat < 2*np.pi:  # beta distribution
        gamma = lambda beta: 2*(beta-1)*betafun(beta, 1.0/2)**2 - d_hat
        # Bracket the root of gamma(beta) = 0 using the precomputed table.
        i = np.searchsorted(savedat['gamma_betadistr'], d_hat)
        beta_left = savedat['beta_betadistr'][i-1]
        beta_right = savedat['beta_betadistr'][i]
        beta = brentq(gamma, beta_left, beta_right)
        return RefBeta(beta)
    # student t distribution
    gamma = lambda beta: 2*beta*betafun(beta-1./2, 1./2)**2 - d_hat
    # gamma_studentt is decreasing, so negate both sides for searchsorted.
    i = np.searchsorted(-savedat['gamma_studentt'], -d_hat)
    beta_left = savedat['beta_studentt'][i-1]
    beta_right = savedat['beta_studentt'][i]
    beta = brentq(gamma, beta_left, beta_right)
    return RefStudentt(beta)
class RefGaussian(object):
    """Standard normal reference distribution for dip calibration."""

    def sample(self, n):
        """Draw ``n`` iid N(0, 1) samples."""
        return np.random.randn(n)
class RefBeta(object):
    """Symmetric Beta(beta, beta) reference distribution on (0, 1)."""

    def __init__(self, beta):
        self.beta = beta

    def sample(self, n):
        """Draw ``n`` iid Beta(beta, beta) samples."""
        return np.random.beta(self.beta, self.beta, n)
class RefStudentt(object):
    """Student-t reference distribution with 2*beta-1 degrees of freedom,
    scaled by 1/sqrt(dof)."""

    def __init__(self, beta):
        self.beta = beta

    def sample(self, n):
        """Draw ``n`` scaled t(2*beta-1) samples."""
        dof = 2*self.beta-1
        return 1./np.sqrt(dof)*np.random.standard_t(dof, n)
|
tompollard/tableone
|
modality.py
|
dip_and_closest_unimodal_from_cdf
|
python
|
def dip_and_closest_unimodal_from_cdf(xF, yF, plotting=False, verbose=False, eps=1e-12):
'''
Dip computed as distance between empirical distribution function (EDF) and
cumulative distribution function for the unimodal distribution with
smallest such distance. The optimal unimodal distribution is found by
the algorithm presented in
    Hartigan (1985): Computation of the dip statistic to test for
    unimodality. Applied Statistics, vol. 34, no. 3
If the plotting option is enabled the optimal unimodal distribution
function is plotted along with (xF, yF-dip) and (xF, yF+dip)
xF - x-coordinates for EDF
yF - y-coordinates for EDF
'''
## TODO! Preprocess xF and yF so that yF increasing and xF does
## not have more than two copies of each x-value.
if (xF[1:]-xF[:-1] < -eps).any():
raise ValueError('Need sorted x-values to compute dip')
if (yF[1:]-yF[:-1] < -eps).any():
raise ValueError('Need sorted y-values to compute dip')
# if plotting:
# Nplot = 5
# bfig = plt.figure(figsize=(12, 3))
# i = 1 # plot index
D = 0 # lower bound for dip*2
# [L, U] is interval where we still need to find unimodal function,
# the modal interval
L = 0
U = len(xF) - 1
# iGfin are the indices of xF where the optimal unimodal distribution is greatest
# convex minorant to (xF, yF+dip)
# iHfin are the indices of xF where the optimal unimodal distribution is least
# concave majorant to (xF, yF-dip)
iGfin = L
iHfin = U
while 1:
iGG = greatest_convex_minorant_sorted(xF[L:(U+1)], yF[L:(U+1)])
iHH = least_concave_majorant_sorted(xF[L:(U+1)], yF[L:(U+1)])
iG = np.arange(L, U+1)[iGG]
iH = np.arange(L, U+1)[iHH]
# Interpolate. First and last point are in both and does not need
# interpolation. Might cause trouble if included due to possiblity
# of infinity slope at beginning or end of interval.
if iG[0] != iH[0] or iG[-1] != iH[-1]:
raise ValueError('Convex minorant and concave majorant should start and end at same points.')
hipl = np.interp(xF[iG[1:-1]], xF[iH], yF[iH])
gipl = np.interp(xF[iH[1:-1]], xF[iG], yF[iG])
hipl = np.hstack([yF[iH[0]], hipl, yF[iH[-1]]])
gipl = np.hstack([yF[iG[0]], gipl, yF[iG[-1]]])
#hipl = lin_interpol_sorted(xF[iG], xF[iH], yF[iH])
#gipl = lin_interpol_sorted(xF[iH], xF[iG], yF[iG])
# Find largest difference between GCM and LCM.
gdiff = hipl - yF[iG]
hdiff = yF[iH] - gipl
imaxdiffg = np.argmax(gdiff)
imaxdiffh = np.argmax(hdiff)
d = max(gdiff[imaxdiffg], hdiff[imaxdiffh])
# # Plot current GCM and LCM.
# if plotting:
# if i > Nplot:
# bfig = plt.figure(figsize=(12, 3))
# i = 1
# bax = bfig.add_subplot(1, Nplot, i)
# bax.plot(xF, yF, color='red')
# bax.plot(xF, yF-d/2, color='black')
# bax.plot(xF, yF+d/2, color='black')
# bax.plot(xF[iG], yF[iG]+d/2, color='blue')
# bax.plot(xF[iH], yF[iH]-d/2, color='blue')
# if d <= D:
# if verbose:
# print("Difference in modal interval smaller than current dip")
# break
# Find new modal interval so that largest difference is at endpoint
# and set d to largest distance between current GCM and LCM.
if gdiff[imaxdiffg] > hdiff[imaxdiffh]:
L0 = iG[imaxdiffg]
U0 = iH[iH >= L0][0]
else:
U0 = iH[imaxdiffh]
L0 = iG[iG <= U0][-1]
# Add points outside the modal interval to the final GCM and LCM.
iGfin = np.hstack([iGfin, iG[(iG <= L0)*(iG > L)]])
iHfin = np.hstack([iH[(iH >= U0)*(iH < U)], iHfin])
# # Plot new modal interval
# if plotting:
# ymin, ymax = bax.get_ylim()
# bax.axvline(xF[L0], ymin, ymax, color='orange')
# bax.axvline(xF[U0], ymin, ymax, color='red')
# bax.set_xlim(xF[L]-.1*(xF[U]-xF[L]), xF[U]+.1*(xF[U]-xF[L]))
# Compute new lower bound for dip*2
# i.e. largest difference outside modal interval
gipl = np.interp(xF[L:(L0+1)], xF[iG], yF[iG])
D = max(D, np.amax(yF[L:(L0+1)] - gipl))
hipl = np.interp(xF[U0:(U+1)], xF[iH], yF[iH])
D = max(D, np.amax(hipl - yF[U0:(U+1)]))
if xF[U0]-xF[L0] < eps:
if verbose:
print("Modal interval zero length")
break
# if plotting:
# mxpt = np.argmax(yF[L:(L0+1)] - gipl)
# bax.plot([xF[L:][mxpt], xF[L:][mxpt]], [yF[L:][mxpt]+d/2,
# gipl[mxpt]+d/2], '+', color='red')
# mxpt = np.argmax(hipl - yF[U0:(U+1)])
# bax.plot([xF[U0:][mxpt], xF[U0:][mxpt]], [yF[U0:][mxpt]-d/2,
# hipl[mxpt]-d/2], '+', color='red')
# i += 1
# Change modal interval
L = L0
U = U0
if d <= D:
if verbose:
print("Difference in modal interval smaller than new dip")
break
# if plotting:
# # Add modal interval to figure
# bax.axvline(xF[L0], ymin, ymax, color='green', linestyle='dashed')
# bax.axvline(xF[U0], ymin, ymax, color='green', linestyle='dashed')
# ## Plot unimodal function (not distribution function)
# bfig = plt.figure()
# bax = bfig.add_subplot(1, 1, 1)
# bax.plot(xF, yF, color='red')
# bax.plot(xF, yF-D/2, color='black')
# bax.plot(xF, yF+D/2, color='black')
# Find string position in modal interval
iM = np.arange(iGfin[-1], iHfin[0]+1)
yM_lower = yF[iM]-D/2
yM_lower[0] = yF[iM[0]]+D/2
iMM_concave = least_concave_majorant_sorted(xF[iM], yM_lower)
iM_concave = iM[iMM_concave]
#bax.plot(xF[iM], yM_lower, color='orange')
#bax.plot(xF[iM_concave], yM_lower[iMM_concave], color='red')
lcm_ipl = np.interp(xF[iM], xF[iM_concave], yM_lower[iMM_concave])
try:
mode = iM[np.nonzero(lcm_ipl > yF[iM]+D/2)[0][-1]]
#bax.axvline(xF[mode], color='green', linestyle='dashed')
except IndexError:
iM_convex = np.zeros(0, dtype='i')
else:
after_mode = iM_concave > mode
iM_concave = iM_concave[after_mode]
iMM_concave = iMM_concave[after_mode]
iM = iM[iM <= mode]
iM_convex = iM[greatest_convex_minorant_sorted(xF[iM], yF[iM])]
# if plotting:
# bax.plot(xF[np.hstack([iGfin, iM_convex, iM_concave, iHfin])],
# np.hstack([yF[iGfin] + D/2, yF[iM_convex] + D/2,
# yM_lower[iMM_concave], yF[iHfin] - D/2]), color='blue')
# #bax.plot(xF[iM], yM_lower, color='orange')
# ## Plot unimodal distribution function
# bfig = plt.figure()
# bax = bfig.add_subplot(1, 1, 1)
# bax.plot(xF, yF, color='red')
# bax.plot(xF, yF-D/2, color='black')
# bax.plot(xF, yF+D/2, color='black')
# Find string position in modal interval
iM = np.arange(iGfin[-1], iHfin[0]+1)
yM_lower = yF[iM]-D/2
yM_lower[0] = yF[iM[0]]+D/2
iMM_concave = least_concave_majorant_sorted(xF[iM], yM_lower)
iM_concave = iM[iMM_concave]
#bax.plot(xF[iM], yM_lower, color='orange')
#bax.plot(xF[iM_concave], yM_lower[iMM_concave], color='red')
lcm_ipl = np.interp(xF[iM], xF[iM_concave], yM_lower[iMM_concave])
try:
mode = iM[np.nonzero(lcm_ipl > yF[iM]+D/2)[0][-1]]
#bax.axvline(xF[mode], color='green', linestyle='dashed')
except IndexError:
iM_convex = np.zeros(0, dtype='i')
else:
after_mode = iM_concave > mode
iM_concave = iM_concave[after_mode]
iMM_concave = iMM_concave[after_mode]
iM = iM[iM <= mode]
iM_convex = iM[greatest_convex_minorant_sorted(xF[iM], yF[iM])]
# Closest unimodal curve
xU = xF[np.hstack([iGfin[:-1], iM_convex, iM_concave, iHfin[1:]])]
yU = np.hstack([yF[iGfin[:-1]] + D/2, yF[iM_convex] + D/2,
yM_lower[iMM_concave], yF[iHfin[1:]] - D/2])
# Add points so unimodal curve goes from 0 to 1
k_start = (yU[1]-yU[0])/(xU[1]-xU[0]+1e-5)
xU_start = xU[0] - yU[0]/(k_start+1e-5)
k_end = (yU[-1]-yU[-2])/(xU[-1]-xU[-2]+1e-5)
xU_end = xU[-1] + (1-yU[-1])/(k_end+1e-5)
xU = np.hstack([xU_start, xU, xU_end])
yU = np.hstack([0, yU, 1])
# if plotting:
# bax.plot(xU, yU, color='blue')
# #bax.plot(xF[iM], yM_lower, color='orange')
# plt.show()
return D/2, (xU, yU)
|
Dip computed as distance between empirical distribution function (EDF) and
cumulative distribution function for the unimodal distribution with
smallest such distance. The optimal unimodal distribution is found by
the algorithm presented in
Hartigan (1985): Computation of the dip statistic to test for
unimodality. Applied Statistics, vol. 34, no. 3
If the plotting option is enabled the optimal unimodal distribution
function is plotted along with (xF, yF-dip) and (xF, yF+dip)
xF - x-coordinates for EDF
yF - y-coordinates for EDF
|
train
|
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/modality.py#L719-L942
| null |
# ###################################### #
# #
# Updated by: Tom Pollard (2018.03.19) #
# Author: Kerstin Johnsson #
# License: MIT License #
# Available from: #
# https://github.com/kjohnsson/modality #
# #
# ###################################### #
import numpy as np
from scipy.special import beta as betafun
# import matplotlib.pyplot as plt
from scipy.optimize import brentq
import os
import pandas as pd
np.random.seed(1337)
# import pickle
def generate_data(peaks=2, n=None, mu=None, std=None):
    """Draw samples from a mixture of Gaussians, one component per peak.

    peaks - number of mixture components.
    n     - list of per-component sample counts (default 5000 each).
    mu    - list of component means (default: random integers in [0, 30)).
    std   - list of component standard deviations (default 1.0 each).
    """
    # Fill in defaults for any parameter the caller omitted.
    if not n:
        n = [5000] * peaks
    if not mu:
        mu = np.random.randint(0, 30, peaks)
    if not std:
        std = [1.0] * peaks
    # Sample each component, then join them into a single data set.
    samples = [np.random.normal(loc=mu[k], scale=std[k], size=n[k])
               for k in range(peaks)]
    return np.concatenate(samples)
def hartigan_diptest(data):
    '''
    P-value according to Hartigan's dip test for unimodality.
    The dip is computed using the function
    dip_and_closest_unimodal_from_cdf. From this the p-value is
    interpolated using a table imported from the R package diptest.
    References:
        Hartigan and Hartigan (1985): The dip test of unimodality.
        The Annals of Statistics. 13(1).
    Input:
        data - one-dimensional data set.
    Value:
        p-value for the test (np.nan if the test could not be computed).
    '''
    try:
        # Drop NaNs before computing the dip; the downstream sort-based
        # CDF construction cannot handle them.
        p = pval_hartigan(data[~np.isnan(data)])
    except Exception:
        # Best-effort: any computational failure (e.g. too few points)
        # yields NaN.  Catch Exception rather than a bare except so that
        # KeyboardInterrupt/SystemExit still propagate.
        p = np.nan
    return p
def pval_hartigan(data):
    """Hartigan dip-test p-value for ``data`` via table interpolation."""
    x_cdf, y_cdf = cum_distr(data)
    dip_value = dip_from_cdf(x_cdf, y_cdf)
    return dip_pval_tabinterpol(dip_value, len(data))
def cum_distr(data, w=None):
    """Weighted empirical distribution function (EDF) of ``data``.

    Input:
        data - one-dimensional data set.
        w    - observation weights; defaults to uniform 1/len(data).
    Value:
        (x, y) step-function coordinates of the EDF; each unique data
        value appears twice in x so the vertical jumps are traced, and
        y is normalized to end at exactly 1.
    """
    if w is None:
        w = np.ones(len(data))*1./len(data)
    eps = 1e-10
    data_ord = np.argsort(data)
    data_sort = data[data_ord]
    w_sort = w[data_ord]
    # Merge data points that are closer than eps; their weights are
    # summed below so the total mass is preserved.
    data_sort, indices = unique(data_sort, return_index=True, eps=eps, is_sorted=True)
    if len(indices) < len(data_ord):
        w_unique = np.zeros(len(indices))
        for i in range(len(indices)-1):
            w_unique[i] = np.sum(w_sort[indices[i]:indices[i+1]])
        w_unique[-1] = np.sum(w_sort[indices[-1]:])
        w_sort = w_unique
    wcum = np.cumsum(w_sort)
    wcum /= wcum[-1]  # normalize so the EDF ends at exactly 1
    # Duplicate each x-coordinate so the step function's jumps are
    # explicit: at each data point y rises from the previous cumulative
    # weight to the new one.
    N = len(data_sort)
    x = np.empty(2*N)
    x[2*np.arange(N)] = data_sort
    x[2*np.arange(N)+1] = data_sort
    y = np.empty(2*N)
    y[0] = 0
    y[2*np.arange(N)+1] = wcum
    y[2*np.arange(N-1)+2] = wcum[:-1]
    return x, y
def unique(data, return_index, eps, is_sorted=True):
    """Collapse values of ``data`` lying within ``eps`` of each other.

    Keeps the first representative of every eps-separated cluster.  When
    ``return_index`` is true, also returns the indices (into ``data``)
    of the kept values.
    """
    if is_sorted:
        data_sort = data
    else:
        order = np.argsort(data)
        rank = np.argsort(order)
        data_sort = data[order]
    # Mark the first element of every eps-separated group as unique.
    keep_sorted = np.ones(len(data_sort), dtype='bool')
    last_kept = 0
    for pos in range(1, len(data_sort)):
        if data_sort[pos] - data_sort[last_kept] < eps:
            keep_sorted[pos] = False
        else:
            last_kept = pos
    if is_sorted:
        data_unique = data[keep_sorted]
    else:
        # Map the sorted-order mask back to the original ordering.
        keep = keep_sorted[rank]
        data_unique = data[keep]
    if not return_index:
        return data_unique
    if is_sorted:
        ind_unique = np.nonzero(keep_sorted)[0]
    else:
        ind_unique = np.nonzero(keep)[0]
    return data_unique, ind_unique
def dip_from_cdf(xF, yF, plotting=False, verbose=False, eps=1e-12):
    """Return only the dip statistic for the empirical CDF (xF, yF)."""
    dip_value, _unimodal = dip_and_closest_unimodal_from_cdf(
        xF, yF, plotting, verbose, eps)
    return dip_value
def dip_pval_tabinterpol(dip, N):
'''
dip - dip value computed from dip_from_cdf
N - number of observations
'''
# if qDiptab_df is None:
# raise DataError("Tabulated p-values not available. See installation instructions.")
if np.isnan(N) or N < 10:
return np.nan
qDiptab_dict = {'0': {4: 0.125,
5: 0.1,
6: 0.0833333333333333,
7: 0.0714285714285714,
8: 0.0625,
9: 0.0555555555555556,
10: 0.05,
15: 0.0341378172277919,
20: 0.033718563622065004,
30: 0.0262674485075642,
50: 0.0218544781364545,
100: 0.0164852597438403,
200: 0.0111236388849688,
500: 0.007554885975761959,
1000: 0.00541658127872122,
2000: 0.0039043999745055702,
5000: 0.00245657785440433,
10000: 0.00174954269199566,
20000: 0.00119458814106091,
40000: 0.000852415648011777,
72000: 0.000644400053256997},
'0.01': {4: 0.125,
5: 0.1,
6: 0.0833333333333333,
7: 0.0714285714285714,
8: 0.0625,
9: 0.0613018090298924,
10: 0.0610132555623269,
15: 0.0546284208048975,
20: 0.0474333740698401,
30: 0.0395871890405749,
50: 0.0314400501999916,
100: 0.022831985803043,
200: 0.0165017735429825,
500: 0.0106403461127515,
1000: 0.0076028674530018705,
2000: 0.0054166418179658294,
5000: 0.0034480928223332603,
10000: 0.00244595133885302,
20000: 0.00173435346896287,
40000: 0.00122883479310665,
72000: 0.000916872204484283},
'0.02': {4: 0.125,
5: 0.1,
6: 0.0833333333333333,
7: 0.0714285714285714,
8: 0.0656911994503283,
9: 0.0658615858179315,
10: 0.0651627333214016,
15: 0.0572191260231815,
20: 0.0490891387627092,
30: 0.0414574606741673,
50: 0.0329008160470834,
100: 0.0238917486442849,
200: 0.0172594157992489,
500: 0.0111255573208294,
1000: 0.00794987834644799,
2000: 0.0056617138625232296,
5000: 0.00360473943713036,
10000: 0.00255710802275612,
20000: 0.0018119443458468102,
40000: 0.0012846930445701802,
72000: 0.0009579329467655321},
'0.05': {4: 0.125,
5: 0.1,
6: 0.0833333333333333,
7: 0.0725717816250742,
8: 0.0738651136071762,
9: 0.0732651142535317,
10: 0.0718321619656165,
15: 0.0610087367689692,
20: 0.052719998201553,
30: 0.0444462614069956,
50: 0.0353023819040016,
100: 0.0256559537977579,
200: 0.0185259426032926,
500: 0.0119353655328931,
1000: 0.0085216518343594,
2000: 0.00607120971135229,
5000: 0.0038632654801084897,
10000: 0.00273990955227265,
20000: 0.00194259470485893,
40000: 0.0013761765052555301,
72000: 0.00102641863872347},
'0.1': {4: 0.125,
5: 0.1,
6: 0.0833333333333333,
7: 0.0817315478539489,
8: 0.0820045917762512,
9: 0.0803941629593475,
10: 0.077966212182459,
15: 0.0642657137330444,
20: 0.0567795509056742,
30: 0.0473998525042686,
50: 0.0377279973102482,
100: 0.0273987414570948,
200: 0.0197917612637521,
500: 0.0127411306411808,
1000: 0.00909775605533253,
2000: 0.0064762535755248,
5000: 0.00412089506752692,
10000: 0.0029225480567908,
20000: 0.00207173719623868,
40000: 0.0014675150200632301,
72000: 0.0010949515421800199},
'0.2': {4: 0.125,
5: 0.1,
6: 0.0924514470941933,
7: 0.0940590181922527,
8: 0.0922700601131892,
9: 0.0890432420913848,
10: 0.0852835359834564,
15: 0.0692234107989591,
20: 0.0620134674468181,
30: 0.0516677370374349,
50: 0.0410699984399582,
100: 0.0298109370830153,
200: 0.0215233745778454,
500: 0.0138524542751814,
1000: 0.00988924521014078,
2000: 0.00703573098590029,
5000: 0.00447640050137479,
10000: 0.00317374638422465,
20000: 0.00224993202086955,
40000: 0.00159376453672466,
72000: 0.00118904090369415},
'0.3': {4: 0.125,
5: 0.1,
6: 0.103913431059949,
7: 0.10324449080087102,
8: 0.0996737189599363,
9: 0.0950811420297928,
10: 0.0903204173707099,
15: 0.0745462114365167,
20: 0.0660163872069048,
30: 0.0551037519001622,
50: 0.0437704598622665,
100: 0.0317771496530253,
200: 0.0229259769870428,
500: 0.0147536004288476,
1000: 0.0105309297090482,
2000: 0.007494212545892991,
5000: 0.00476555693102276,
10000: 0.00338072258533527,
20000: 0.00239520831473419,
40000: 0.00169668445506151,
72000: 0.00126575197699874},
'0.4': {4: 0.125,
5: 0.10872059357632902,
6: 0.113885220640212,
7: 0.110964599995697,
8: 0.10573353180273701,
9: 0.0999380897811046,
10: 0.0943334983745117,
15: 0.0792030878981762,
20: 0.0696506075066401,
30: 0.058265005347492994,
50: 0.0462925642671299,
100: 0.0336073821590387,
200: 0.024243848341112,
500: 0.0155963185751048,
1000: 0.0111322726797384,
2000: 0.007920878896017329,
5000: 0.005037040297500721,
10000: 0.0035724387653598205,
20000: 0.00253036792824665,
40000: 0.0017925341833790601,
72000: 0.00133750966361506},
'0.5': {4: 0.125,
5: 0.12156379802641401,
6: 0.123071187137781,
7: 0.11780784650433501,
8: 0.11103512984770501,
9: 0.10415356007586801,
10: 0.0977817630384725,
15: 0.083621033469191,
20: 0.0733437740592714,
30: 0.0614510857304343,
50: 0.048851155289608,
100: 0.0354621760592113,
200: 0.025584358256487003,
500: 0.0164519238025286,
1000: 0.0117439009052552,
2000: 0.008355737247680059,
5000: 0.0053123924740821294,
10000: 0.00376734715752209,
20000: 0.00266863168718114,
40000: 0.00189061261635977,
72000: 0.00141049709228472},
'0.6': {4: 0.125,
5: 0.134318918697053,
6: 0.13186973390253,
7: 0.124216086833531,
8: 0.11592005574998801,
9: 0.10800780236193198,
10: 0.102180866696628,
15: 0.0881198482202905,
20: 0.0776460662880254,
30: 0.0649164408053978,
50: 0.0516145897865757,
100: 0.0374805844550272,
200: 0.0270252129816288,
500: 0.017383057902553,
1000: 0.012405033293814,
2000: 0.00882439333812351,
5000: 0.00560929919359959,
10000: 0.00397885007249132,
20000: 0.0028181999035216,
40000: 0.00199645471886179,
72000: 0.00148936709298802},
'0.7': {4: 0.13255954878268902,
5: 0.14729879897625198,
6: 0.140564796497941,
7: 0.130409013968317,
8: 0.120561479262465,
9: 0.112512617124951,
10: 0.10996094814295099,
15: 0.093124666680253,
20: 0.0824558407118372,
30: 0.0689178762425442,
50: 0.0548121932066019,
100: 0.0398046179116599,
200: 0.0286920262150517,
500: 0.0184503949887735,
1000: 0.0131684179320803,
2000: 0.009367858207170609,
5000: 0.00595352728377949,
10000: 0.00422430013176233,
20000: 0.00299137548142077,
40000: 0.00211929748381704,
72000: 0.00158027541945626},
'0.8': {4: 0.15749736904023498,
5: 0.161085025702604,
6: 0.14941924112913002,
7: 0.136639642123068,
8: 0.125558759034845,
9: 0.12291503348081699,
10: 0.11884476721158699,
15: 0.0996694393390689,
20: 0.08834462700173701,
30: 0.0739249074078291,
50: 0.0588230482851366,
100: 0.0427283846799166,
200: 0.0308006766341406,
500: 0.0198162679782071,
1000: 0.0141377942603047,
2000: 0.01005604603884,
5000: 0.00639092280563517,
10000: 0.00453437508148542,
20000: 0.00321024899920135,
40000: 0.0022745769870358102,
72000: 0.00169651643860074},
'0.9': {4: 0.18740187880755899,
5: 0.176811998476076,
6: 0.159137064572627,
7: 0.144240669035124,
8: 0.141841067033899,
9: 0.136412639387084,
10: 0.130462149644819,
15: 0.11008749690090598,
20: 0.0972346018122903,
30: 0.0814791379390127,
50: 0.0649136324046767,
100: 0.047152783315718,
200: 0.0339967814293504,
500: 0.0218781313182203,
1000: 0.0156148055023058,
2000: 0.0111019116837591,
5000: 0.00705566126234625,
10000: 0.00500178808402368,
20000: 0.00354362220314155,
40000: 0.00250999080890397,
72000: 0.0018730618472582602},
'0.95': {4: 0.20726978858735998,
5: 0.18639179602794398,
6: 0.164769608513302,
7: 0.159903395678336,
8: 0.153978303998561,
9: 0.14660378495401902,
10: 0.139611395137099,
15: 0.118760769203664,
20: 0.105130218270636,
30: 0.0881689143126666,
50: 0.0702737877191269,
100: 0.0511279442868827,
200: 0.0368418413878307,
500: 0.0237294742633411,
1000: 0.0169343970067564,
2000: 0.0120380990328341,
5000: 0.0076506368153935,
10000: 0.00542372242836395,
20000: 0.00384330190244679,
40000: 0.00272375073486223,
72000: 0.00203178401610555},
'0.98': {4: 0.22375580462922195,
5: 0.19361253363045,
6: 0.17917654739278197,
7: 0.17519655327122302,
8: 0.16597856724751,
9: 0.157084065653166,
10: 0.150961728882481,
15: 0.128890475210055,
20: 0.11430970428125302,
30: 0.0960564383013644,
50: 0.0767095886079179,
100: 0.0558022052195208,
200: 0.0402729850316397,
500: 0.025919578977657003,
1000: 0.018513067368104,
2000: 0.0131721010552576,
5000: 0.00836821687047215,
10000: 0.00592656681022859,
20000: 0.00420258799378253,
40000: 0.00298072958568387,
72000: 0.00222356097506054},
'0.99': {4: 0.231796258864192,
5: 0.19650913979884502,
6: 0.191862827995563,
7: 0.184118659121501,
8: 0.172988528276759,
9: 0.164164643657217,
10: 0.159684158858235,
15: 0.13598356863636,
20: 0.120624043335821,
30: 0.101478558893837,
50: 0.0811998415355918,
100: 0.059024132304226,
200: 0.0426864799777448,
500: 0.0274518022761997,
1000: 0.0196080260483234,
2000: 0.0139655122281969,
5000: 0.00886357892854914,
10000: 0.00628034732880374,
20000: 0.00445774902155711,
40000: 0.00315942194040388,
72000: 0.00235782814777627},
'0.995': {4: 0.23726374382677898,
5: 0.198159967287576,
6: 0.20210197104296804,
7: 0.19101439617430602,
8: 0.179010413496374,
9: 0.172821674582338,
10: 0.16719524735674,
15: 0.14245248368127697,
20: 0.126552378036739,
30: 0.10650487144103,
50: 0.0852854646662134,
100: 0.0620425065165146,
200: 0.044958959158761,
500: 0.0288986369564301,
1000: 0.0206489568587364,
2000: 0.0146889122204488,
5000: 0.00934162787186159,
10000: 0.00661030641550873,
20000: 0.00469461513212743,
40000: 0.0033273652798148,
72000: 0.00248343580127067},
'0.998': {4: 0.241992892688593,
5: 0.19924427936243302,
6: 0.213015781111186,
7: 0.198216795232182,
8: 0.186504388711178,
9: 0.182555283567818,
10: 0.175419540856082,
15: 0.15017281653074202,
20: 0.13360135382395,
30: 0.112724636524262,
50: 0.0904847827490294,
100: 0.0658016011466099,
200: 0.0477643873749449,
500: 0.0306813505050163,
1000: 0.0219285176765082,
2000: 0.0156076779647454,
5000: 0.009932186363240291,
10000: 0.00702254699967648,
20000: 0.004994160691291679,
40000: 0.00353988965698579,
72000: 0.00264210826339498},
'0.999': {4: 0.244369839049632,
5: 0.199617527406166,
6: 0.219518627282415,
7: 0.20234101074826102,
8: 0.19448404115794,
9: 0.188658833121906,
10: 0.180611195797351,
15: 0.15545613369632802,
20: 0.138569903791767,
30: 0.117164140184417,
50: 0.0940930106666244,
100: 0.0684479731118028,
200: 0.0497198001867437,
500: 0.0320170996823189,
1000: 0.0228689168972669,
2000: 0.0162685615996248,
5000: 0.0103498795291629,
10000: 0.0073182262815645795,
20000: 0.00520917757743218,
40000: 0.00369400045486625,
72000: 0.0027524322157581},
'0.9995': {4: 0.245966625504691,
5: 0.19980094149902802,
6: 0.22433904739444602,
7: 0.205377566346832,
8: 0.200864297005026,
9: 0.19408912076824603,
10: 0.18528641605039603,
15: 0.160896499106958,
20: 0.14336916123968,
30: 0.12142585990898701,
50: 0.0974904344916743,
100: 0.0709169443994193,
200: 0.0516114611801451,
500: 0.0332452747332959,
1000: 0.023738710122235003,
2000: 0.0168874937789415,
5000: 0.0107780907076862,
10000: 0.0076065423418208,
20000: 0.005403962359243721,
40000: 0.00383345715372182,
72000: 0.0028608570740143},
'0.9998': {4: 0.24743959723326198,
5: 0.19991708183427104,
6: 0.22944933215424101,
7: 0.208306562526874,
8: 0.20884999705022897,
9: 0.19915700809389003,
10: 0.19120308390504398,
15: 0.16697940794624802,
20: 0.148940116394883,
30: 0.126733051889401,
50: 0.10228420428399698,
100: 0.0741183486081263,
200: 0.0540543978864652,
500: 0.0348335698576168,
1000: 0.0248334158891432,
2000: 0.0176505093388153,
5000: 0.0113184316868283,
10000: 0.00795640367207482,
20000: 0.00564540201704594,
40000: 0.0040079346963469605,
72000: 0.00298695044508003},
'0.9999': {4: 0.24823065965663801,
5: 0.19995902909307503,
6: 0.232714530449602,
7: 0.209866047852379,
8: 0.212556040406219,
9: 0.20288159843655804,
10: 0.19580515933918397,
15: 0.17111793515551002,
20: 0.152832538183622,
30: 0.131198578897542,
50: 0.104680624334611,
100: 0.0762579402903838,
200: 0.0558704526182638,
500: 0.0359832389317461,
1000: 0.0256126573433596,
2000: 0.0181944265400504,
5000: 0.0117329446468571,
10000: 0.0082270524584354,
20000: 0.00580460792299214,
40000: 0.00414892737222885,
72000: 0.00309340092038059},
'0.99995': {4: 0.248754269146416,
5: 0.19997839537608197,
6: 0.236548128358969,
7: 0.21096757693345103,
8: 0.21714917413729898,
9: 0.205979795735129,
10: 0.20029398089673,
15: 0.17590050570443203,
20: 0.15601016361897102,
30: 0.133691739483444,
50: 0.107496694235039,
100: 0.0785735967934979,
200: 0.0573877056330228,
500: 0.0369051995840645,
1000: 0.0265491336936829,
2000: 0.0186226037818523,
5000: 0.0119995948968375,
10000: 0.00852240989786251,
20000: 0.00599774739593151,
40000: 0.0042839159079761,
72000: 0.00319932767198801},
'0.99998': {4: 0.24930203997425898,
5: 0.199993151405815,
6: 0.2390887911995,
7: 0.212233348558702,
8: 0.22170007640450304,
9: 0.21054115498898,
10: 0.20565108964621898,
15: 0.18185667601316602,
20: 0.16131922583934502,
30: 0.137831637950694,
50: 0.11140887547015,
100: 0.0813458356889133,
200: 0.0593365901653878,
500: 0.0387221159256424,
1000: 0.027578430100535997,
2000: 0.0193001796565433,
5000: 0.0124410052027886,
10000: 0.00892863905540303,
20000: 0.00633099254378114,
40000: 0.0044187010443287895,
72000: 0.00332688234611187},
'0.99999': {4: 0.24945965232322498,
5: 0.199995525025673,
6: 0.24010356643629502,
7: 0.21266103831250602,
8: 0.225000835357532,
9: 0.21180033095039003,
10: 0.209682048785853,
15: 0.185743454151004,
20: 0.165568255916749,
30: 0.14155750962435099,
50: 0.113536607717411,
100: 0.0832963013755522,
200: 0.0607646310473911,
500: 0.039930259057650005,
1000: 0.0284430733108,
2000: 0.0196241518040617,
5000: 0.0129467396733128,
10000: 0.009138539330002129,
20000: 0.00656987109386762,
40000: 0.00450818604569179,
72000: 0.00339316094477355},
'1': {4: 0.24974836247845,
5: 0.199999835639211,
6: 0.24467288361776798,
7: 0.21353618608817,
8: 0.23377291968768302,
9: 0.21537991431762502,
10: 0.221530282182963,
15: 0.19224056333056197,
20: 0.175834459522789,
30: 0.163833046059817,
50: 0.11788671686531199,
100: 0.0926780423096737,
200: 0.0705309107882395,
500: 0.0431448163617178,
1000: 0.0313640941982108,
2000: 0.0213081254074584,
5000: 0.014396063834027,
10000: 0.00952234579566773,
20000: 0.006858294480462271,
40000: 0.00513477467565583,
72000: 0.00376331697005859}}
qDiptab_df = pd.DataFrame(qDiptab_dict)
diptable = np.array(qDiptab_df)
ps = np.array(qDiptab_df.columns).astype(float)
Ns = np.array(qDiptab_df.index)
if N >= Ns[-1]:
dip = transform_dip_to_other_nbr_pts(dip, N, Ns[-1]-0.1)
N = Ns[-1]-0.1
iNlow = np.nonzero(Ns < N)[0][-1]
qN = (N-Ns[iNlow])/(Ns[iNlow+1]-Ns[iNlow])
dip_sqrtN = np.sqrt(N)*dip
dip_interpol_sqrtN = (
np.sqrt(Ns[iNlow])*diptable[iNlow, :] + qN*(
np.sqrt(Ns[iNlow+1])*diptable[iNlow+1, :]-np.sqrt(Ns[iNlow])*diptable[iNlow, :]))
if not (dip_interpol_sqrtN < dip_sqrtN).any():
return 1
iplow = np.nonzero(dip_interpol_sqrtN < dip_sqrtN)[0][-1]
if iplow == len(dip_interpol_sqrtN) - 1:
return 0
qp = (dip_sqrtN-dip_interpol_sqrtN[iplow])/(dip_interpol_sqrtN[iplow+1]-dip_interpol_sqrtN[iplow])
p_interpol = ps[iplow] + qp*(ps[iplow+1]-ps[iplow])
return 1 - p_interpol
def transform_dip_to_other_nbr_pts(dip_n, n, m):
    """Rescale a dip computed from n points to m points (dip scales ~ 1/sqrt(n))."""
    return dip_n * np.sqrt(n / m)
def greatest_convex_minorant_sorted(x, y):
    """Vertex indices of the greatest convex minorant of (x, y).

    Uses the duality with the least concave majorant: the GCM of y is the
    reflection of the LCM of -y.
    """
    return least_concave_majorant_sorted(x, -y)
def least_concave_majorant_sorted(x, y, eps=1e-12):
    """
    Indices of the vertices of the least concave majorant of the points
    (x, y), where x is sorted ascending with at most two copies (within
    `eps`) of each x-value.

    Returns an integer index array; the first and last point are always
    vertices.

    Raises ValueError if three or more x-values coincide (within eps).
    """
    i = [0]
    icurr = 0
    while icurr < len(x) - 1:
        if np.abs(x[icurr+1]-x[icurr]) > eps:
            # Jump to the remaining point with the steepest chord from icurr:
            # that point is the next vertex of the concave majorant.
            q = (y[(icurr+1):]-y[icurr])/(x[(icurr+1):]-x[icurr])
            icurr += 1 + np.argmax(q)
            i.append(icurr)
        elif y[icurr+1] > y[icurr] or icurr == len(x)-2:
            # Duplicate x-value: step to the higher y (or finish the walk).
            icurr += 1
            i.append(icurr)
        elif np.abs(x[icurr+2]-x[icurr]) > eps:
            # Duplicate x-value with lower y at icurr+1: skip it and take the
            # steepest chord among the points after the duplicate.
            q = (y[(icurr+2):]-y[icurr])/(x[(icurr+2):]-x[icurr])
            icurr += 2 + np.argmax(q)
            i.append(icurr)
        else:
            # Previously this printed the offending values and raised a bare
            # message; put the diagnostics into the exception instead.
            raise ValueError(
                'Maximum two copies of each x-value allowed; '
                'x[{}], x[{}], x[{}] = {}, {}, {}'.format(
                    icurr, icurr+1, icurr+2, x[icurr], x[icurr+1], x[icurr+2]))
    return np.array(i)
class KernelDensityDerivative(object):
    """Gaussian kernel estimate of a 1-D density (deriv_order=0) or of its
    second derivative (deriv_order=2), with a Silverman-type bandwidth."""

    def __init__(self, data, deriv_order):
        """
        data        - one-dimensional sample.
        deriv_order - 0 for the density itself, 2 for its second derivative.

        Raises ValueError for any other derivative order.
        """
        if deriv_order == 0:
            self.kernel = lambda u: np.exp(-u**2/2)
        elif deriv_order == 2:
            self.kernel = lambda u: (u**2-1)*np.exp(-u**2/2)
        else:
            raise ValueError('Not implemented for derivative of order {}'.format(deriv_order))
        self.deriv_order = deriv_order
        self.h = silverman_bandwidth(data, deriv_order)
        self.datah = data/self.h

    def evaluate(self, x):
        """Evaluate the estimate at the points in x (scalar or array-like)."""
        grid = np.array(x).reshape(-1)/self.h
        acc = np.zeros(len(grid))
        # Accumulate kernel sums looping over whichever axis is shorter.
        if len(grid) > len(self.datah):
            for datum in self.datah:
                acc += self.kernel(datum - grid)
        else:
            for k, point in enumerate(grid):
                acc[k] = np.sum(self.kernel(self.datah - point))
        norm = np.sqrt(2*np.pi)*self.h**(1+self.deriv_order)*len(self.datah)
        return acc*1./norm

    def score_samples(self, x):
        """Alias for evaluate (sklearn-style name)."""
        return self.evaluate(x)
def silverman_bandwidth(data, deriv_order=0):
    """Normal-reference plug-in bandwidth: sample std (ddof=1) times the
    one-dimensional scale factor for the requested derivative order.

    Raises ValueError for derivative orders other than 0 and 2.
    """
    nbr_pts = data.shape[0]
    sample_std = np.std(data, ddof=1)
    # Scale factor inlined from bandwidth_factor for the two supported orders.
    if deriv_order == 0:
        return sample_std * (3.0 * nbr_pts / 4) ** (-1.0 / 5)
    if deriv_order == 2:
        return sample_std * (7.0 * nbr_pts / 4) ** (-1.0 / 9)
    raise ValueError('Not implemented for derivative of order {}'.format(deriv_order))
def bandwidth_factor(nbr_data_pts, deriv_order=0):
    '''
    Scale factor for one-dimensional plug-in bandwidth selection.

    nbr_data_pts - number of observations in the sample.
    deriv_order  - 0 for density estimation, 2 for its second derivative.

    Raises ValueError for any other derivative order.
    '''
    factors = {0: lambda n: (3.0 * n / 4) ** (-1.0 / 5),
               2: lambda n: (7.0 * n / 4) ** (-1.0 / 9)}
    if deriv_order not in factors:
        raise ValueError('Not implemented for derivative of order {}'.format(deriv_order))
    return factors[deriv_order](nbr_data_pts)
def calibrated_dip_test(data, N_bootstrap=1000):
    """
    Calibrated dip test of unimodality.

    Computes the observed dip, estimates the peak-shape statistic
    d_hat = |f''(x0)| / f(x0)^3 at the estimated mode x0, bootstraps the dip
    under the best-matching unimodal reference distribution, and returns the
    fraction of bootstrap dips exceeding the observed one (the p-value).
    """
    edf_x, edf_y = cum_distr(data)
    observed_dip = dip_from_cdf(edf_x, edf_y)
    density = KernelDensityDerivative(data, 0)
    second_deriv = KernelDensityDerivative(data, 2)
    grid = np.linspace(np.min(data), np.max(data), 512)
    density_on_grid = density.evaluate(grid)
    mode_idx = np.argmax(density_on_grid)
    d_hat = np.abs(second_deriv.evaluate(grid[mode_idx]))/density_on_grid[mode_idx]**3
    ref_distr = select_calibration_distribution(d_hat)
    ref_dips = np.zeros(N_bootstrap)
    for b in range(N_bootstrap):
        resample = ref_distr.sample(len(data))
        ref_dips[b] = dip_from_cdf(*cum_distr(resample))
    return np.mean(ref_dips > observed_dip)
def select_calibration_distribution(d_hat):
    """
    Choose the unimodal reference distribution whose peak-shape statistic
    gamma matches d_hat: standard normal when d_hat is (numerically) pi,
    a symmetric Beta(beta, beta) when d_hat < 2*pi, otherwise a rescaled
    Student's t.

    The shape parameter is located with brentq between precomputed brackets
    (beta = e^k for the Beta family, e^k/2 for the t family, k = 0..19,
    together with their gamma values; originally stored in gammaval.pkl).
    """
    beta_betadistr = np.array([
        1.0, 2.718281828459045, 7.38905609893065, 20.085536923187668,
        54.598150033144236, 148.4131591025766, 403.4287934927351,
        1096.6331584284585, 2980.9579870417283, 8103.083927575384,
        22026.465794806718, 59874.14171519782, 162754.79141900392,
        442413.3920089205, 1202604.2841647768, 3269017.3724721107,
        8886110.520507872, 24154952.7535753, 65659969.13733051,
        178482300.96318725])
    beta_studentt = np.array([
        0.5, 1.3591409142295225, 3.694528049465325, 10.042768461593834,
        27.299075016572118, 74.2065795512883, 201.71439674636756,
        548.3165792142293, 1490.4789935208642, 4051.541963787692,
        11013.232897403359, 29937.07085759891, 81377.39570950196,
        221206.69600446025, 601302.1420823884, 1634508.6862360553,
        4443055.260253936, 12077476.37678765, 32829984.568665255,
        89241150.48159362])
    gamma_betadistr = np.array([
        0.0, 4.3521604788918555, 5.619663288128619, 6.045132289787511,
        6.196412312629769, 6.251371005194619, 6.271496014102775,
        6.2788870215785195, 6.281604322090273, 6.282603731161307,
        6.282971362173459, 6.283106602190213, 6.283156350612787,
        6.283174653445515, 6.2831813886918635, 6.283183865648734,
        6.283184776870057, 6.283185112089616, 6.283185235410011,
        6.2831852807770385])
    gamma_studentt = np.array([
        np.inf, 13.130440672051542, 7.855693794235218, 6.787835735957803,
        6.46039623388715, 6.3473005818376755, 6.306629302123698,
        6.291790708027913, 6.286348471156239, 6.284348620590986,
        6.283613218820035, 6.283342721628752, 6.283243215161844,
        6.28320661564662, 6.283193150917383, 6.283188190242287,
        6.283186367798792, 6.28318569735954, 6.283185450718775,
        6.283185359984703])

    if np.abs(d_hat - np.pi) < 1e-4:
        return RefGaussian()

    if d_hat < 2*np.pi:
        # Beta(beta, beta): gamma(beta) = 2*(beta-1)*B(beta, 1/2)^2.
        objective = lambda beta: 2*(beta-1)*betafun(beta, 1.0/2)**2 - d_hat
        k = np.searchsorted(gamma_betadistr, d_hat)  # gamma increasing
        root = brentq(objective, beta_betadistr[k-1], beta_betadistr[k])
        return RefBeta(root)

    # Student's t: gamma(beta) = 2*beta*B(beta-1/2, 1/2)^2 (gamma decreasing,
    # hence the negated searchsorted).
    objective = lambda beta: 2*beta*betafun(beta-1./2, 1./2)**2 - d_hat
    k = np.searchsorted(-gamma_studentt, -d_hat)
    root = brentq(objective, beta_studentt[k-1], beta_studentt[k])
    return RefStudentt(root)
class RefGaussian(object):
    """Standard normal reference distribution for dip calibration."""

    def sample(self, n):
        """Draw n iid N(0, 1) variates."""
        return np.random.randn(n)
class RefBeta(object):
    """Symmetric Beta(beta, beta) reference distribution for dip calibration."""

    def __init__(self, beta):
        # Common shape parameter of the symmetric Beta.
        self.beta = beta

    def sample(self, n):
        """Draw n iid Beta(beta, beta) variates."""
        shape = self.beta
        return np.random.beta(shape, shape, n)
class RefStudentt(object):
    """Rescaled Student's t reference distribution for dip calibration."""

    def __init__(self, beta):
        # Shape parameter; degrees of freedom are dof = 2*beta - 1.
        self.beta = beta

    def sample(self, n):
        """Draw n iid t_(2*beta-1) variates scaled by 1/sqrt(dof)."""
        dof = 2*self.beta - 1
        return 1./np.sqrt(dof)*np.random.standard_t(dof, n)
|
tompollard/tableone
|
modality.py
|
bandwidth_factor
|
python
|
def bandwidth_factor(nbr_data_pts, deriv_order=0):
'''
Scale factor for one-dimensional plug-in bandwidth selection.
'''
if deriv_order == 0:
return (3.0*nbr_data_pts/4)**(-1.0/5)
if deriv_order == 2:
return (7.0*nbr_data_pts/4)**(-1.0/9)
raise ValueError('Not implemented for derivative of order {}'.format(deriv_order))
|
Scale factor for one-dimensional plug-in bandwidth selection.
|
train
|
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/modality.py#L1011-L1021
| null |
# ###################################### #
# #
# Updated by: Tom Pollard (2018.03.19) #
# Author: Kerstin Johnsson #
# License: MIT License #
# Available from: #
# https://github.com/kjohnsson/modality #
# #
# ###################################### #
import numpy as np
from scipy.special import beta as betafun
# import matplotlib.pyplot as plt
from scipy.optimize import brentq
import os
import pandas as pd
np.random.seed(1337)  # fix the global NumPy RNG so sampling helpers below are reproducible
# import pickle
def generate_data(peaks=2, n=None, mu=None, std=None):
    """
    Generate a 1-D sample as a mixture of `peaks` normal components.

    peaks - number of components.
    n     - per-component sample sizes (defaults to 5000 each).
    mu    - per-component means (defaults to random integers in [0, 30)).
    std   - per-component standard deviations (defaults to 1.0 each).

    Returns the concatenated sample as a 1-D numpy array.
    """
    # Use `is None` sentinels: the previous `if not mu:` raised
    # "truth value of an array is ambiguous" when mu was a numpy array,
    # and silently replaced legitimately-empty inputs.
    if n is None:
        n = [5000] * peaks
    if mu is None:
        mu = np.random.randint(0, 30, peaks)
    if std is None:
        std = [1.0] * peaks
    # Draw each component in order (keeps the RNG stream identical to a
    # sequential loop), then concatenate.
    dists = [np.random.normal(loc=mu[i], scale=std[i], size=n[i])
             for i in range(peaks)]
    return np.concatenate(dists)
def hartigan_diptest(data):
    '''
    P-value according to Hartigan's dip test for unimodality.

    The dip is computed using the function
    dip_and_closest_unimodal_from_cdf. From this the p-value is
    interpolated using a table imported from the R package diptest.

    References:
        Hartigan and Hartigan (1985): The dip test of unimodality.
        The Annals of Statistics. 13(1).

    Input:
        data - one-dimensional data set.
    Value:
        p-value for the test, or NaN if the test could not be computed
        (e.g. too few observations after dropping NaNs).
    '''
    try:
        # NaNs would corrupt the empirical CDF, so drop them up front.
        p = pval_hartigan(data[~np.isnan(data)])
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; keep the best-effort NaN fallback but let those through.
        p = np.nan
    return p
def pval_hartigan(data):
    """Dip-test p-value for a 1-D sample: EDF -> dip -> table interpolation."""
    edf_x, edf_y = cum_distr(data)
    return dip_pval_tabinterpol(dip_from_cdf(edf_x, edf_y), len(data))
def cum_distr(data, w=None):
    """
    Coordinates (x, y) of the (weighted) empirical CDF of `data`, drawn as a
    right-continuous step function with two x-points per distinct value.

    data - one-dimensional sample.
    w    - optional weights; defaults to uniform 1/len(data).
    """
    if w is None:
        w = np.ones(len(data))*1./len(data)
    order = np.argsort(data)
    sorted_data = data[order]
    sorted_w = w[order]
    sorted_data, idx = unique(sorted_data, return_index=True, eps=1e-10, is_sorted=True)
    if len(idx) < len(order):
        # Duplicates were collapsed: merge their weights per kept value.
        merged = np.zeros(len(idx))
        for k in range(len(idx) - 1):
            merged[k] = np.sum(sorted_w[idx[k]:idx[k + 1]])
        merged[-1] = np.sum(sorted_w[idx[-1]:])
        sorted_w = merged
    cw = np.cumsum(sorted_w)
    cw /= cw[-1]
    m = len(sorted_data)
    # Each value appears twice in x; y steps from the previous cumulative
    # weight up to the current one at each value.
    x = np.repeat(sorted_data, 2)
    y = np.empty(2*m)
    y[0] = 0
    y[1::2] = cw
    y[2::2] = cw[:-1]
    return x, y
def unique(data, return_index, eps, is_sorted=True):
    """
    Drop values lying within `eps` of the previously kept value.

    data         - 1-D array of values.
    return_index - if True, also return the indices (into `data`) of the
                   kept entries.
    eps          - values closer than this to the last kept value are
                   treated as duplicates.
    is_sorted    - set False when `data` is not ascending; the result is
                   still reported in the original order of `data`.

    Returns data_unique, or (data_unique, ind_unique) when return_index.
    """
    if not is_sorted:
        order = np.argsort(data)  # renamed from `ord`, which shadowed the builtin
        rank = np.argsort(order)
        data_sort = data[order]
    else:
        data_sort = data
    # Mark entries that start a new cluster: compare against the last KEPT
    # value (index j), not just the immediate neighbour.
    isunique_sort = np.ones(len(data_sort), dtype='bool')
    j = 0
    for i in range(1, len(data_sort)):
        if data_sort[i] - data_sort[j] < eps:
            isunique_sort[i] = False
        else:
            j = i
    if not is_sorted:
        # Map the sorted-order mask back onto the original order.
        isunique = isunique_sort[rank]
        data_unique = data[isunique]
    else:
        data_unique = data[isunique_sort]
    if not return_index:
        return data_unique
    if not is_sorted:
        ind_unique = np.nonzero(isunique)[0]
    else:
        ind_unique = np.nonzero(isunique_sort)[0]
    return data_unique, ind_unique
def dip_from_cdf(xF, yF, plotting=False, verbose=False, eps=1e-12):
    """Dip statistic of the CDF (xF, yF); the closest unimodal CDF is discarded."""
    return dip_and_closest_unimodal_from_cdf(xF, yF, plotting, verbose, eps)[0]
def dip_pval_tabinterpol(dip, N):
    '''
    P-value for an observed dip statistic by interpolation in the qDiptab
    table of the R package diptest.

    dip - dip value computed from dip_from_cdf
    N   - number of observations

    Returns NaN when N is NaN or below the smallest usable sample size.
    '''
    if np.isnan(N) or N < 10:
        return np.nan

    # Tabulated critical dip values: one column per cumulative probability,
    # one row per sample size (row order given by `sizes`).
    sizes = [4, 5, 6, 7, 8, 9, 10, 15, 20, 30, 50, 100, 200, 500, 1000,
             2000, 5000, 10000, 20000, 40000, 72000]
    qtab = {
        '0': [0.125, 0.1, 0.0833333333333333, 0.0714285714285714, 0.0625, 0.0555555555555556, 0.05,
              0.0341378172277919, 0.033718563622065004, 0.0262674485075642, 0.0218544781364545,
              0.0164852597438403, 0.0111236388849688, 0.007554885975761959, 0.00541658127872122,
              0.0039043999745055702, 0.00245657785440433, 0.00174954269199566, 0.00119458814106091,
              0.000852415648011777, 0.000644400053256997],
        '0.01': [0.125, 0.1, 0.0833333333333333, 0.0714285714285714, 0.0625, 0.0613018090298924,
                 0.0610132555623269, 0.0546284208048975, 0.0474333740698401, 0.0395871890405749,
                 0.0314400501999916, 0.022831985803043, 0.0165017735429825, 0.0106403461127515,
                 0.0076028674530018705, 0.0054166418179658294, 0.0034480928223332603, 0.00244595133885302,
                 0.00173435346896287, 0.00122883479310665, 0.000916872204484283],
        '0.02': [0.125, 0.1, 0.0833333333333333, 0.0714285714285714, 0.0656911994503283, 0.0658615858179315,
                 0.0651627333214016, 0.0572191260231815, 0.0490891387627092, 0.0414574606741673,
                 0.0329008160470834, 0.0238917486442849, 0.0172594157992489, 0.0111255573208294,
                 0.00794987834644799, 0.0056617138625232296, 0.00360473943713036, 0.00255710802275612,
                 0.0018119443458468102, 0.0012846930445701802, 0.0009579329467655321],
        '0.05': [0.125, 0.1, 0.0833333333333333, 0.0725717816250742, 0.0738651136071762, 0.0732651142535317,
                 0.0718321619656165, 0.0610087367689692, 0.052719998201553, 0.0444462614069956,
                 0.0353023819040016, 0.0256559537977579, 0.0185259426032926, 0.0119353655328931,
                 0.0085216518343594, 0.00607120971135229, 0.0038632654801084897, 0.00273990955227265,
                 0.00194259470485893, 0.0013761765052555301, 0.00102641863872347],
        '0.1': [0.125, 0.1, 0.0833333333333333, 0.0817315478539489, 0.0820045917762512, 0.0803941629593475,
                0.077966212182459, 0.0642657137330444, 0.0567795509056742, 0.0473998525042686,
                0.0377279973102482, 0.0273987414570948, 0.0197917612637521, 0.0127411306411808,
                0.00909775605533253, 0.0064762535755248, 0.00412089506752692, 0.0029225480567908,
                0.00207173719623868, 0.0014675150200632301, 0.0010949515421800199],
        '0.2': [0.125, 0.1, 0.0924514470941933, 0.0940590181922527, 0.0922700601131892, 0.0890432420913848,
                0.0852835359834564, 0.0692234107989591, 0.0620134674468181, 0.0516677370374349,
                0.0410699984399582, 0.0298109370830153, 0.0215233745778454, 0.0138524542751814,
                0.00988924521014078, 0.00703573098590029, 0.00447640050137479, 0.00317374638422465,
                0.00224993202086955, 0.00159376453672466, 0.00118904090369415],
        '0.3': [0.125, 0.1, 0.103913431059949, 0.10324449080087102, 0.0996737189599363, 0.0950811420297928,
                0.0903204173707099, 0.0745462114365167, 0.0660163872069048, 0.0551037519001622,
                0.0437704598622665, 0.0317771496530253, 0.0229259769870428, 0.0147536004288476,
                0.0105309297090482, 0.007494212545892991, 0.00476555693102276, 0.00338072258533527,
                0.00239520831473419, 0.00169668445506151, 0.00126575197699874],
        '0.4': [0.125, 0.10872059357632902, 0.113885220640212, 0.110964599995697, 0.10573353180273701,
                0.0999380897811046, 0.0943334983745117, 0.0792030878981762, 0.0696506075066401,
                0.058265005347492994, 0.0462925642671299, 0.0336073821590387, 0.024243848341112,
                0.0155963185751048, 0.0111322726797384, 0.007920878896017329, 0.005037040297500721,
                0.0035724387653598205, 0.00253036792824665, 0.0017925341833790601, 0.00133750966361506],
        '0.5': [0.125, 0.12156379802641401, 0.123071187137781, 0.11780784650433501, 0.11103512984770501,
                0.10415356007586801, 0.0977817630384725, 0.083621033469191, 0.0733437740592714,
                0.0614510857304343, 0.048851155289608, 0.0354621760592113, 0.025584358256487003,
                0.0164519238025286, 0.0117439009052552, 0.008355737247680059, 0.0053123924740821294,
                0.00376734715752209, 0.00266863168718114, 0.00189061261635977, 0.00141049709228472],
        '0.6': [0.125, 0.134318918697053, 0.13186973390253, 0.124216086833531, 0.11592005574998801,
                0.10800780236193198, 0.102180866696628, 0.0881198482202905, 0.0776460662880254,
                0.0649164408053978, 0.0516145897865757, 0.0374805844550272, 0.0270252129816288,
                0.017383057902553, 0.012405033293814, 0.00882439333812351, 0.00560929919359959,
                0.00397885007249132, 0.0028181999035216, 0.00199645471886179, 0.00148936709298802],
        '0.7': [0.13255954878268902, 0.14729879897625198, 0.140564796497941, 0.130409013968317,
                0.120561479262465, 0.112512617124951, 0.10996094814295099, 0.093124666680253,
                0.0824558407118372, 0.0689178762425442, 0.0548121932066019, 0.0398046179116599,
                0.0286920262150517, 0.0184503949887735, 0.0131684179320803, 0.009367858207170609,
                0.00595352728377949, 0.00422430013176233, 0.00299137548142077, 0.00211929748381704,
                0.00158027541945626],
        '0.8': [0.15749736904023498, 0.161085025702604, 0.14941924112913002, 0.136639642123068,
                0.125558759034845, 0.12291503348081699, 0.11884476721158699, 0.0996694393390689,
                0.08834462700173701, 0.0739249074078291, 0.0588230482851366, 0.0427283846799166,
                0.0308006766341406, 0.0198162679782071, 0.0141377942603047, 0.01005604603884,
                0.00639092280563517, 0.00453437508148542, 0.00321024899920135, 0.0022745769870358102,
                0.00169651643860074],
        '0.9': [0.18740187880755899, 0.176811998476076, 0.159137064572627, 0.144240669035124,
                0.141841067033899, 0.136412639387084, 0.130462149644819, 0.11008749690090598,
                0.0972346018122903, 0.0814791379390127, 0.0649136324046767, 0.047152783315718,
                0.0339967814293504, 0.0218781313182203, 0.0156148055023058, 0.0111019116837591,
                0.00705566126234625, 0.00500178808402368, 0.00354362220314155, 0.00250999080890397,
                0.0018730618472582602],
        '0.95': [0.20726978858735998, 0.18639179602794398, 0.164769608513302, 0.159903395678336,
                 0.153978303998561, 0.14660378495401902, 0.139611395137099, 0.118760769203664,
                 0.105130218270636, 0.0881689143126666, 0.0702737877191269, 0.0511279442868827,
                 0.0368418413878307, 0.0237294742633411, 0.0169343970067564, 0.0120380990328341,
                 0.0076506368153935, 0.00542372242836395, 0.00384330190244679, 0.00272375073486223,
                 0.00203178401610555],
        '0.98': [0.22375580462922195, 0.19361253363045, 0.17917654739278197, 0.17519655327122302,
                 0.16597856724751, 0.157084065653166, 0.150961728882481, 0.128890475210055,
                 0.11430970428125302, 0.0960564383013644, 0.0767095886079179, 0.0558022052195208,
                 0.0402729850316397, 0.025919578977657003, 0.018513067368104, 0.0131721010552576,
                 0.00836821687047215, 0.00592656681022859, 0.00420258799378253, 0.00298072958568387,
                 0.00222356097506054],
        '0.99': [0.231796258864192, 0.19650913979884502, 0.191862827995563, 0.184118659121501,
                 0.172988528276759, 0.164164643657217, 0.159684158858235, 0.13598356863636,
                 0.120624043335821, 0.101478558893837, 0.0811998415355918, 0.059024132304226,
                 0.0426864799777448, 0.0274518022761997, 0.0196080260483234, 0.0139655122281969,
                 0.00886357892854914, 0.00628034732880374, 0.00445774902155711, 0.00315942194040388,
                 0.00235782814777627],
        '0.995': [0.23726374382677898, 0.198159967287576, 0.20210197104296804, 0.19101439617430602,
                  0.179010413496374, 0.172821674582338, 0.16719524735674, 0.14245248368127697,
                  0.126552378036739, 0.10650487144103, 0.0852854646662134, 0.0620425065165146,
                  0.044958959158761, 0.0288986369564301, 0.0206489568587364, 0.0146889122204488,
                  0.00934162787186159, 0.00661030641550873, 0.00469461513212743, 0.0033273652798148,
                  0.00248343580127067],
        '0.998': [0.241992892688593, 0.19924427936243302, 0.213015781111186, 0.198216795232182,
                  0.186504388711178, 0.182555283567818, 0.175419540856082, 0.15017281653074202,
                  0.13360135382395, 0.112724636524262, 0.0904847827490294, 0.0658016011466099,
                  0.0477643873749449, 0.0306813505050163, 0.0219285176765082, 0.0156076779647454,
                  0.009932186363240291, 0.00702254699967648, 0.004994160691291679, 0.00353988965698579,
                  0.00264210826339498],
        '0.999': [0.244369839049632, 0.199617527406166, 0.219518627282415, 0.20234101074826102,
                  0.19448404115794, 0.188658833121906, 0.180611195797351, 0.15545613369632802,
                  0.138569903791767, 0.117164140184417, 0.0940930106666244, 0.0684479731118028,
                  0.0497198001867437, 0.0320170996823189, 0.0228689168972669, 0.0162685615996248,
                  0.0103498795291629, 0.0073182262815645795, 0.00520917757743218, 0.00369400045486625,
                  0.0027524322157581],
        '0.9995': [0.245966625504691, 0.19980094149902802, 0.22433904739444602, 0.205377566346832,
                   0.200864297005026, 0.19408912076824603, 0.18528641605039603, 0.160896499106958,
                   0.14336916123968, 0.12142585990898701, 0.0974904344916743, 0.0709169443994193,
                   0.0516114611801451, 0.0332452747332959, 0.023738710122235003, 0.0168874937789415,
                   0.0107780907076862, 0.0076065423418208, 0.005403962359243721, 0.00383345715372182,
                   0.0028608570740143],
        '0.9998': [0.24743959723326198, 0.19991708183427104, 0.22944933215424101, 0.208306562526874,
                   0.20884999705022897, 0.19915700809389003, 0.19120308390504398, 0.16697940794624802,
                   0.148940116394883, 0.126733051889401, 0.10228420428399698, 0.0741183486081263,
                   0.0540543978864652, 0.0348335698576168, 0.0248334158891432, 0.0176505093388153,
                   0.0113184316868283, 0.00795640367207482, 0.00564540201704594, 0.0040079346963469605,
                   0.00298695044508003],
        '0.9999': [0.24823065965663801, 0.19995902909307503, 0.232714530449602, 0.209866047852379,
                   0.212556040406219, 0.20288159843655804, 0.19580515933918397, 0.17111793515551002,
                   0.152832538183622, 0.131198578897542, 0.104680624334611, 0.0762579402903838,
                   0.0558704526182638, 0.0359832389317461, 0.0256126573433596, 0.0181944265400504,
                   0.0117329446468571, 0.0082270524584354, 0.00580460792299214, 0.00414892737222885,
                   0.00309340092038059],
        '0.99995': [0.248754269146416, 0.19997839537608197, 0.236548128358969, 0.21096757693345103,
                    0.21714917413729898, 0.205979795735129, 0.20029398089673, 0.17590050570443203,
                    0.15601016361897102, 0.133691739483444, 0.107496694235039, 0.0785735967934979,
                    0.0573877056330228, 0.0369051995840645, 0.0265491336936829, 0.0186226037818523,
                    0.0119995948968375, 0.00852240989786251, 0.00599774739593151, 0.0042839159079761,
                    0.00319932767198801],
        '0.99998': [0.24930203997425898, 0.199993151405815, 0.2390887911995, 0.212233348558702,
                    0.22170007640450304, 0.21054115498898, 0.20565108964621898, 0.18185667601316602,
                    0.16131922583934502, 0.137831637950694, 0.11140887547015, 0.0813458356889133,
                    0.0593365901653878, 0.0387221159256424, 0.027578430100535997, 0.0193001796565433,
                    0.0124410052027886, 0.00892863905540303, 0.00633099254378114, 0.0044187010443287895,
                    0.00332688234611187],
        '0.99999': [0.24945965232322498, 0.199995525025673, 0.24010356643629502, 0.21266103831250602,
                    0.225000835357532, 0.21180033095039003, 0.209682048785853, 0.185743454151004,
                    0.165568255916749, 0.14155750962435099, 0.113536607717411, 0.0832963013755522,
                    0.0607646310473911, 0.039930259057650005, 0.0284430733108, 0.0196241518040617,
                    0.0129467396733128, 0.009138539330002129, 0.00656987109386762, 0.00450818604569179,
                    0.00339316094477355],
        '1': [0.24974836247845, 0.199999835639211, 0.24467288361776798, 0.21353618608817,
              0.23377291968768302, 0.21537991431762502, 0.221530282182963, 0.19224056333056197,
              0.175834459522789, 0.163833046059817, 0.11788671686531199, 0.0926780423096737,
              0.0705309107882395, 0.0431448163617178, 0.0313640941982108, 0.0213081254074584,
              0.014396063834027, 0.00952234579566773, 0.006858294480462271, 0.00513477467565583,
              0.00376331697005859]}
    qDiptab_df = pd.DataFrame(qtab, index=sizes)
    diptable = np.array(qDiptab_df)
    ps = np.array(qDiptab_df.columns).astype(float)
    Ns = np.array(qDiptab_df.index)

    # Beyond the largest tabulated size, rescale the dip (dip ~ 1/sqrt(N)).
    if N >= Ns[-1]:
        dip = transform_dip_to_other_nbr_pts(dip, N, Ns[-1]-0.1)
        N = Ns[-1]-0.1

    row = np.nonzero(Ns < N)[0][-1]
    frac = (N-Ns[row])/(Ns[row+1]-Ns[row])
    stat = np.sqrt(N)*dip
    # Interpolate the critical curve between the bracketing sample sizes,
    # working on the sqrt(N)*dip scale.
    lower = np.sqrt(Ns[row])*diptable[row, :]
    upper = np.sqrt(Ns[row+1])*diptable[row+1, :]
    crit = lower + frac*(upper - lower)

    if not (crit < stat).any():
        return 1
    col = np.nonzero(crit < stat)[0][-1]
    if col == len(crit) - 1:
        return 0
    t = (stat-crit[col])/(crit[col+1]-crit[col])
    return 1 - (ps[col] + t*(ps[col+1]-ps[col]))
def transform_dip_to_other_nbr_pts(dip_n, n, m):
    """
    Rescale a dip value computed from ``n`` points to the scale of ``m``
    points.

    The dip statistic scales as 1/sqrt(sample size), hence the conversion
    factor sqrt(n/m).
    """
    return np.sqrt(n/m) * dip_n
def dip_and_closest_unimodal_from_cdf(xF, yF, plotting=False, verbose=False, eps=1e-12):
    '''
    Compute the dip statistic and the closest unimodal distribution function.

    Dip computed as distance between empirical distribution function (EDF) and
    cumulative distribution function for the unimodal distribution with
    smallest such distance. The optimal unimodal distribution is found by
    the algorithm presented in

        Hartigan (1985): Computation of the dip statistic to test for
        unimodality. Applied Statistics, vol. 34, no. 3

    If the plotting option is enabled the optimal unimodal distribution
    function is plotted along with (xF, yF-dip) and (xF, yF+dip)
    (NOTE(review): all plotting code is currently commented out, so the
    ``plotting`` flag has no effect).

    xF - x-coordinates for EDF (must be non-decreasing within tolerance eps)
    yF - y-coordinates for EDF (must be non-decreasing within tolerance eps)

    Returns ``(dip, (xU, yU))`` where ``dip = D/2`` and ``(xU, yU)`` are the
    coordinates of the closest unimodal distribution function.
    '''
    ## TODO! Preprocess xF and yF so that yF increasing and xF does
    ## not have more than two copies of each x-value.
    if (xF[1:]-xF[:-1] < -eps).any():
        raise ValueError('Need sorted x-values to compute dip')
    if (yF[1:]-yF[:-1] < -eps).any():
        raise ValueError('Need sorted y-values to compute dip')
    # if plotting:
    #     Nplot = 5
    #     bfig = plt.figure(figsize=(12, 3))
    #     i = 1  # plot index
    D = 0  # lower bound for dip*2
    # [L, U] is interval where we still need to find unimodal function,
    # the modal interval
    L = 0
    U = len(xF) - 1
    # iGfin are the indices of xF where the optimal unimodal distribution is greatest
    # convex minorant to (xF, yF+dip)
    # iHfin are the indices of xF where the optimal unimodal distribution is least
    # concave majorant to (xF, yF-dip)
    iGfin = L
    iHfin = U
    while 1:
        iGG = greatest_convex_minorant_sorted(xF[L:(U+1)], yF[L:(U+1)])
        iHH = least_concave_majorant_sorted(xF[L:(U+1)], yF[L:(U+1)])
        iG = np.arange(L, U+1)[iGG]
        iH = np.arange(L, U+1)[iHH]
        # Interpolate. First and last point are in both and does not need
        # interpolation. Might cause trouble if included due to possiblity
        # of infinity slope at beginning or end of interval.
        if iG[0] != iH[0] or iG[-1] != iH[-1]:
            raise ValueError('Convex minorant and concave majorant should start and end at same points.')
        hipl = np.interp(xF[iG[1:-1]], xF[iH], yF[iH])
        gipl = np.interp(xF[iH[1:-1]], xF[iG], yF[iG])
        hipl = np.hstack([yF[iH[0]], hipl, yF[iH[-1]]])
        gipl = np.hstack([yF[iG[0]], gipl, yF[iG[-1]]])
        #hipl = lin_interpol_sorted(xF[iG], xF[iH], yF[iH])
        #gipl = lin_interpol_sorted(xF[iH], xF[iG], yF[iG])
        # Find largest difference between GCM and LCM.
        gdiff = hipl - yF[iG]
        hdiff = yF[iH] - gipl
        imaxdiffg = np.argmax(gdiff)
        imaxdiffh = np.argmax(hdiff)
        d = max(gdiff[imaxdiffg], hdiff[imaxdiffh])
        # # Plot current GCM and LCM.
        # if plotting:
        #     if i > Nplot:
        #         bfig = plt.figure(figsize=(12, 3))
        #         i = 1
        #     bax = bfig.add_subplot(1, Nplot, i)
        #     bax.plot(xF, yF, color='red')
        #     bax.plot(xF, yF-d/2, color='black')
        #     bax.plot(xF, yF+d/2, color='black')
        #     bax.plot(xF[iG], yF[iG]+d/2, color='blue')
        #     bax.plot(xF[iH], yF[iH]-d/2, color='blue')
        # if d <= D:
        #     if verbose:
        #         print("Difference in modal interval smaller than current dip")
        #     break
        # Find new modal interval so that largest difference is at endpoint
        # and set d to largest distance between current GCM and LCM.
        if gdiff[imaxdiffg] > hdiff[imaxdiffh]:
            L0 = iG[imaxdiffg]
            U0 = iH[iH >= L0][0]
        else:
            U0 = iH[imaxdiffh]
            L0 = iG[iG <= U0][-1]
        # Add points outside the modal interval to the final GCM and LCM.
        iGfin = np.hstack([iGfin, iG[(iG <= L0)*(iG > L)]])
        iHfin = np.hstack([iH[(iH >= U0)*(iH < U)], iHfin])
        # # Plot new modal interval
        # if plotting:
        #     ymin, ymax = bax.get_ylim()
        #     bax.axvline(xF[L0], ymin, ymax, color='orange')
        #     bax.axvline(xF[U0], ymin, ymax, color='red')
        #     bax.set_xlim(xF[L]-.1*(xF[U]-xF[L]), xF[U]+.1*(xF[U]-xF[L]))
        # Compute new lower bound for dip*2
        # i.e. largest difference outside modal interval
        gipl = np.interp(xF[L:(L0+1)], xF[iG], yF[iG])
        D = max(D, np.amax(yF[L:(L0+1)] - gipl))
        hipl = np.interp(xF[U0:(U+1)], xF[iH], yF[iH])
        D = max(D, np.amax(hipl - yF[U0:(U+1)]))
        if xF[U0]-xF[L0] < eps:
            if verbose:
                print("Modal interval zero length")
            break
        # if plotting:
        #     mxpt = np.argmax(yF[L:(L0+1)] - gipl)
        #     bax.plot([xF[L:][mxpt], xF[L:][mxpt]], [yF[L:][mxpt]+d/2,
        #              gipl[mxpt]+d/2], '+', color='red')
        #     mxpt = np.argmax(hipl - yF[U0:(U+1)])
        #     bax.plot([xF[U0:][mxpt], xF[U0:][mxpt]], [yF[U0:][mxpt]-d/2,
        #              hipl[mxpt]-d/2], '+', color='red')
        #     i += 1
        # Change modal interval
        L = L0
        U = U0
        if d <= D:
            if verbose:
                print("Difference in modal interval smaller than new dip")
            break
    # if plotting:
    #     # Add modal interval to figure
    #     bax.axvline(xF[L0], ymin, ymax, color='green', linestyle='dashed')
    #     bax.axvline(xF[U0], ymin, ymax, color='green', linestyle='dashed')
    #     ## Plot unimodal function (not distribution function)
    #     bfig = plt.figure()
    #     bax = bfig.add_subplot(1, 1, 1)
    #     bax.plot(xF, yF, color='red')
    #     bax.plot(xF, yF-D/2, color='black')
    #     bax.plot(xF, yF+D/2, color='black')
    # Find string position in modal interval
    iM = np.arange(iGfin[-1], iHfin[0]+1)
    yM_lower = yF[iM]-D/2
    yM_lower[0] = yF[iM[0]]+D/2
    iMM_concave = least_concave_majorant_sorted(xF[iM], yM_lower)
    iM_concave = iM[iMM_concave]
    #bax.plot(xF[iM], yM_lower, color='orange')
    #bax.plot(xF[iM_concave], yM_lower[iMM_concave], color='red')
    lcm_ipl = np.interp(xF[iM], xF[iM_concave], yM_lower[iMM_concave])
    try:
        mode = iM[np.nonzero(lcm_ipl > yF[iM]+D/2)[0][-1]]
        #bax.axvline(xF[mode], color='green', linestyle='dashed')
    except IndexError:
        iM_convex = np.zeros(0, dtype='i')
    else:
        after_mode = iM_concave > mode
        iM_concave = iM_concave[after_mode]
        iMM_concave = iMM_concave[after_mode]
        iM = iM[iM <= mode]
        iM_convex = iM[greatest_convex_minorant_sorted(xF[iM], yF[iM])]
    # if plotting:
    #     bax.plot(xF[np.hstack([iGfin, iM_convex, iM_concave, iHfin])],
    #              np.hstack([yF[iGfin] + D/2, yF[iM_convex] + D/2,
    #                         yM_lower[iMM_concave], yF[iHfin] - D/2]), color='blue')
    #     #bax.plot(xF[iM], yM_lower, color='orange')
    #     ## Plot unimodal distribution function
    #     bfig = plt.figure()
    #     bax = bfig.add_subplot(1, 1, 1)
    #     bax.plot(xF, yF, color='red')
    #     bax.plot(xF, yF-D/2, color='black')
    #     bax.plot(xF, yF+D/2, color='black')
    # NOTE(review): the following block repeats the "Find string position in
    # modal interval" computation above verbatim; it appears redundant but is
    # left unchanged here.
    # Find string position in modal interval
    iM = np.arange(iGfin[-1], iHfin[0]+1)
    yM_lower = yF[iM]-D/2
    yM_lower[0] = yF[iM[0]]+D/2
    iMM_concave = least_concave_majorant_sorted(xF[iM], yM_lower)
    iM_concave = iM[iMM_concave]
    #bax.plot(xF[iM], yM_lower, color='orange')
    #bax.plot(xF[iM_concave], yM_lower[iMM_concave], color='red')
    lcm_ipl = np.interp(xF[iM], xF[iM_concave], yM_lower[iMM_concave])
    try:
        mode = iM[np.nonzero(lcm_ipl > yF[iM]+D/2)[0][-1]]
        #bax.axvline(xF[mode], color='green', linestyle='dashed')
    except IndexError:
        iM_convex = np.zeros(0, dtype='i')
    else:
        after_mode = iM_concave > mode
        iM_concave = iM_concave[after_mode]
        iMM_concave = iMM_concave[after_mode]
        iM = iM[iM <= mode]
        iM_convex = iM[greatest_convex_minorant_sorted(xF[iM], yF[iM])]
    # Closest unimodal curve
    xU = xF[np.hstack([iGfin[:-1], iM_convex, iM_concave, iHfin[1:]])]
    yU = np.hstack([yF[iGfin[:-1]] + D/2, yF[iM_convex] + D/2,
                    yM_lower[iMM_concave], yF[iHfin[1:]] - D/2])
    # Add points so unimodal curve goes from 0 to 1
    k_start = (yU[1]-yU[0])/(xU[1]-xU[0]+1e-5)
    xU_start = xU[0] - yU[0]/(k_start+1e-5)
    k_end = (yU[-1]-yU[-2])/(xU[-1]-xU[-2]+1e-5)
    xU_end = xU[-1] + (1-yU[-1])/(k_end+1e-5)
    xU = np.hstack([xU_start, xU, xU_end])
    yU = np.hstack([0, yU, 1])
    # if plotting:
    #     bax.plot(xU, yU, color='blue')
    #     #bax.plot(xF[iM], yM_lower, color='orange')
    #     plt.show()
    return D/2, (xU, yU)
def greatest_convex_minorant_sorted(x, y):
    """
    Indices of the greatest convex minorant of the points (x, y), x sorted.

    Implemented by negating y and taking the least concave majorant.
    """
    return least_concave_majorant_sorted(x, -y)
def least_concave_majorant_sorted(x, y, eps=1e-12):
    """
    Indices of the least concave majorant of the points (x, y), x sorted.

    At most two copies of each x-value (within tolerance ``eps``) are
    allowed; a third copy raises ValueError.
    """
    indices = [0]
    cur = 0
    last = len(x) - 1
    while cur < last:
        if np.abs(x[cur+1]-x[cur]) > eps:
            # Jump to the following point with the steepest slope.
            slopes = (y[(cur+1):]-y[cur])/(x[(cur+1):]-x[cur])
            cur += 1 + np.argmax(slopes)
            indices.append(cur)
        elif y[cur+1] > y[cur] or cur == last - 1:
            # Duplicate x with higher y (or penultimate point): step forward.
            cur += 1
            indices.append(cur)
        elif np.abs(x[cur+2]-x[cur]) > eps:
            # Duplicate x with lower y: skip over it for the slope search.
            slopes = (y[(cur+2):]-y[cur])/(x[(cur+2):]-x[cur])
            cur += 2 + np.argmax(slopes)
            indices.append(cur)
        else:
            print("x[icurr] = {}, x[icurr+1] = {}, x[icurr+2] = {}".format(x[cur],
                  x[cur+1], x[cur+2]))
            raise ValueError('Maximum two copies of each x-value allowed')
    return np.array(indices)
class KernelDensityDerivative(object):
    """
    Gaussian kernel estimate of a sample's density (``deriv_order == 0``)
    or of the second derivative of its density (``deriv_order == 2``).

    The bandwidth is selected by `silverman_bandwidth`.
    """

    def __init__(self, data, deriv_order):
        if deriv_order == 0:
            self.kernel = lambda u: np.exp(-u**2/2)
        elif deriv_order == 2:
            self.kernel = lambda u: (u**2-1)*np.exp(-u**2/2)
        else:
            raise ValueError('Not implemented for derivative of order {}'.format(deriv_order))
        self.deriv_order = deriv_order
        self.h = silverman_bandwidth(data, deriv_order)
        self.datah = data/self.h  # data pre-scaled by the bandwidth

    def evaluate(self, x):
        """Evaluate the estimate at the points *x* (any array-like)."""
        pts = np.array(x).reshape(-1)/self.h
        acc = np.zeros(len(pts))
        # Loop over whichever of (data, evaluation points) is shorter.
        if len(pts) > len(self.datah):
            for d in self.datah:
                acc += self.kernel(d - pts)
        else:
            for j, p in enumerate(pts):
                acc[j] = np.sum(self.kernel(self.datah - p))
        norm = np.sqrt(2*np.pi)*self.h**(1+self.deriv_order)*len(self.datah)
        return acc/norm

    def score_samples(self, x):
        """Alias for `evaluate` (scikit-learn-style name)."""
        return self.evaluate(x)
def silverman_bandwidth(data, deriv_order=0):
    """Silverman-style bandwidth: sample std times a size-dependent factor."""
    n = data.shape[0]
    sigma_hat = np.std(data, ddof=1)
    return sigma_hat * bandwidth_factor(n, deriv_order)
def calibrated_dip_test(data, N_bootstrap=1000):
    """
    Calibrated dip test of unimodality.

    The dip of *data* is compared against the dips of *N_bootstrap* samples
    drawn from a reference unimodal distribution matched to the data's
    estimated shape statistic; the returned value is the fraction of
    reference dips exceeding the observed dip (a p-value).
    """
    x_edf, y_edf = cum_distr(data)
    observed_dip = dip_from_cdf(x_edf, y_edf)
    n_eval = 512
    density = KernelDensityDerivative(data, 0)
    density_bis = KernelDensityDerivative(data, 2)
    grid = np.linspace(np.min(data), np.max(data), n_eval)
    density_vals = density.evaluate(grid)
    i_mode = np.argmax(density_vals)
    # Shape statistic |f''(x0)| / f(x0)^3 at the estimated mode.
    d_hat = np.abs(density_bis.evaluate(grid[i_mode]))/density_vals[i_mode]**3
    ref_distr = select_calibration_distribution(d_hat)
    ref_dips = np.zeros(N_bootstrap)
    for k in range(N_bootstrap):
        sample = ref_distr.sample(len(data))
        x_s, y_s = cum_distr(sample)
        ref_dips[k] = dip_from_cdf(x_s, y_s)
    return np.mean(ref_dips > observed_dip)
def select_calibration_distribution(d_hat):
    """
    Return a reference unimodal distribution matching the shape statistic
    ``d_hat``.

    ``d_hat`` within 1e-4 of pi selects a standard Gaussian; values below
    2*pi select a symmetric beta distribution; larger values select a scaled
    Student-t.  The shape parameter ``beta`` is found with `brentq` between
    tabulated bracketing values.
    """
    # data_dir = os.path.join('.', 'data')
    # print(data_dir)
    # with open(os.path.join(data_dir, 'gammaval.pkl'), 'r') as f:
    #     savedat = pickle.load(f)
    # Tabulated (beta, gamma) pairs used only to bracket the root search.
    savedat = {'beta_betadistr': np.array([1.0,
                                           2.718281828459045,
                                           7.38905609893065,
                                           20.085536923187668,
                                           54.598150033144236,
                                           148.4131591025766,
                                           403.4287934927351,
                                           1096.6331584284585,
                                           2980.9579870417283,
                                           8103.083927575384,
                                           22026.465794806718,
                                           59874.14171519782,
                                           162754.79141900392,
                                           442413.3920089205,
                                           1202604.2841647768,
                                           3269017.3724721107,
                                           8886110.520507872,
                                           24154952.7535753,
                                           65659969.13733051,
                                           178482300.96318725]),
               'beta_studentt': np.array([0.5,
                                          1.3591409142295225,
                                          3.694528049465325,
                                          10.042768461593834,
                                          27.299075016572118,
                                          74.2065795512883,
                                          201.71439674636756,
                                          548.3165792142293,
                                          1490.4789935208642,
                                          4051.541963787692,
                                          11013.232897403359,
                                          29937.07085759891,
                                          81377.39570950196,
                                          221206.69600446025,
                                          601302.1420823884,
                                          1634508.6862360553,
                                          4443055.260253936,
                                          12077476.37678765,
                                          32829984.568665255,
                                          89241150.48159362]),
               'gamma_betadistr': np.array([0.0,
                                            4.3521604788918555,
                                            5.619663288128619,
                                            6.045132289787511,
                                            6.196412312629769,
                                            6.251371005194619,
                                            6.271496014102775,
                                            6.2788870215785195,
                                            6.281604322090273,
                                            6.282603731161307,
                                            6.282971362173459,
                                            6.283106602190213,
                                            6.283156350612787,
                                            6.283174653445515,
                                            6.2831813886918635,
                                            6.283183865648734,
                                            6.283184776870057,
                                            6.283185112089616,
                                            6.283185235410011,
                                            6.2831852807770385]),
               'gamma_studentt': np.array([np.inf,
                                           13.130440672051542,
                                           7.855693794235218,
                                           6.787835735957803,
                                           6.46039623388715,
                                           6.3473005818376755,
                                           6.306629302123698,
                                           6.291790708027913,
                                           6.286348471156239,
                                           6.284348620590986,
                                           6.283613218820035,
                                           6.283342721628752,
                                           6.283243215161844,
                                           6.28320661564662,
                                           6.283193150917383,
                                           6.283188190242287,
                                           6.283186367798792,
                                           6.28318569735954,
                                           6.283185450718775,
                                           6.283185359984703])}
    if np.abs(d_hat-np.pi) < 1e-4:
        return RefGaussian()
    if d_hat < 2*np.pi:  # beta distribution
        gamma = lambda beta: 2*(beta-1)*betafun(beta, 1.0/2)**2 - d_hat
        # gamma_betadistr is increasing, so searchsorted brackets the root.
        i = np.searchsorted(savedat['gamma_betadistr'], d_hat)
        beta_left = savedat['beta_betadistr'][i-1]
        beta_right = savedat['beta_betadistr'][i]
        beta = brentq(gamma, beta_left, beta_right)
        return RefBeta(beta)
    # student t distribution
    gamma = lambda beta: 2*beta*betafun(beta-1./2, 1./2)**2 - d_hat
    # gamma_studentt is decreasing; negate so searchsorted works ascending.
    i = np.searchsorted(-savedat['gamma_studentt'], -d_hat)
    beta_left = savedat['beta_studentt'][i-1]
    beta_right = savedat['beta_studentt'][i]
    beta = brentq(gamma, beta_left, beta_right)
    return RefStudentt(beta)
class RefGaussian(object):
    """Standard normal reference distribution for dip calibration."""

    def sample(self, n):
        """Draw *n* i.i.d. standard normal variates."""
        return np.random.standard_normal(n)
class RefBeta(object):
    """Symmetric beta(beta, beta) reference distribution for dip calibration."""

    def __init__(self, beta):
        self.beta = beta

    def sample(self, n):
        """Draw *n* i.i.d. beta(beta, beta) variates."""
        return np.random.beta(self.beta, self.beta, size=n)
class RefStudentt(object):
    """Scaled Student-t reference distribution with dof = 2*beta - 1."""

    def __init__(self, beta):
        self.beta = beta

    def sample(self, n):
        """Draw *n* variates of t(dof)/sqrt(dof) with dof = 2*beta - 1."""
        dof = 2*self.beta - 1
        return np.random.standard_t(dof, size=n)/np.sqrt(dof)
|
anntzer/mplcursors
|
lib/mplcursors/_pick_info.py
|
_register_scatter
|
python
|
def _register_scatter():
    """
    Patch `PathCollection.__init__` and `Axes.scatter` so that collections
    *not* created by `scatter` can be told apart: every new `PathCollection`
    is added to `_nonscatter_pathcollections`, and the wrapped `scatter`
    removes its own return value from that set.
    """
    @functools.wraps(PathCollection.__init__)
    def __init__(self, *args, **kwargs):
        # Assume non-scatter by default; `scatter` below un-registers.
        _nonscatter_pathcollections.add(self)
        return __init__.__wrapped__(self, *args, **kwargs)
    PathCollection.__init__ = __init__

    @functools.wraps(Axes.scatter)
    def scatter(*args, **kwargs):
        paths = scatter.__wrapped__(*args, **kwargs)
        # KeyError can occur since the set holds weak references.
        with suppress(KeyError):
            _nonscatter_pathcollections.remove(paths)
        return paths
    Axes.scatter = scatter
|
Patch `PathCollection` and `scatter` to register their return values.
This registration allows us to distinguish `PathCollection`s created by
`Axes.scatter`, which should use point-like picking, from others, which
should use path-like picking. The former is more common, so we store the
latter instead; this also lets us guess the type better if this module is
imported late.
|
train
|
https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_pick_info.py#L37-L60
| null |
# Unsupported Artist classes: subclasses of AxesImage, QuadMesh (upstream could
# have a `format_coord`-like method); PolyCollection (picking is not well
# defined).
from collections import ChainMap, namedtuple
from contextlib import suppress
import copy
import functools
import inspect
from inspect import Signature
import itertools
from numbers import Integral
import re
import warnings
from weakref import WeakSet
from matplotlib import cbook
from matplotlib.axes import Axes
from matplotlib.backend_bases import RendererBase
from matplotlib.collections import (
LineCollection, PatchCollection, PathCollection)
from matplotlib.container import BarContainer, ErrorbarContainer, StemContainer
from matplotlib.figure import Figure
from matplotlib.image import AxesImage
from matplotlib.lines import Line2D
from matplotlib.patches import Patch, PathPatch, Polygon, Rectangle
from matplotlib.quiver import Barbs, Quiver
from matplotlib.text import Text
from matplotlib.transforms import Affine2D
import numpy as np
# Register numpy ints as `Integral` so `isinstance(idx, Integral)` accepts
# them (used when inspecting selection indices below).
Integral.register(np.integer)  # Back-compatibility for numpy 1.7, 1.8.

PATCH_PICKRADIUS = 5  # FIXME Patches do not provide `pickradius`.

# `PathCollection`s that did *not* come from `Axes.scatter` (see
# `_register_scatter`); a `WeakSet` so entries die with their artists.
_nonscatter_pathcollections = WeakSet()

# Install the `PathCollection`/`scatter` patches at import time.
_register_scatter()
def _is_scatter(artist):
    """Return whether *artist* is a `PathCollection` created by `Axes.scatter`."""
    if not isinstance(artist, PathCollection):
        return False
    return artist not in _nonscatter_pathcollections
def _artist_in_container(container):
    """Return the first truthy child artist of *container*."""
    children = container.get_children()
    return next(filter(None, children))
class ContainerArtist:
    """Workaround to make containers behave more like artists."""

    def __init__(self, container):
        self.container = container  # Guaranteed to be nonempty.
        # We can't weakref the Container (which subclasses tuple), so
        # we instead create a reference cycle between the Container and
        # the ContainerArtist; as no one else strongly references the
        # ContainerArtist, it will get GC'd whenever the Container is.
        vars(container).setdefault(
            "_{}__keep_alive".format(__class__.__name__), []).append(self)

    def __str__(self):
        return "<{}({})>".format(type(self).__name__, self.container)

    def __repr__(self):
        return "<{}({!r})>".format(type(self).__name__, self.container)

    # Forward `figure`/`axes` lookups to the first real artist inside the
    # container so this wrapper can stand in for an artist.
    figure = property(lambda self: _artist_in_container(self.container).figure)
    axes = property(lambda self: _artist_in_container(self.container).axes)
class AttrArray(np.ndarray):
    """A `numpy.ndarray` subclass onto which arbitrary attributes can be set."""

    def __new__(cls, array):
        # View the input (no copy when possible) as this subclass.
        view = np.asarray(array).view(cls)
        return view
def _with_attrs(array, **kwargs):
    """Return *array* wrapped as an `AttrArray` carrying the given attributes."""
    wrapped = AttrArray(array)
    for name, value in kwargs.items():
        setattr(wrapped, name, value)
    return wrapped
# A `Selection` records everything about one pick: the picked artist, the
# target point, the pixel distance to it, the annotation, and extra artists.
Selection = namedtuple("Selection", "artist target dist annotation extras")
# Override equality to identity: Selections should be considered immutable
# (with mutable fields though) and we don't want to trigger casts of array
# equality checks to booleans.  We don't need to override comparisons because
# artists are already non-comparable.
Selection.__eq__ = lambda self, other: self is other
Selection.__ne__ = lambda self, other: self is not other
try:
    Selection.artist.__doc__ = (
        "The selected artist.")
    Selection.target.__doc__ = (
        "The point picked within the artist, in data coordinates.")
    Selection.dist.__doc__ = (
        "The distance from the click to the target, in pixels.")
    Selection.annotation.__doc__ = (
        "The instantiated `matplotlib.text.Annotation`.")
    Selection.extras.__doc__ = (
        "An additional list of artists (e.g., highlighters) that will be "
        "cleared at the same time as the annotation.")
except AttributeError:  # Read-only in Py3.4.
    pass
@functools.singledispatch
def compute_pick(artist, event):
    """
    Find whether *artist* has been picked by *event*.

    If it has, return the appropriate `Selection`; otherwise return ``None``.

    This is a single-dispatch function; implementations for various artist
    classes follow.
    """
    # Fallback for artist types with no registered implementation.
    artist_type = type(artist)
    warnings.warn("Pick support for {} is missing.".format(artist_type))
    return None
class Index:
    """
    Fractional index into the vertices of a step-drawn line.

    ``int`` is the integer vertex index; ``x`` and ``y`` are the fractional
    positions along the horizontal and vertical parts of the step joining
    this vertex to the next.
    """

    def __init__(self, i, x, y):
        self.int = i
        self.x = x
        self.y = y

    def floor(self):
        """Integer vertex index at or before this position."""
        return self.int

    def ceil(self):
        """Integer vertex index at or after this position."""
        # Exactly on a vertex (both fractions zero) -> same vertex.
        return self.int if max(self.x, self.y) == 0 else self.int + 1

    def __format__(self, fmt):
        return "{0.int}.(x={0.x:{1}}, y={0.y:{1}})".format(self, fmt)

    def __str__(self):
        return format(self, "")

    @classmethod
    def pre_index(cls, n_pts, index):
        """Convert a path index on a "steps-pre" path to an `Index`."""
        whole, frac = divmod(index, 1)
        vertex, odd = divmod(whole, 2)
        x, y = (frac, 1) if odd else (0, frac)
        return cls(vertex, x, y)

    @classmethod
    def post_index(cls, n_pts, index):
        """Convert a path index on a "steps-post" path to an `Index`."""
        whole, frac = divmod(index, 1)
        vertex, odd = divmod(whole, 2)
        x, y = (1, frac) if odd else (frac, 0)
        return cls(vertex, x, y)

    @classmethod
    def mid_index(cls, n_pts, index):
        """Convert a path index on a "steps-mid" path to an `Index`."""
        whole, frac = divmod(index, 1)
        if whole == 0:
            # The first segment is only half a step wide.
            frac = .5 + frac / 2
        elif whole == 2 * n_pts - 2:  # One less line than points.
            frac = frac / 2
        quot, odd = divmod(whole, 2)
        if odd:
            vertex, x, y = quot, .5, frac
        elif frac < .5:
            vertex, x, y = quot - 1, frac + .5, 1
        else:
            vertex, x, y = quot, frac - .5, 0
        return cls(vertex, x, y)
def _compute_projection_pick(artist, path, xy):
    """
    Project *xy* on *path* to obtain a `Selection` for *artist*.

    *path* is first transformed to screen coordinates using the artist
    transform, and the target of the returned `Selection` is transformed
    back to data coordinates using the artist *axes* inverse transform.  The
    `Selection` `index` is returned as a float.  This function returns ``None``
    for degenerate inputs.

    The caller is responsible for converting the index to the proper class if
    needed.
    """
    transform = artist.get_transform().frozen()
    tpath = (path.cleaned(transform) if transform.is_affine
             # `cleaned` only handles affine transforms.
             else transform.transform_path(path).cleaned())
    # `cleaned` should return a path where the first element is `MOVETO`, the
    # following are `LINETO` or `CLOSEPOLY`, and the last one is `STOP`, i.e.
    #     codes = path.codes
    #     assert (codes[0], codes[-1]) == (path.MOVETO, path.STOP)
    #     assert np.in1d(codes[1:-1], [path.LINETO, path.CLOSEPOLY]).all()
    vertices = tpath.vertices[:-1]
    codes = tpath.codes[:-1]
    vertices[codes == tpath.CLOSEPOLY] = vertices[0]
    # Unit vectors for each segment.
    us = vertices[1:] - vertices[:-1]
    ls = np.hypot(*us.T)
    with np.errstate(invalid="ignore"):
        # Results in 0/0 for repeated consecutive points.
        us /= ls[:, None]
    # Vectors from each vertex to the event (overwritten below).
    vs = xy - vertices[:-1]
    # Clipped dot products -- `einsum` cannot be done in place, `clip` can.
    # `clip` can trigger invalid comparisons if there are nan points.
    # NOTE: `out=vs[:, 0]` deliberately reuses vs' storage, so vs must not be
    # read as vectors after this point.
    with np.errstate(invalid="ignore"):
        dot = np.clip(np.einsum("ij,ij->i", vs, us), 0, ls, out=vs[:, 0])
    # Projections of the event onto each segment.
    projs = vertices[:-1] + dot[:, None] * us
    ds = np.hypot(*(xy - projs).T, out=vs[:, 1])
    try:
        argmin = np.nanargmin(ds)
        dmin = ds[argmin]
    except (ValueError, IndexError):  # See above re: exceptions caught.
        return
    else:
        target = AttrArray(
            artist.axes.transData.inverted().transform_point(projs[argmin]))
        # Fractional index along the original (pre-interpolation) path.
        target.index = (
            (argmin + dot[argmin] / ls[argmin])
            / (path._interpolation_steps / tpath._interpolation_steps))
        return Selection(artist, target, dmin, None, None)
def _untransform(orig_xy, screen_xy, ax):
    """
    Return data coordinates to place an annotation at screen coordinates
    *screen_xy* in axes *ax*.

    *orig_xy* are the "original" coordinates as stored by the artist; they are
    transformed to *screen_xy* by whatever transform the artist uses.  If the
    artist uses ``ax.transData``, just return *orig_xy*; else, apply
    ``ax.transData.inverse()`` to *screen_xy*.  (The first case is more
    accurate than always applying ``ax.transData.inverse()``.)
    """
    roundtrip = ax.transData.transform(orig_xy)
    # Treat nans as matching so nan points don't force the inverse path.
    matches = (roundtrip == screen_xy) | (np.isnan(roundtrip)
                                          & np.isnan(screen_xy))
    if matches.all():
        return orig_xy
    return ax.transData.inverted().transform(screen_xy)
@compute_pick.register(Line2D)
def _(artist, event):
    # Pick on a Line2D: consider both the closest vertex (if markers are
    # drawn) and the closest projection onto a segment (if lines are drawn),
    # keeping whichever is nearer, within the artist's pickradius.
    # No need to call `line.contains` as we're going to redo the work anyways
    # (also see matplotlib/matplotlib#6645, though that's fixed in mpl2.1).

    # Always work in screen coordinates, as this is how we need to compute
    # distances.  Note that the artist transform may be different from the
    # axes transform (e.g., for axvline).
    xy = event.x, event.y
    data_xy = artist.get_xydata()
    data_screen_xy = artist.get_transform().transform(data_xy)
    sels = []
    # If markers are visible, find the closest vertex.
    if artist.get_marker() not in ["None", "none", " ", "", None]:
        ds = np.hypot(*(xy - data_screen_xy).T)
        try:
            argmin = np.nanargmin(ds)
            dmin = ds[argmin]
        except (ValueError, IndexError):
            # numpy 1.7.0's `nanargmin([nan])` returns nan, so
            # `ds[argmin]` raises IndexError.  In later versions of numpy,
            # `nanargmin([nan])` raises ValueError (the release notes for 1.8.0
            # are incorrect on this topic).
            pass
        else:
            # More precise than transforming back.
            target = _with_attrs(
                _untransform(
                    data_xy[argmin], data_screen_xy[argmin], artist.axes),
                index=argmin)
            sels.append(Selection(artist, target, dmin, None, None))
    # If lines are visible, find the closest projection.
    if (artist.get_linestyle() not in ["None", "none", " ", "", None]
            and len(artist.get_xydata()) > 1):
        sel = _compute_projection_pick(artist, artist.get_path(), xy)
        if sel is not None:
            # Convert the raw path index back to a vertex index, taking the
            # drawstyle (plain vs. step variants) into account.
            sel.target.index = {
                "_draw_lines": lambda _, index: index,
                "_draw_steps_pre": Index.pre_index,
                "_draw_steps_mid": Index.mid_index,
                "_draw_steps_post": Index.post_index}[
                    Line2D.drawStyles[artist.get_drawstyle()]](
                        len(data_xy), sel.target.index)
            sels.append(sel)
    sel = min(sels, key=lambda sel: sel.dist, default=None)
    return sel if sel and sel.dist < artist.get_pickradius() else None
@compute_pick.register(PathPatch)
@compute_pick.register(Polygon)
@compute_pick.register(Rectangle)
def _(artist, event):
    # Pick a patch by projecting the event onto its boundary path.
    candidate = _compute_projection_pick(
        artist, artist.get_path(), (event.x, event.y))
    if candidate is None or not (candidate.dist < PATCH_PICKRADIUS):
        return None
    return candidate
@compute_pick.register(LineCollection)
@compute_pick.register(PatchCollection)
@compute_pick.register(PathCollection)
def _(artist, event):
    # Pick on a collection: scatter-like collections pick the nearest offset;
    # other collections pick the nearest projection onto their paths.
    # Use the C implementation to prune the list of segments.
    contains, info = artist.contains(event)
    if not contains:
        return
    offsets = artist.get_offsets()
    paths = artist.get_paths()
    if _is_scatter(artist):
        inds = info["ind"]
        offsets = artist.get_offsets()[inds]
        offsets_screen = artist.get_offset_transform().transform(offsets)
        ds = np.hypot(*(offsets_screen - [event.x, event.y]).T)
        argmin = ds.argmin()
        target = _with_attrs(
            _untransform(offsets[argmin], offsets_screen[argmin], artist.axes),
            index=inds[argmin])
        return Selection(artist, target, ds[argmin], None, None)
    else:
        # Note that this won't select implicitly closed paths.
        # Offsets and paths are cycled (mod their lengths), mirroring how
        # collections broadcast them against each other.
        sels = [
            _compute_projection_pick(
                artist,
                Affine2D().translate(*offsets[ind % len(offsets)])
                .transform_path(paths[ind % len(paths)]),
                (event.x, event.y))
            for ind in info["ind"]]
        sel, index = min(
            ((sel, info["ind"][idx]) for idx, sel in enumerate(sels) if sel),
            key=lambda sel_idx: sel_idx[0].dist,
            default=(None, None))
        if sel:
            sel = sel._replace(artist=artist)
            # Index is (element index in collection, position within path).
            sel.target.index = (index, sel.target.index)
            if (isinstance(artist, PatchCollection)
                    and sel.dist >= PATCH_PICKRADIUS):
                sel = None
        return sel
@compute_pick.register(AxesImage)
def _(artist, event):
    # Pick on an image: target is the cursor position; index is the pixel's
    # (row, col) under the cursor.
    if type(artist) != AxesImage:
        # Skip and warn on subclasses (`NonUniformImage`, `PcolorImage`) as
        # they do not implement `contains` correctly.  Even if they did, they
        # would not support moving as we do not know where a given index maps
        # back physically.
        return compute_pick.dispatch(object)(artist, event)
    contains, _ = artist.contains(event)
    if not contains:
        return
    ns = np.asarray(artist.get_array().shape[:2])[::-1]  # (y, x) -> (x, y)
    xy = np.array([event.xdata, event.ydata])
    xmin, xmax, ymin, ymax = artist.get_extent()
    # Handling of "upper" origin copied from AxesImage.get_cursor_data.
    if artist.origin == "upper":
        ymin, ymax = ymax, ymin
    low, high = np.array([[xmin, ymin], [xmax, ymax]])
    # Map data coords to pixel indices; [::-1] flips back to (row, col).
    idxs = ((xy - low) / (high - low) * ns).astype(int)[::-1]
    target = _with_attrs(xy, index=tuple(idxs))
    return Selection(artist, target, 0, None, None)
@compute_pick.register(Barbs)
@compute_pick.register(Quiver)
def _(artist, event):
    # Pick the arrow/barb whose base offset is closest to the event.
    offsets = artist.get_offsets()
    offsets_screen = artist.get_offset_transform().transform(offsets)
    dists = np.hypot(*(offsets_screen - [event.x, event.y]).T)
    idx = np.nanargmin(dists)
    if not (dists[idx] < artist.get_pickradius()):
        return None
    target = _with_attrs(
        _untransform(offsets[idx], offsets_screen[idx], artist.axes),
        index=idx)
    return Selection(artist, target, dists[idx], None, None)
@compute_pick.register(Text)
def _(artist, event):
    # Text artists are deliberately never picked.
    return None
@compute_pick.register(ContainerArtist)
def _(artist, event):
    # Delegate the pick to the wrapped container, then relabel the selection
    # so it points at the wrapper.
    sel = compute_pick(artist.container, event)
    return sel._replace(artist=artist) if sel else sel
@compute_pick.register(BarContainer)
def _(container, event):
    try:
        # Exactly one bar must contain the event; zero or several bars make
        # the starred assignment raise ValueError.
        (idx, patch), = {
            (idx, patch) for idx, patch in enumerate(container.patches)
            if patch.contains(event)[0]}
    except ValueError:
        return
    target = _with_attrs([event.xdata, event.ydata], index=idx)
    # Snap the target to the bar edge that is *not* listed in sticky_edges
    # (again via a one-element starred assignment).
    if patch.sticky_edges.x:
        target[0], = (
            x for x in [patch.get_x(), patch.get_x() + patch.get_width()]
            if x not in patch.sticky_edges.x)
    if patch.sticky_edges.y:
        target[1], = (
            y for y in [patch.get_y(), patch.get_y() + patch.get_height()]
            if y not in patch.sticky_edges.y)
    return Selection(None, target, 0, None, None)
@compute_pick.register(ErrorbarContainer)
def _(container, event):
    data_line, cap_lines, err_lcs = container
    # Prefer a pick on the data line over one on an error-bar line.
    sel_data = compute_pick(data_line, event) if data_line else None
    sel_err = min(
        filter(None, (compute_pick(err_lc, event) for err_lc in err_lcs)),
        key=lambda sel: sel.dist, default=None)
    if (sel_data and sel_data.dist < getattr(sel_err, "dist", np.inf)):
        return sel_data
    elif sel_err:
        # An error-bar pick is snapped back onto the corresponding data
        # point (its index is the first component of the LC pick index).
        idx, _ = sel_err.target.index
        if data_line:
            target = _with_attrs(data_line.get_xydata()[idx], index=idx)
        else:  # We can't guess the original data in that case!
            return
        return Selection(None, target, 0, None, None)
    else:
        return
@compute_pick.register(StemContainer)
def _(container, event):
    # Prefer a pick on the marker line; otherwise pick the nearest stem line.
    sel = compute_pick(container.markerline, event)
    if sel:
        return sel
    idx_sel = min(filter(lambda idx_sel: idx_sel[1] is not None,
                         ((idx, compute_pick(line, event))
                          for idx, line in enumerate(container.stemlines))),
                  key=lambda idx_sel: idx_sel[1].dist, default=None)
    if idx_sel:
        idx, _ = idx_sel
        # Target the stem's head (the last vertex of the stem line).
        target = _with_attrs(
            container.stemlines[idx].get_xydata()[-1], index=idx)
        return Selection(None, target, 0, None, None)
def _call_with_selection(func):
    """Decorator that passes a `Selection` built from the non-kwonly args."""
    # Keyword-only parameters of the wrapped function are passed through
    # as-is; everything else is packed into a `Selection`.
    wrapped_kwonly_params = [
        param for param in inspect.signature(func).parameters.values()
        if param.kind == param.KEYWORD_ONLY]
    sel_sig = inspect.signature(Selection)
    # Same signature as `Selection`, but with every field defaulting to None
    # so callers may omit trailing fields.
    default_sel_sig = sel_sig.replace(
        parameters=[param.replace(default=None) if param.default is param.empty
                    else param
                    for param in sel_sig.parameters.values()])

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        extra_kw = {param.name: kwargs.pop(param.name)
                    for param in wrapped_kwonly_params if param.name in kwargs}
        ba = default_sel_sig.bind(*args, **kwargs)
        # apply_defaults -- done manually via ChainMap (presumably for
        # compatibility with Python versions lacking
        # BoundArguments.apply_defaults; confirm).
        ba.arguments = ChainMap(
            ba.arguments,
            {name: param.default
             for name, param in default_sel_sig.parameters.items()
             if param.default is not param.empty})
        sel = Selection(*ba.args, **ba.kwargs)
        return func(sel, **extra_kw)

    # Advertise the Selection fields plus kwonly params as the signature.
    wrapper.__signature__ = Signature(
        list(sel_sig.parameters.values()) + wrapped_kwonly_params)
    return wrapper
def _format_coord_unspaced(ax, xy):
    """
    Rework `Axes.format_coord` output: split the coordinates apart, drop
    entries that are bare prefixes ("x=", "y=", "z=" with no value), and
    rejoin the rest with newlines.
    """
    parts = re.split(",? +", ax.format_coord(*xy))
    prefixes = itertools.chain(["x=", "y=", "z="], itertools.repeat(None))
    kept = [part for part, prefix in zip(parts, prefixes) if part != prefix]
    return "\n".join(kept).rstrip()
@functools.singledispatch
@_call_with_selection
def get_ann_text(sel):
    """
    Compute an annotating text for a `Selection` (passed **unpacked**).

    This is a single-dispatch function; implementations for various artist
    classes follow.
    """
    # Fallback: no annotation support registered for this artist type.
    artist_type = type(sel.artist)
    warnings.warn(
        "Annotation support for {} is missing".format(artist_type))
    return ""
def _strip_math(s):
    """Strip mathtext markup from *s* when it is wrapped in dollar signs."""
    is_mathtext = len(s) >= 2 and s[0] == s[-1] == "$"
    return cbook.strip_math(s) if is_mathtext else s
def _format_scalarmappable_value(artist, idx):  # matplotlib/matplotlib#12473.
    """Format *artist*'s mapped value at *idx* as a bracketed string."""
    data = artist.get_array()[idx]
    if np.ndim(data) == 0:
        if not artist.colorbar:
            # Create a throwaway colorbar on a private figure just to get a
            # properly configured formatter for this mappable.
            fig = Figure()
            ax = fig.subplots()
            artist.colorbar = fig.colorbar(artist, cax=ax)
            # This hack updates the ticks without actually paying the cost of
            # drawing (RendererBase.draw_path raises NotImplementedError).
            try:
                ax.yaxis.draw(RendererBase())
            except NotImplementedError:
                pass
        fmt = artist.colorbar.formatter.format_data_short
        return "[" + _strip_math(fmt(data).strip()) + "]"
    else:
        text = artist.format_cursor_data(data)
        # get_cursor_data changed in Matplotlib 3.
        if not re.match(r"\A\[.*\]\Z", text):
            text = "[{}]".format(text)
        return text
@get_ann_text.register(Line2D)
@get_ann_text.register(LineCollection)
@get_ann_text.register(PatchCollection)
@get_ann_text.register(PathCollection)
@get_ann_text.register(Patch)
@_call_with_selection
def _(sel):
    # Annotation text: coordinates, plus the colormapped value for scatter
    # plots, prefixed with the artist's label unless it starts with "_".
    artist = sel.artist
    label = artist.get_label() or ""
    text = _format_coord_unspaced(artist.axes, sel.target)
    if (_is_scatter(artist)
            # Heuristic: is the artist colormapped?
            # Note that this doesn't handle size-mapping (which is more likely
            # to involve an arbitrary scaling).
            and artist.get_array() is not None
            and len(artist.get_array()) == len(artist.get_offsets())):
        value = _format_scalarmappable_value(artist, sel.target.index)
        text = "{}\n{}".format(text, value)
    if re.match("[^_]", label):
        text = "{}\n{}".format(label, text)
    return text
# Lightweight stand-in exposing the ``xdata``/``ydata`` attributes of an event.
_Event = namedtuple("_Event", "xdata ydata")
@get_ann_text.register(AxesImage)
@_call_with_selection
def _(sel):
    # Image annotation: cursor coordinates plus the value under the cursor.
    ax = sel.artist.axes
    coord_text = _format_coord_unspaced(ax, sel.target)
    value_text = _format_scalarmappable_value(sel.artist, sel.target.index)
    return "{}\n{}".format(coord_text, value_text)
@get_ann_text.register(Barbs)
@_call_with_selection
def _(sel):
    # Barb annotation: position plus the (u, v) components at the pick index.
    artist = sel.artist
    idx = sel.target.index
    coord_text = _format_coord_unspaced(artist.axes, sel.target)
    return "{}\n{}".format(coord_text, (artist.u[idx], artist.v[idx]))
@get_ann_text.register(Quiver)
@_call_with_selection
def _(sel):
    # Quiver annotation: position plus the (U, V) components at the pick index.
    artist = sel.artist
    idx = sel.target.index
    coord_text = _format_coord_unspaced(artist.axes, sel.target)
    return "{}\n{}".format(coord_text, (artist.U[idx], artist.V[idx]))
@get_ann_text.register(ContainerArtist)
@_call_with_selection
def _(sel):
    # Delegate annotation to the wrapped container.
    delegated = sel._replace(artist=sel.artist.container)
    return get_ann_text(*delegated)
@get_ann_text.register(BarContainer)
@_call_with_selection
def _(sel):
    # Bars: format the picked target through the container's axes.
    ax = _artist_in_container(sel.artist).axes
    return _format_coord_unspaced(ax, sel.target)
@get_ann_text.register(ErrorbarContainer)
@_call_with_selection
def _(sel):
data_line, cap_lines, err_lcs = sel.artist
ann_text = get_ann_text(*sel._replace(artist=data_line))
if isinstance(sel.target.index, Integral):
err_lcs = iter(err_lcs)
for idx, (dir, has) in enumerate(
zip("xy", [sel.artist.has_xerr, sel.artist.has_yerr])):
if has:
err = (next(err_lcs).get_paths()[sel.target.index].vertices
- data_line.get_xydata()[sel.target.index])[:, idx]
err_s = [getattr(_artist_in_container(sel.artist).axes,
"format_{}data".format(dir))(e).rstrip()
for e in err]
# We'd normally want to check err.sum() == 0, but that can run
# into fp inaccuracies.
if len({s.lstrip("+-") for s in err_s}) == 1:
repl = r"\1=$\2\\pm{}$\3".format(err_s[1])
else:
err_s = [("+" if not s.startswith(("+", "-")) else "") + s
for s in err_s]
repl = r"\1=$\2_{{{}}}^{{{}}}$\3".format(*err_s)
ann_text = re.sub("({})=(.*)(\n?)".format(dir), repl, ann_text)
return ann_text
@get_ann_text.register(StemContainer)
@_call_with_selection
def _(sel):
return get_ann_text(*sel._replace(artist=sel.artist.markerline))
@functools.singledispatch
@_call_with_selection
def move(sel, *, key):
"""
Move a `Selection` (passed **unpacked**) following a keypress.
This function is used to implement annotation displacement through the
keyboard.
This is a single-dispatch function; implementations for various artist
classes follow.
"""
return sel
def _move_within_points(sel, xys, *, key):
    """
    Step *sel* to the previous ("left") or next ("right") finite point in
    *xys*; any other key returns *sel* unchanged.
    """
    # Bound the number of steps so we can't loop forever if every point
    # became nan at some stage.
    n = len(xys)
    for _ in range(n):
        if key == "left":
            idx = int(np.ceil(sel.target.index) - 1) % n
        elif key == "right":
            idx = int(np.floor(sel.target.index) + 1) % n
        else:
            return sel
        new_target = _with_attrs(xys[idx], index=idx)
        sel = sel._replace(target=new_target, dist=0)
        if np.isfinite(new_target).all():
            return sel
@move.register(Line2D)
@_call_with_selection
def _(sel, *, key):
data_xy = sel.artist.get_xydata()
return _move_within_points(
sel,
_untransform(data_xy, sel.artist.get_transform().transform(data_xy),
sel.artist.axes),
key=key)
@move.register(PathCollection)
@_call_with_selection
def _(sel, *, key):
if _is_scatter(sel.artist):
offsets = sel.artist.get_offsets()
return _move_within_points(
sel,
_untransform(
offsets, sel.artist.get_offset_transform().transform(offsets),
sel.artist.axes),
key=key)
else:
return sel
@move.register(AxesImage)
@_call_with_selection
def _(sel, *, key):
    # Step the selection by one pixel within the image; the up/down step
    # direction in (row, col) index space depends on the image origin.
    # NOTE(review): uses the full array shape — assumes a 2D (non-RGB(A))
    # array; confirm against how selections on RGB images are created.
    ns = sel.artist.get_array().shape
    idxs = (np.asarray(sel.target.index)
            + {"left": [0, -1],
               "right": [0, 1],
               "up": {"lower": [1, 0], "upper": [-1, 0]}[sel.artist.origin],
               "down": {"lower": [-1, 0], "upper": [1, 0]}[sel.artist.origin]}[
                key]) % ns
    xmin, xmax, ymin, ymax = sel.artist.get_extent()
    # Same "upper" origin handling as in compute_pick for AxesImage.
    if sel.artist.origin == "upper":
        ymin, ymax = ymax, ymin
    low, high = np.array([[xmin, ymin], [xmax, ymax]])
    # Place the target at the data coordinates of the pixel center
    # ((idxs + .5) / ns), reversing (row, col) into (x, y) order.
    target = _with_attrs(((idxs + .5) / ns)[::-1] * (high - low) + low,
                         index=tuple(idxs))
    return sel._replace(target=target)
@move.register(ContainerArtist)
@_call_with_selection
def _(sel, *, key):
return (move(*sel._replace(artist=sel.artist.container), key=key)
._replace(artist=sel.artist))
@move.register(ErrorbarContainer)
@_call_with_selection
def _(sel, *, key):
data_line, cap_lines, err_lcs = sel.artist
return _move_within_points(sel, data_line.get_xydata(), key=key)
@functools.singledispatch
@_call_with_selection
def make_highlight(sel, *, highlight_kwargs):
"""
Create a highlight for a `Selection`.
This is a single-dispatch function; implementations for various artist
classes follow.
"""
warnings.warn(
"Highlight support for {} is missing".format(type(sel.artist)))
def _set_valid_props(artist, kwargs):
"""Set valid properties for the artist, dropping the others."""
artist.set(**{k: kwargs[k] for k in kwargs if hasattr(artist, "set_" + k)})
return artist
@make_highlight.register(Line2D)
@_call_with_selection
def _(sel, *, highlight_kwargs):
    # Highlight a line by restyling a shallow copy of it.
    duplicate = copy.copy(sel.artist)
    return _set_valid_props(duplicate, highlight_kwargs)
@make_highlight.register(PathCollection)
@_call_with_selection
def _(sel, *, highlight_kwargs):
    # Highlight a scatter point by copying the collection and nan-masking
    # every offset except the selected one.
    hl = copy.copy(sel.artist)
    offsets = hl.get_offsets()
    hl.set_offsets(np.where(
        np.arange(len(offsets))[:, None] == sel.target.index, offsets, np.nan))
    _set_valid_props(hl, highlight_kwargs)
    return hl
|
anntzer/mplcursors
|
lib/mplcursors/_pick_info.py
|
_compute_projection_pick
|
python
|
def _compute_projection_pick(artist, path, xy):
    """
    Project *xy* (screen coordinates) on *path* to obtain a `Selection` for
    *artist*, or return None for degenerate inputs.

    The `Selection` target is transformed back to data coordinates with the
    artist axes' inverse transform, and its ``index`` is a float position
    along the path; callers convert it to a richer index class if needed.
    """
    transform = artist.get_transform().frozen()
    tpath = (path.cleaned(transform) if transform.is_affine
             # `cleaned` only handles affine transforms.
             else transform.transform_path(path).cleaned())
    # `cleaned` should return a path where the first element is `MOVETO`, the
    # following are `LINETO` or `CLOSEPOLY`, and the last one is `STOP`, i.e.
    #     codes = path.codes
    #     assert (codes[0], codes[-1]) == (path.MOVETO, path.STOP)
    #     assert np.in1d(codes[1:-1], [path.LINETO, path.CLOSEPOLY]).all()
    vertices = tpath.vertices[:-1]
    codes = tpath.codes[:-1]
    # Close polygons: snap CLOSEPOLY vertices back to the start point.
    vertices[codes == tpath.CLOSEPOLY] = vertices[0]
    # Unit vectors for each segment.
    us = vertices[1:] - vertices[:-1]
    ls = np.hypot(*us.T)
    with np.errstate(invalid="ignore"):
        # Results in 0/0 for repeated consecutive points.
        us /= ls[:, None]
    # Vectors from each vertex to the event (overwritten below).
    vs = xy - vertices[:-1]
    # Clipped dot products -- `einsum` cannot be done in place, `clip` can.
    # `clip` can trigger invalid comparisons if there are nan points.
    with np.errstate(invalid="ignore"):
        dot = np.clip(np.einsum("ij,ij->i", vs, us), 0, ls, out=vs[:, 0])
    # Projections of the event onto each segment.
    projs = vertices[:-1] + dot[:, None] * us
    ds = np.hypot(*(xy - projs).T, out=vs[:, 1])
    try:
        argmin = np.nanargmin(ds)
        dmin = ds[argmin]
    except (ValueError, IndexError):
        # nanargmin raises ValueError on all-nan/empty input; on old numpy
        # it instead returns nan and the indexing raises IndexError.
        return
    else:
        target = AttrArray(
            artist.axes.transData.inverted().transform_point(projs[argmin]))
        # Segment index + fractional progress, rescaled to undo the path's
        # interpolation-step expansion.
        target.index = (
            (argmin + dot[argmin] / ls[argmin])
            / (path._interpolation_steps / tpath._interpolation_steps))
        return Selection(artist, target, dmin, None, None)
|
Project *xy* on *path* to obtain a `Selection` for *artist*.
*path* is first transformed to screen coordinates using the artist
transform, and the target of the returned `Selection` is transformed
back to data coordinates using the artist *axes* inverse transform. The
`Selection` `index` is returned as a float. This function returns ``None``
for degenerate inputs.
The caller is responsible for converting the index to the proper class if
needed.
|
train
|
https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_pick_info.py#L201-L252
| null |
# Unsupported Artist classes: subclasses of AxesImage, QuadMesh (upstream could
# have a `format_coord`-like method); PolyCollection (picking is not well
# defined).
from collections import ChainMap, namedtuple
from contextlib import suppress
import copy
import functools
import inspect
from inspect import Signature
import itertools
from numbers import Integral
import re
import warnings
from weakref import WeakSet
from matplotlib import cbook
from matplotlib.axes import Axes
from matplotlib.backend_bases import RendererBase
from matplotlib.collections import (
LineCollection, PatchCollection, PathCollection)
from matplotlib.container import BarContainer, ErrorbarContainer, StemContainer
from matplotlib.figure import Figure
from matplotlib.image import AxesImage
from matplotlib.lines import Line2D
from matplotlib.patches import Patch, PathPatch, Polygon, Rectangle
from matplotlib.quiver import Barbs, Quiver
from matplotlib.text import Text
from matplotlib.transforms import Affine2D
import numpy as np
Integral.register(np.integer) # Back-compatibility for numpy 1.7, 1.8.
PATCH_PICKRADIUS = 5 # FIXME Patches do not provide `pickradius`.
def _register_scatter():
    """
    Patch `PathCollection` and `scatter` to register their return values.

    This registration allows us to distinguish `PathCollection`s created by
    `Axes.scatter`, which should use point-like picking, from others, which
    should use path-like picking.  The former is more common, so we store the
    latter instead; this also lets us guess the type better if this module is
    imported late.
    """
    @functools.wraps(PathCollection.__init__)
    def __init__(self, *args, **kwargs):
        # Every new PathCollection starts out registered as "non-scatter"...
        _nonscatter_pathcollections.add(self)
        return __init__.__wrapped__(self, *args, **kwargs)
    PathCollection.__init__ = __init__
    @functools.wraps(Axes.scatter)
    def scatter(*args, **kwargs):
        # ...and is unregistered when it was created through Axes.scatter.
        paths = scatter.__wrapped__(*args, **kwargs)
        with suppress(KeyError):
            _nonscatter_pathcollections.remove(paths)
        return paths
    Axes.scatter = scatter
_nonscatter_pathcollections = WeakSet()
_register_scatter()
def _is_scatter(artist):
    """Return whether *artist* is a `PathCollection` created by `scatter`."""
    if not isinstance(artist, PathCollection):
        return False
    return artist not in _nonscatter_pathcollections
def _artist_in_container(container):
return next(filter(None, container.get_children()))
class ContainerArtist:
    """Workaround to make containers behave more like artists."""
    def __init__(self, container):
        self.container = container  # Guaranteed to be nonempty.
        # We can't weakref the Container (which subclasses tuple), so
        # we instead create a reference cycle between the Container and
        # the ContainerArtist; as no one else strongly references the
        # ContainerArtist, it will get GC'd whenever the Container is.
        # The attribute name is mangled by hand
        # ("_ContainerArtist__keep_alive") to avoid clashes.
        vars(container).setdefault(
            "_{}__keep_alive".format(__class__.__name__), []).append(self)
    def __str__(self):
        return "<{}({})>".format(type(self).__name__, self.container)
    def __repr__(self):
        return "<{}({!r})>".format(type(self).__name__, self.container)
    # Delegate figure/axes lookup to the first real artist in the container.
    figure = property(lambda self: _artist_in_container(self.container).figure)
    axes = property(lambda self: _artist_in_container(self.container).axes)
class AttrArray(np.ndarray):
    """An ndarray subclass on which arbitrary extra attributes can be set."""
    def __new__(cls, array):
        # View (not copy) the input as this subclass; plain ndarray
        # subclasses carry an instance __dict__, so attributes stick.
        base = np.asarray(array)
        return base.view(cls)
def _with_attrs(array, **kwargs):
    """Return *array* viewed as an `AttrArray` with *kwargs* set as attributes."""
    attr_array = AttrArray(array)
    for name, value in kwargs.items():
        setattr(attr_array, name, value)
    return attr_array
Selection = namedtuple("Selection", "artist target dist annotation extras")
# Override equality to identity: Selections should be considered immutable
# (with mutable fields though) and we don't want to trigger casts of array
# equality checks to booleans. We don't need to override comparisons because
# artists are already non-comparable.
Selection.__eq__ = lambda self, other: self is other
Selection.__ne__ = lambda self, other: self is not other
try:
Selection.artist.__doc__ = (
"The selected artist.")
Selection.target.__doc__ = (
"The point picked within the artist, in data coordinates.")
Selection.dist.__doc__ = (
"The distance from the click to the target, in pixels.")
Selection.annotation.__doc__ = (
"The instantiated `matplotlib.text.Annotation`.")
Selection.extras.__doc__ = (
"An additional list of artists (e.g., highlighters) that will be "
"cleared at the same time as the annotation.")
except AttributeError: # Read-only in Py3.4.
pass
@functools.singledispatch
def compute_pick(artist, event):
"""
Find whether *artist* has been picked by *event*.
If it has, return the appropriate `Selection`; otherwise return ``None``.
This is a single-dispatch function; implementations for various artist
classes follow.
"""
warnings.warn("Pick support for {} is missing.".format(type(artist)))
class Index:
    # Index into a step-drawn Line2D, split as an integer data-point index
    # plus fractional progress (x, y) along the horizontal and vertical
    # parts of the step.  Built from a float index on the expanded step
    # path by the classmethods below (see compute_pick for Line2D).
    def __init__(self, i, x, y):
        self.int = i  # Integer part: index of the underlying data point.
        self.x = x    # Fractional progress along the horizontal segment.
        self.y = y    # Fractional progress along the vertical segment.
    def floor(self):
        return self.int
    def ceil(self):
        # Round up as soon as there is any fractional progress.
        return self.int if max(self.x, self.y) == 0 else self.int + 1
    def __format__(self, fmt):
        return "{0.int}.(x={0.x:{1}}, y={0.y:{1}})".format(self, fmt)
    def __str__(self):
        return format(self, "")
    @classmethod
    def pre_index(cls, n_pts, index):
        # "steps-pre": each data point expands to (vertical, horizontal)
        # segment pairs; even path segments are vertical, odd horizontal.
        i, frac = divmod(index, 1)
        i, odd = divmod(i, 2)
        x, y = (0, frac) if not odd else (frac, 1)
        return cls(i, x, y)
    @classmethod
    def post_index(cls, n_pts, index):
        # "steps-post": mirror of pre_index -- horizontal segment first.
        i, frac = divmod(index, 1)
        i, odd = divmod(i, 2)
        x, y = (frac, 0) if not odd else (1, frac)
        return cls(i, x, y)
    @classmethod
    def mid_index(cls, n_pts, index):
        # "steps-mid": half-length horizontal stubs at both ends, so the
        # first and last segments map to half fractions.
        i, frac = divmod(index, 1)
        if i == 0:
            frac = .5 + frac / 2
        elif i == 2 * n_pts - 2:  # One less line than points.
            frac = frac / 2
        quot, odd = divmod(i, 2)
        if not odd:
            # Horizontal segment, shared between two data points: the
            # first half belongs to the previous point.
            if frac < .5:
                i = quot - 1
                x, y = frac + .5, 1
            else:
                i = quot
                x, y = frac - .5, 0
        else:
            # Vertical segment at the midpoint between data points.
            i = quot
            x, y = .5, frac
        return cls(i, x, y)
def _untransform(orig_xy, screen_xy, ax):
"""
Return data coordinates to place an annotation at screen coordinates
*screen_xy* in axes *ax*.
*orig_xy* are the "original" coordinates as stored by the artist; they are
transformed to *screen_xy* by whatever transform the artist uses. If the
artist uses ``ax.transData``, just return *orig_xy*; else, apply
``ax.transData.inverse()`` to *screen_xy*. (The first case is more
accurate than always applying ``ax.transData.inverse()``.)
"""
tr_xy = ax.transData.transform(orig_xy)
return (
orig_xy
if ((tr_xy == screen_xy) | np.isnan(tr_xy) & np.isnan(screen_xy)).all()
else ax.transData.inverted().transform(screen_xy))
@compute_pick.register(Line2D)
def _(artist, event):
    # No need to call `line.contains` as we're going to redo the work anyways
    # (also see matplotlib/matplotlib#6645, though that's fixed in mpl2.1).
    # Always work in screen coordinates, as this is how we need to compute
    # distances.  Note that the artist transform may be different from the
    # axes transform (e.g., for axvline).
    xy = event.x, event.y
    data_xy = artist.get_xydata()
    data_screen_xy = artist.get_transform().transform(data_xy)
    sels = []
    # If markers are visible, find the closest vertex.
    if artist.get_marker() not in ["None", "none", " ", "", None]:
        ds = np.hypot(*(xy - data_screen_xy).T)
        try:
            argmin = np.nanargmin(ds)
            dmin = ds[argmin]
        except (ValueError, IndexError):
            # numpy 1.7.0's `nanargmin([nan])` returns nan, so
            # `ds[argmin]` raises IndexError. In later versions of numpy,
            # `nanargmin([nan])` raises ValueError (the release notes for 1.8.0
            # are incorrect on this topic).
            pass
        else:
            # More precise than transforming back.
            target = _with_attrs(
                _untransform(
                    data_xy[argmin], data_screen_xy[argmin], artist.axes),
                index=argmin)
            sels.append(Selection(artist, target, dmin, None, None))
    # If lines are visible, find the closest projection.
    if (artist.get_linestyle() not in ["None", "none", " ", "", None]
            and len(artist.get_xydata()) > 1):
        sel = _compute_projection_pick(artist, artist.get_path(), xy)
        if sel is not None:
            # Convert the float index on the low-level path back into an
            # index relative to the data points, depending on the draw style.
            sel.target.index = {
                "_draw_lines": lambda _, index: index,
                "_draw_steps_pre": Index.pre_index,
                "_draw_steps_mid": Index.mid_index,
                "_draw_steps_post": Index.post_index}[
                    Line2D.drawStyles[artist.get_drawstyle()]](
                    len(data_xy), sel.target.index)
            sels.append(sel)
    # Keep the closest candidate, if any, and enforce the pick radius.
    sel = min(sels, key=lambda sel: sel.dist, default=None)
    return sel if sel and sel.dist < artist.get_pickradius() else None
@compute_pick.register(PathPatch)
@compute_pick.register(Polygon)
@compute_pick.register(Rectangle)
def _(artist, event):
sel = _compute_projection_pick(
artist, artist.get_path(), (event.x, event.y))
if sel and sel.dist < PATCH_PICKRADIUS:
return sel
@compute_pick.register(LineCollection)
@compute_pick.register(PatchCollection)
@compute_pick.register(PathCollection)
def _(artist, event):
# Use the C implementation to prune the list of segments.
contains, info = artist.contains(event)
if not contains:
return
offsets = artist.get_offsets()
paths = artist.get_paths()
if _is_scatter(artist):
inds = info["ind"]
offsets = artist.get_offsets()[inds]
offsets_screen = artist.get_offset_transform().transform(offsets)
ds = np.hypot(*(offsets_screen - [event.x, event.y]).T)
argmin = ds.argmin()
target = _with_attrs(
_untransform(offsets[argmin], offsets_screen[argmin], artist.axes),
index=inds[argmin])
return Selection(artist, target, ds[argmin], None, None)
else:
# Note that this won't select implicitly closed paths.
sels = [
_compute_projection_pick(
artist,
Affine2D().translate(*offsets[ind % len(offsets)])
.transform_path(paths[ind % len(paths)]),
(event.x, event.y))
for ind in info["ind"]]
sel, index = min(
((sel, info["ind"][idx]) for idx, sel in enumerate(sels) if sel),
key=lambda sel_idx: sel_idx[0].dist,
default=(None, None))
if sel:
sel = sel._replace(artist=artist)
sel.target.index = (index, sel.target.index)
if (isinstance(artist, PatchCollection)
and sel.dist >= PATCH_PICKRADIUS):
sel = None
return sel
@compute_pick.register(AxesImage)
def _(artist, event):
if type(artist) != AxesImage:
# Skip and warn on subclasses (`NonUniformImage`, `PcolorImage`) as
# they do not implement `contains` correctly. Even if they did, they
# would not support moving as we do not know where a given index maps
# back physically.
return compute_pick.dispatch(object)(artist, event)
contains, _ = artist.contains(event)
if not contains:
return
ns = np.asarray(artist.get_array().shape[:2])[::-1] # (y, x) -> (x, y)
xy = np.array([event.xdata, event.ydata])
xmin, xmax, ymin, ymax = artist.get_extent()
# Handling of "upper" origin copied from AxesImage.get_cursor_data.
if artist.origin == "upper":
ymin, ymax = ymax, ymin
low, high = np.array([[xmin, ymin], [xmax, ymax]])
idxs = ((xy - low) / (high - low) * ns).astype(int)[::-1]
target = _with_attrs(xy, index=tuple(idxs))
return Selection(artist, target, 0, None, None)
@compute_pick.register(Barbs)
@compute_pick.register(Quiver)
def _(artist, event):
    # Pick the offset (arrow/barb base) closest to the event, if within the
    # pick radius.
    offsets = artist.get_offsets()
    offsets_screen = artist.get_offset_transform().transform(offsets)
    ds = np.hypot(*(offsets_screen - [event.x, event.y]).T)
    try:
        # Fix: nanargmin raises ValueError on empty/all-nan input (old numpy
        # instead returns nan, making the indexing raise IndexError); guard
        # it like the Line2D picker does instead of letting it propagate.
        argmin = np.nanargmin(ds)
        dmin = ds[argmin]
    except (ValueError, IndexError):
        return None
    if dmin < artist.get_pickradius():
        target = _with_attrs(
            _untransform(offsets[argmin], offsets_screen[argmin], artist.axes),
            index=argmin)
        return Selection(artist, target, dmin, None, None)
    else:
        return None
@compute_pick.register(Text)
def _(artist, event):
    # Text artists are intentionally never picked: no selection is returned.
    return None
@compute_pick.register(ContainerArtist)
def _(artist, event):
sel = compute_pick(artist.container, event)
if sel:
sel = sel._replace(artist=artist)
return sel
@compute_pick.register(BarContainer)
def _(container, event):
    try:
        # Single-element unpacking: exactly one bar must contain the event;
        # zero or several raise ValueError and abort the pick.
        (idx, patch), = {
            (idx, patch) for idx, patch in enumerate(container.patches)
            if patch.contains(event)[0]}
    except ValueError:
        return
    target = _with_attrs([event.xdata, event.ydata], index=idx)
    # Snap the target to the bar's free edge -- the one NOT registered as a
    # sticky edge (presumably the baseline side; confirm against how
    # matplotlib sets sticky_edges for bars).
    if patch.sticky_edges.x:
        target[0], = (
            x for x in [patch.get_x(), patch.get_x() + patch.get_width()]
            if x not in patch.sticky_edges.x)
    if patch.sticky_edges.y:
        target[1], = (
            y for y in [patch.get_y(), patch.get_y() + patch.get_height()]
            if y not in patch.sticky_edges.y)
    return Selection(None, target, 0, None, None)
@compute_pick.register(ErrorbarContainer)
def _(container, event):
    data_line, cap_lines, err_lcs = container
    # Pick the data line and all error-bar collections independently, then
    # keep whichever hit is closest.
    sel_data = compute_pick(data_line, event) if data_line else None
    sel_err = min(
        filter(None, (compute_pick(err_lc, event) for err_lc in err_lcs)),
        key=lambda sel: sel.dist, default=None)
    if (sel_data and sel_data.dist < getattr(sel_err, "dist", np.inf)):
        return sel_data
    elif sel_err:
        # Collection indices are (collection idx, within-path idx); only the
        # first component maps back to a data point.
        idx, _ = sel_err.target.index
        if data_line:
            target = _with_attrs(data_line.get_xydata()[idx], index=idx)
        else:  # We can't guess the original data in that case!
            return
        return Selection(None, target, 0, None, None)
    else:
        return
@compute_pick.register(StemContainer)
def _(container, event):
sel = compute_pick(container.markerline, event)
if sel:
return sel
idx_sel = min(filter(lambda idx_sel: idx_sel[1] is not None,
((idx, compute_pick(line, event))
for idx, line in enumerate(container.stemlines))),
key=lambda idx_sel: idx_sel[1].dist, default=None)
if idx_sel:
idx, _ = idx_sel
target = _with_attrs(
container.stemlines[idx].get_xydata()[-1], index=idx)
return Selection(None, target, 0, None, None)
def _call_with_selection(func):
    """Decorator that passes a `Selection` built from the non-kwonly args."""
    # Keyword-only parameters of the wrapped function are forwarded as-is
    # rather than folded into the Selection.
    wrapped_kwonly_params = [
        param for param in inspect.signature(func).parameters.values()
        if param.kind == param.KEYWORD_ONLY]
    sel_sig = inspect.signature(Selection)
    # Give every Selection field a default of None so callers may pass only
    # a prefix of the fields.
    default_sel_sig = sel_sig.replace(
        parameters=[param.replace(default=None) if param.default is param.empty
                    else param
                    for param in sel_sig.parameters.values()])
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Pull out the wrapped function's own keyword-only arguments first.
        extra_kw = {param.name: kwargs.pop(param.name)
                    for param in wrapped_kwonly_params if param.name in kwargs}
        ba = default_sel_sig.bind(*args, **kwargs)
        # apply_defaults -- done by hand via ChainMap (presumably to support
        # Python versions predating BoundArguments.apply_defaults; confirm).
        ba.arguments = ChainMap(
            ba.arguments,
            {name: param.default
             for name, param in default_sel_sig.parameters.items()
             if param.default is not param.empty})
        sel = Selection(*ba.args, **ba.kwargs)
        return func(sel, **extra_kw)
    # Advertise the unpacked-Selection signature to introspection tools.
    wrapper.__signature__ = Signature(
        list(sel_sig.parameters.values()) + wrapped_kwonly_params)
    return wrapper
def _format_coord_unspaced(ax, xy):
# Un-space-pad, remove empty coordinates from the output of
# `format_{x,y}data`, and rejoin with newlines.
return "\n".join(
line for line, empty in zip(
re.split(",? +", ax.format_coord(*xy)),
itertools.chain(["x=", "y=", "z="], itertools.repeat(None)))
if line != empty).rstrip()
@functools.singledispatch
@_call_with_selection
def get_ann_text(sel):
"""
Compute an annotating text for a `Selection` (passed **unpacked**).
This is a single-dispatch function; implementations for various artist
classes follow.
"""
warnings.warn(
"Annotation support for {} is missing".format(type(sel.artist)))
return ""
def _strip_math(s):
    """Strip mathtext markup from *s* if it is a ``$...$`` string."""
    is_mathtext = len(s) >= 2 and s[0] == s[-1] == "$"
    return cbook.strip_math(s) if is_mathtext else s
def _format_scalarmappable_value(artist, idx): # matplotlib/matplotlib#12473.
data = artist.get_array()[idx]
if np.ndim(data) == 0:
if not artist.colorbar:
fig = Figure()
ax = fig.subplots()
artist.colorbar = fig.colorbar(artist, cax=ax)
# This hack updates the ticks without actually paying the cost of
# drawing (RendererBase.draw_path raises NotImplementedError).
try:
ax.yaxis.draw(RendererBase())
except NotImplementedError:
pass
fmt = artist.colorbar.formatter.format_data_short
return "[" + _strip_math(fmt(data).strip()) + "]"
else:
text = artist.format_cursor_data(data)
# get_cursor_data changed in Matplotlib 3.
if not re.match(r"\A\[.*\]\Z", text):
text = "[{}]".format(text)
return text
@get_ann_text.register(Line2D)
@get_ann_text.register(LineCollection)
@get_ann_text.register(PatchCollection)
@get_ann_text.register(PathCollection)
@get_ann_text.register(Patch)
@_call_with_selection
def _(sel):
artist = sel.artist
label = artist.get_label() or ""
text = _format_coord_unspaced(artist.axes, sel.target)
if (_is_scatter(artist)
# Heuristic: is the artist colormapped?
# Note that this doesn't handle size-mapping (which is more likely
# to involve an arbitrary scaling).
and artist.get_array() is not None
and len(artist.get_array()) == len(artist.get_offsets())):
value = _format_scalarmappable_value(artist, sel.target.index)
text = "{}\n{}".format(text, value)
if re.match("[^_]", label):
text = "{}\n{}".format(label, text)
return text
_Event = namedtuple("_Event", "xdata ydata")
@get_ann_text.register(AxesImage)
@_call_with_selection
def _(sel):
artist = sel.artist
text = _format_coord_unspaced(artist.axes, sel.target)
cursor_text = _format_scalarmappable_value(artist, sel.target.index)
return "{}\n{}".format(text, cursor_text)
@get_ann_text.register(Barbs)
@_call_with_selection
def _(sel):
artist = sel.artist
text = "{}\n{}".format(
_format_coord_unspaced(artist.axes, sel.target),
(artist.u[sel.target.index], artist.v[sel.target.index]))
return text
@get_ann_text.register(Quiver)
@_call_with_selection
def _(sel):
artist = sel.artist
text = "{}\n{}".format(
_format_coord_unspaced(artist.axes, sel.target),
(artist.U[sel.target.index], artist.V[sel.target.index]))
return text
@get_ann_text.register(ContainerArtist)
@_call_with_selection
def _(sel):
return get_ann_text(*sel._replace(artist=sel.artist.container))
@get_ann_text.register(BarContainer)
@_call_with_selection
def _(sel):
return _format_coord_unspaced(
_artist_in_container(sel.artist).axes, sel.target)
@get_ann_text.register(ErrorbarContainer)
@_call_with_selection
def _(sel):
data_line, cap_lines, err_lcs = sel.artist
ann_text = get_ann_text(*sel._replace(artist=data_line))
if isinstance(sel.target.index, Integral):
err_lcs = iter(err_lcs)
for idx, (dir, has) in enumerate(
zip("xy", [sel.artist.has_xerr, sel.artist.has_yerr])):
if has:
err = (next(err_lcs).get_paths()[sel.target.index].vertices
- data_line.get_xydata()[sel.target.index])[:, idx]
err_s = [getattr(_artist_in_container(sel.artist).axes,
"format_{}data".format(dir))(e).rstrip()
for e in err]
# We'd normally want to check err.sum() == 0, but that can run
# into fp inaccuracies.
if len({s.lstrip("+-") for s in err_s}) == 1:
repl = r"\1=$\2\\pm{}$\3".format(err_s[1])
else:
err_s = [("+" if not s.startswith(("+", "-")) else "") + s
for s in err_s]
repl = r"\1=$\2_{{{}}}^{{{}}}$\3".format(*err_s)
ann_text = re.sub("({})=(.*)(\n?)".format(dir), repl, ann_text)
return ann_text
@get_ann_text.register(StemContainer)
@_call_with_selection
def _(sel):
return get_ann_text(*sel._replace(artist=sel.artist.markerline))
@functools.singledispatch
@_call_with_selection
def move(sel, *, key):
"""
Move a `Selection` (passed **unpacked**) following a keypress.
This function is used to implement annotation displacement through the
keyboard.
This is a single-dispatch function; implementations for various artist
classes follow.
"""
return sel
def _move_within_points(sel, xys, *, key):
# Avoid infinite loop in case everything became nan at some point.
for _ in range(len(xys)):
if key == "left":
new_idx = int(np.ceil(sel.target.index) - 1) % len(xys)
elif key == "right":
new_idx = int(np.floor(sel.target.index) + 1) % len(xys)
else:
return sel
target = _with_attrs(xys[new_idx], index=new_idx)
sel = sel._replace(target=target, dist=0)
if np.isfinite(target).all():
return sel
@move.register(Line2D)
@_call_with_selection
def _(sel, *, key):
data_xy = sel.artist.get_xydata()
return _move_within_points(
sel,
_untransform(data_xy, sel.artist.get_transform().transform(data_xy),
sel.artist.axes),
key=key)
@move.register(PathCollection)
@_call_with_selection
def _(sel, *, key):
if _is_scatter(sel.artist):
offsets = sel.artist.get_offsets()
return _move_within_points(
sel,
_untransform(
offsets, sel.artist.get_offset_transform().transform(offsets),
sel.artist.axes),
key=key)
else:
return sel
@move.register(AxesImage)
@_call_with_selection
def _(sel, *, key):
ns = sel.artist.get_array().shape
idxs = (np.asarray(sel.target.index)
+ {"left": [0, -1],
"right": [0, 1],
"up": {"lower": [1, 0], "upper": [-1, 0]}[sel.artist.origin],
"down": {"lower": [-1, 0], "upper": [1, 0]}[sel.artist.origin]}[
key]) % ns
xmin, xmax, ymin, ymax = sel.artist.get_extent()
if sel.artist.origin == "upper":
ymin, ymax = ymax, ymin
low, high = np.array([[xmin, ymin], [xmax, ymax]])
target = _with_attrs(((idxs + .5) / ns)[::-1] * (high - low) + low,
index=tuple(idxs))
return sel._replace(target=target)
@move.register(ContainerArtist)
@_call_with_selection
def _(sel, *, key):
return (move(*sel._replace(artist=sel.artist.container), key=key)
._replace(artist=sel.artist))
@move.register(ErrorbarContainer)
@_call_with_selection
def _(sel, *, key):
data_line, cap_lines, err_lcs = sel.artist
return _move_within_points(sel, data_line.get_xydata(), key=key)
@functools.singledispatch
@_call_with_selection
def make_highlight(sel, *, highlight_kwargs):
"""
Create a highlight for a `Selection`.
This is a single-dispatch function; implementations for various artist
classes follow.
"""
warnings.warn(
"Highlight support for {} is missing".format(type(sel.artist)))
def _set_valid_props(artist, kwargs):
"""Set valid properties for the artist, dropping the others."""
artist.set(**{k: kwargs[k] for k in kwargs if hasattr(artist, "set_" + k)})
return artist
@make_highlight.register(Line2D)
@_call_with_selection
def _(sel, *, highlight_kwargs):
hl = copy.copy(sel.artist)
_set_valid_props(hl, highlight_kwargs)
return hl
@make_highlight.register(PathCollection)
@_call_with_selection
def _(sel, *, highlight_kwargs):
hl = copy.copy(sel.artist)
offsets = hl.get_offsets()
hl.set_offsets(np.where(
np.arange(len(offsets))[:, None] == sel.target.index, offsets, np.nan))
_set_valid_props(hl, highlight_kwargs)
return hl
|
anntzer/mplcursors
|
lib/mplcursors/_pick_info.py
|
_untransform
|
python
|
def _untransform(orig_xy, screen_xy, ax):
tr_xy = ax.transData.transform(orig_xy)
return (
orig_xy
if ((tr_xy == screen_xy) | np.isnan(tr_xy) & np.isnan(screen_xy)).all()
else ax.transData.inverted().transform(screen_xy))
|
Return data coordinates to place an annotation at screen coordinates
*screen_xy* in axes *ax*.
*orig_xy* are the "original" coordinates as stored by the artist; they are
transformed to *screen_xy* by whatever transform the artist uses. If the
artist uses ``ax.transData``, just return *orig_xy*; else, apply
``ax.transData.inverted()`` to *screen_xy*. (The first case is more
accurate than always applying ``ax.transData.inverted()``.)
|
train
|
https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_pick_info.py#L255-L270
| null |
# Unsupported Artist classes: subclasses of AxesImage, QuadMesh (upstream could
# have a `format_coord`-like method); PolyCollection (picking is not well
# defined).
from collections import ChainMap, namedtuple
from contextlib import suppress
import copy
import functools
import inspect
from inspect import Signature
import itertools
from numbers import Integral
import re
import warnings
from weakref import WeakSet
from matplotlib import cbook
from matplotlib.axes import Axes
from matplotlib.backend_bases import RendererBase
from matplotlib.collections import (
LineCollection, PatchCollection, PathCollection)
from matplotlib.container import BarContainer, ErrorbarContainer, StemContainer
from matplotlib.figure import Figure
from matplotlib.image import AxesImage
from matplotlib.lines import Line2D
from matplotlib.patches import Patch, PathPatch, Polygon, Rectangle
from matplotlib.quiver import Barbs, Quiver
from matplotlib.text import Text
from matplotlib.transforms import Affine2D
import numpy as np
Integral.register(np.integer) # Back-compatibility for numpy 1.7, 1.8.
PATCH_PICKRADIUS = 5 # FIXME Patches do not provide `pickradius`.
def _register_scatter():
"""
Patch `PathCollection` and `scatter` to register their return values.
This registration allows us to distinguish `PathCollection`s created by
`Axes.scatter`, which should use point-like picking, from others, which
should use path-like picking. The former is more common, so we store the
latter instead; this also lets us guess the type better if this module is
imported late.
"""
@functools.wraps(PathCollection.__init__)
def __init__(self, *args, **kwargs):
_nonscatter_pathcollections.add(self)
return __init__.__wrapped__(self, *args, **kwargs)
PathCollection.__init__ = __init__
@functools.wraps(Axes.scatter)
def scatter(*args, **kwargs):
paths = scatter.__wrapped__(*args, **kwargs)
with suppress(KeyError):
_nonscatter_pathcollections.remove(paths)
return paths
Axes.scatter = scatter
_nonscatter_pathcollections = WeakSet()
_register_scatter()
def _is_scatter(artist):
return (isinstance(artist, PathCollection)
and artist not in _nonscatter_pathcollections)
def _artist_in_container(container):
return next(filter(None, container.get_children()))
class ContainerArtist:
"""Workaround to make containers behave more like artists."""
def __init__(self, container):
self.container = container # Guaranteed to be nonempty.
# We can't weakref the Container (which subclasses tuple), so
# we instead create a reference cycle between the Container and
# the ContainerArtist; as no one else strongly references the
# ContainerArtist, it will get GC'd whenever the Container is.
vars(container).setdefault(
"_{}__keep_alive".format(__class__.__name__), []).append(self)
def __str__(self):
return "<{}({})>".format(type(self).__name__, self.container)
def __repr__(self):
return "<{}({!r})>".format(type(self).__name__, self.container)
figure = property(lambda self: _artist_in_container(self.container).figure)
axes = property(lambda self: _artist_in_container(self.container).axes)
class AttrArray(np.ndarray):
    """An ndarray subclass onto which arbitrary attributes can be set."""

    def __new__(cls, array):
        # View (rather than copy) the input: no data is duplicated.
        return np.asarray(array).view(type=cls)
def _with_attrs(array, **kwargs):
    """Return *array* wrapped as an `AttrArray` with the given attributes set."""
    wrapped = AttrArray(array)
    for name, value in kwargs.items():
        setattr(wrapped, name, value)
    return wrapped
# A pick result: the picked artist, the target point in data coordinates, the
# pixel distance from the click, the annotation displaying it, and extra
# artists (e.g. highlighters) cleared together with the annotation.
Selection = namedtuple("Selection", "artist target dist annotation extras")
# Override equality to identity: Selections should be considered immutable
# (with mutable fields though) and we don't want to trigger casts of array
# equality checks to booleans. We don't need to override comparisons because
# artists are already non-comparable.
Selection.__eq__ = lambda self, other: self is other
Selection.__ne__ = lambda self, other: self is not other
# Attach per-field docstrings where the namedtuple descriptors allow it.
try:
    Selection.artist.__doc__ = (
        "The selected artist.")
    Selection.target.__doc__ = (
        "The point picked within the artist, in data coordinates.")
    Selection.dist.__doc__ = (
        "The distance from the click to the target, in pixels.")
    Selection.annotation.__doc__ = (
        "The instantiated `matplotlib.text.Annotation`.")
    Selection.extras.__doc__ = (
        "An additional list of artists (e.g., highlighters) that will be "
        "cleared at the same time as the annotation.")
except AttributeError:  # Read-only in Py3.4.
    pass
@functools.singledispatch
def compute_pick(artist, event):
    """
    Find whether *artist* has been picked by *event*.

    Return the appropriate `Selection` if so, and ``None`` otherwise.

    This is a single-dispatch function; implementations for various artist
    classes follow.
    """
    # Fallback for artist types with no registered implementation.
    warnings.warn("Pick support for {} is missing.".format(type(artist)))
    return None
class Index:
    """
    A fractional index into a step-drawn line.

    ``int`` is the index of the step's starting vertex; ``x`` and ``y`` give
    the fractional positions along the horizontal and vertical parts of the
    step, respectively.
    """

    def __init__(self, i, x, y):
        self.int = i
        self.x = x
        self.y = y

    def floor(self):
        return self.int

    def ceil(self):
        # Exactly on the vertex (both fractions zero) -> same index;
        # otherwise round up to the next vertex.
        return self.int + 1 if max(self.x, self.y) != 0 else self.int

    def __format__(self, fmt):
        return "{0.int}.(x={0.x:{1}}, y={0.y:{1}})".format(self, fmt)

    def __str__(self):
        return format(self, "")

    @classmethod
    def pre_index(cls, n_pts, index):
        """Convert a segment index on a "steps-pre" line."""
        i, frac = divmod(index, 1)
        i, odd = divmod(i, 2)
        x, y = (frac, 1) if odd else (0, frac)
        return cls(i, x, y)

    @classmethod
    def post_index(cls, n_pts, index):
        """Convert a segment index on a "steps-post" line."""
        i, frac = divmod(index, 1)
        i, odd = divmod(i, 2)
        x, y = (1, frac) if odd else (frac, 0)
        return cls(i, x, y)

    @classmethod
    def mid_index(cls, n_pts, index):
        """Convert a segment index on a "steps-mid" line."""
        i, frac = divmod(index, 1)
        # The first and last half-steps cover only half a segment each.
        if i == 0:
            frac = .5 + frac / 2
        elif i == 2 * n_pts - 2:  # One less line than points.
            frac = frac / 2
        quot, odd = divmod(i, 2)
        if odd:
            i, x, y = quot, .5, frac
        elif frac < .5:
            i, x, y = quot - 1, frac + .5, 1
        else:
            i, x, y = quot, frac - .5, 0
        return cls(i, x, y)
def _compute_projection_pick(artist, path, xy):
    """
    Project *xy* on *path* to obtain a `Selection` for *artist*.

    *path* is first transformed to screen coordinates using the artist
    transform, and the target of the returned `Selection` is transformed
    back to data coordinates using the artist *axes* inverse transform.  The
    `Selection` `index` is returned as a float.  This function returns ``None``
    for degenerate inputs.

    The caller is responsible for converting the index to the proper class if
    needed.
    """
    transform = artist.get_transform().frozen()
    tpath = (path.cleaned(transform) if transform.is_affine
             # `cleaned` only handles affine transforms.
             else transform.transform_path(path).cleaned())
    # `cleaned` should return a path where the first element is `MOVETO`, the
    # following are `LINETO` or `CLOSEPOLY`, and the last one is `STOP`, i.e.
    # codes = path.codes
    # assert (codes[0], codes[-1]) == (path.MOVETO, path.STOP)
    # assert np.in1d(codes[1:-1], [path.LINETO, path.CLOSEPOLY]).all()
    vertices = tpath.vertices[:-1]
    codes = tpath.codes[:-1]
    # Map CLOSEPOLY vertices onto the path's first vertex so the closing
    # segment ends where the path started.
    vertices[codes == tpath.CLOSEPOLY] = vertices[0]
    # Unit vectors for each segment.
    us = vertices[1:] - vertices[:-1]
    ls = np.hypot(*us.T)
    with np.errstate(invalid="ignore"):
        # Results in 0/0 for repeated consecutive points.
        us /= ls[:, None]
    # Vectors from each vertex to the event (overwritten below).
    vs = xy - vertices[:-1]
    # Clipped dot products -- `einsum` cannot be done in place, `clip` can.
    # `clip` can trigger invalid comparisons if there are nan points.
    with np.errstate(invalid="ignore"):
        dot = np.clip(np.einsum("ij,ij->i", vs, us), 0, ls, out=vs[:, 0])
    # Projections of the event onto each segment (clamped to the segment).
    projs = vertices[:-1] + dot[:, None] * us
    # Distances to each projection; `vs` is reused as scratch storage.
    ds = np.hypot(*(xy - projs).T, out=vs[:, 1])
    try:
        argmin = np.nanargmin(ds)
        dmin = ds[argmin]
    except (ValueError, IndexError):  # See above re: exceptions caught.
        return
    else:
        target = AttrArray(
            artist.axes.transData.inverted().transform_point(projs[argmin]))
        # Fractional index into the *original* path, undoing the resampling
        # `cleaned` may have applied (tracked via `_interpolation_steps`).
        target.index = (
            (argmin + dot[argmin] / ls[argmin])
            / (path._interpolation_steps / tpath._interpolation_steps))
        return Selection(artist, target, dmin, None, None)
@compute_pick.register(Line2D)
def _(artist, event):
    """Pick implementation for `Line2D`: closest marker, then closest segment."""
    # No need to call `line.contains` as we're going to redo the work anyways
    # (also see matplotlib/matplotlib#6645, though that's fixed in mpl2.1).
    # Always work in screen coordinates, as this is how we need to compute
    # distances. Note that the artist transform may be different from the axes
    # transform (e.g., for axvline).
    xy = event.x, event.y
    data_xy = artist.get_xydata()
    data_screen_xy = artist.get_transform().transform(data_xy)
    sels = []
    # If markers are visible, find the closest vertex.
    if artist.get_marker() not in ["None", "none", " ", "", None]:
        ds = np.hypot(*(xy - data_screen_xy).T)
        try:
            argmin = np.nanargmin(ds)
            dmin = ds[argmin]
        except (ValueError, IndexError):
            # numpy 1.7.0's `nanargmin([nan])` returns nan, so
            # `ds[argmin]` raises IndexError. In later versions of numpy,
            # `nanargmin([nan])` raises ValueError (the release notes for 1.8.0
            # are incorrect on this topic).
            pass
        else:
            # More precise than transforming back.
            target = _with_attrs(
                _untransform(
                    data_xy[argmin], data_screen_xy[argmin], artist.axes),
                index=argmin)
            sels.append(Selection(artist, target, dmin, None, None))
    # If lines are visible, find the closest projection.
    if (artist.get_linestyle() not in ["None", "none", " ", "", None]
            and len(artist.get_xydata()) > 1):
        sel = _compute_projection_pick(artist, artist.get_path(), xy)
        if sel is not None:
            # Convert the raw float index into a step-aware `Index` matching
            # the line's draw style (plain float for "_draw_lines").
            sel.target.index = {
                "_draw_lines": lambda _, index: index,
                "_draw_steps_pre": Index.pre_index,
                "_draw_steps_mid": Index.mid_index,
                "_draw_steps_post": Index.post_index}[
                    Line2D.drawStyles[artist.get_drawstyle()]](
                        len(data_xy), sel.target.index)
            sels.append(sel)
    # Keep the overall closest candidate, but only within the pick radius.
    sel = min(sels, key=lambda sel: sel.dist, default=None)
    return sel if sel and sel.dist < artist.get_pickradius() else None
@compute_pick.register(PathPatch)
@compute_pick.register(Polygon)
@compute_pick.register(Rectangle)
def _(artist, event):
    """Pick implementation for patches: project the event onto the outline."""
    event_xy = (event.x, event.y)
    sel = _compute_projection_pick(artist, artist.get_path(), event_xy)
    if sel is None or sel.dist >= PATCH_PICKRADIUS:
        return None
    return sel
@compute_pick.register(LineCollection)
@compute_pick.register(PatchCollection)
@compute_pick.register(PathCollection)
def _(artist, event):
    """
    Pick implementation for collections: point-like picking for scatters,
    path-projection picking otherwise.
    """
    # Use the C implementation to prune the list of segments.
    contains, info = artist.contains(event)
    if not contains:
        return
    offsets = artist.get_offsets()
    paths = artist.get_paths()
    if _is_scatter(artist):
        # Scatter: pick the closest offset among the pruned candidates.
        inds = info["ind"]
        offsets = artist.get_offsets()[inds]
        offsets_screen = artist.get_offset_transform().transform(offsets)
        ds = np.hypot(*(offsets_screen - [event.x, event.y]).T)
        argmin = ds.argmin()
        target = _with_attrs(
            _untransform(offsets[argmin], offsets_screen[argmin], artist.axes),
            index=inds[argmin])
        return Selection(artist, target, ds[argmin], None, None)
    else:
        # Note that this won't select implicitly closed paths.
        # Offsets and paths are cycled independently (hence the modulo
        # indexing); each candidate path is translated by its offset before
        # projecting the event onto it.
        sels = [
            _compute_projection_pick(
                artist,
                Affine2D().translate(*offsets[ind % len(offsets)])
                .transform_path(paths[ind % len(paths)]),
                (event.x, event.y))
            for ind in info["ind"]]
        sel, index = min(
            ((sel, info["ind"][idx]) for idx, sel in enumerate(sels) if sel),
            key=lambda sel_idx: sel_idx[0].dist,
            default=(None, None))
        if sel:
            sel = sel._replace(artist=artist)
            # Index becomes a (path index, within-path fractional index) pair.
            sel.target.index = (index, sel.target.index)
            if (isinstance(artist, PatchCollection)
                    and sel.dist >= PATCH_PICKRADIUS):
                sel = None
        return sel
@compute_pick.register(AxesImage)
def _(artist, event):
    """Pick implementation for images: return the pixel under the event."""
    if type(artist) != AxesImage:
        # Skip and warn on subclasses (`NonUniformImage`, `PcolorImage`) as
        # they do not implement `contains` correctly. Even if they did, they
        # would not support moving as we do not know where a given index maps
        # back physically.
        return compute_pick.dispatch(object)(artist, event)
    contains, _ = artist.contains(event)
    if not contains:
        return
    ns = np.asarray(artist.get_array().shape[:2])[::-1]  # (y, x) -> (x, y)
    xy = np.array([event.xdata, event.ydata])
    xmin, xmax, ymin, ymax = artist.get_extent()
    # Handling of "upper" origin copied from AxesImage.get_cursor_data.
    if artist.origin == "upper":
        ymin, ymax = ymax, ymin
    low, high = np.array([[xmin, ymin], [xmax, ymax]])
    # Linearly map data coordinates within the extent onto (row, col) indices;
    # the trailing [::-1] flips (x, y) back to (row, col).
    idxs = ((xy - low) / (high - low) * ns).astype(int)[::-1]
    target = _with_attrs(xy, index=tuple(idxs))
    # The target is the exact click position, so the distance is 0.
    return Selection(artist, target, 0, None, None)
@compute_pick.register(Barbs)
@compute_pick.register(Quiver)
def _(artist, event):
    """
    Pick implementation for `Barbs` and `Quiver`: select the closest arrow
    base point, if it lies within the artist's pick radius.
    """
    offsets = artist.get_offsets()
    offsets_screen = artist.get_offset_transform().transform(offsets)
    ds = np.hypot(*(offsets_screen - [event.x, event.y]).T)
    try:
        argmin = np.nanargmin(ds)
        dmin = ds[argmin]
    except (ValueError, IndexError):
        # Empty or all-nan offsets: `nanargmin` raises ValueError (or, on old
        # numpy, returns nan so the indexing raises IndexError).  Mirror the
        # Line2D handler and treat this as "nothing pickable" instead of
        # letting the exception propagate.
        return None
    if dmin < artist.get_pickradius():
        target = _with_attrs(
            _untransform(offsets[argmin], offsets_screen[argmin], artist.axes),
            index=argmin)
        return Selection(artist, target, dmin, None, None)
    else:
        return None
@compute_pick.register(Text)
def _(artist, event):
    """Texts are never picked; registered only to silence the fallback warning."""
    return None
@compute_pick.register(ContainerArtist)
def _(artist, event):
    """Delegate picking to the wrapped container, reattributing the artist."""
    sel = compute_pick(artist.container, event)
    return sel._replace(artist=artist) if sel else sel
@compute_pick.register(BarContainer)
def _(container, event):
    """
    Pick implementation for bar plots: pick the (single) bar under the event.

    The returned `Selection` has ``artist=None``; the `ContainerArtist`
    handler that dispatched here fills the field in.
    """
    # Exactly one bar must contain the event; zero or several (unpacking
    # raises ValueError in both cases) means no pick.
    try:
        (idx, patch), = {
            (idx, patch) for idx, patch in enumerate(container.patches)
            if patch.contains(event)[0]}
    except ValueError:
        return
    target = _with_attrs([event.xdata, event.ydata], index=idx)
    # Snap the target to the bar's free end: of the two edges along each
    # sticky direction, keep the one that is *not* a sticky edge
    # (presumably the bar's baseline is the sticky one -- TODO confirm).
    if patch.sticky_edges.x:
        target[0], = (
            x for x in [patch.get_x(), patch.get_x() + patch.get_width()]
            if x not in patch.sticky_edges.x)
    if patch.sticky_edges.y:
        target[1], = (
            y for y in [patch.get_y(), patch.get_y() + patch.get_height()]
            if y not in patch.sticky_edges.y)
    return Selection(None, target, 0, None, None)
@compute_pick.register(ErrorbarContainer)
def _(container, event):
    """
    Pick implementation for errorbar plots: prefer the data line, falling back
    to the error bar collections.

    The `Selection` returned from the error-bar branch has ``artist=None``;
    the `ContainerArtist` handler that dispatched here fills the field in.
    """
    data_line, cap_lines, err_lcs = container
    sel_data = compute_pick(data_line, event) if data_line else None
    # Closest pick among the error bar LineCollections, if any.
    sel_err = min(
        filter(None, (compute_pick(err_lc, event) for err_lc in err_lcs)),
        key=lambda sel: sel.dist, default=None)
    if (sel_data and sel_data.dist < getattr(sel_err, "dist", np.inf)):
        return sel_data
    elif sel_err:
        # LineCollection picks carry a (path index, fractional index) pair;
        # the path index identifies the data point the error bar belongs to.
        idx, _ = sel_err.target.index
        if data_line:
            target = _with_attrs(data_line.get_xydata()[idx], index=idx)
        else:  # We can't guess the original data in that case!
            return
        return Selection(None, target, 0, None, None)
    else:
        return
@compute_pick.register(StemContainer)
def _(container, event):
    """Pick the marker line first; otherwise, the closest stem line's head."""
    marker_sel = compute_pick(container.markerline, event)
    if marker_sel:
        return marker_sel
    candidates = [(idx, compute_pick(line, event))
                  for idx, line in enumerate(container.stemlines)]
    best = min((pair for pair in candidates if pair[1] is not None),
               key=lambda pair: pair[1].dist, default=None)
    if best:
        idx, _sel = best
        # Target the head (last vertex) of the picked stem line; the artist
        # field is filled in by the dispatching ContainerArtist handler.
        target = _with_attrs(
            container.stemlines[idx].get_xydata()[-1], index=idx)
        return Selection(None, target, 0, None, None)
def _call_with_selection(func):
    """Decorator that passes a `Selection` built from the non-kwonly args."""
    # Keyword-only parameters of the wrapped function (e.g. `key`,
    # `highlight_kwargs`) are forwarded separately from the Selection fields.
    wrapped_kwonly_params = [
        param for param in inspect.signature(func).parameters.values()
        if param.kind == param.KEYWORD_ONLY]
    sel_sig = inspect.signature(Selection)
    # Selection's signature with every field made optional (default None), so
    # callers may omit trailing fields.
    default_sel_sig = sel_sig.replace(
        parameters=[param.replace(default=None) if param.default is param.empty
                    else param
                    for param in sel_sig.parameters.values()])
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Pull out the wrapped function's own keyword-only arguments...
        extra_kw = {param.name: kwargs.pop(param.name)
                    for param in wrapped_kwonly_params if param.name in kwargs}
        # ... and bind the rest as Selection fields.
        ba = default_sel_sig.bind(*args, **kwargs)
        # apply_defaults
        # (manual equivalent: fill in the None defaults for omitted fields)
        ba.arguments = ChainMap(
            ba.arguments,
            {name: param.default
             for name, param in default_sel_sig.parameters.items()
             if param.default is not param.empty})
        sel = Selection(*ba.args, **ba.kwargs)
        return func(sel, **extra_kw)
    # Advertise the combined signature (Selection fields + extra kwonly args)
    # for introspection.
    wrapper.__signature__ = Signature(
        list(sel_sig.parameters.values()) + wrapped_kwonly_params)
    return wrapper
def _format_coord_unspaced(ax, xy):
# Un-space-pad, remove empty coordinates from the output of
# `format_{x,y}data`, and rejoin with newlines.
return "\n".join(
line for line, empty in zip(
re.split(",? +", ax.format_coord(*xy)),
itertools.chain(["x=", "y=", "z="], itertools.repeat(None)))
if line != empty).rstrip()
@functools.singledispatch
@_call_with_selection
def get_ann_text(sel):
    """
    Compute an annotating text for a `Selection` (passed **unpacked**).

    This is a single-dispatch function; implementations for various artist
    classes follow.
    """
    # Fallback for artist types with no registered implementation: warn, and
    # annotate with an empty string.
    warnings.warn("Annotation support for {} is missing"
                  .format(type(sel.artist)))
    return ""
def _strip_math(s):
    """Strip latex markup from *s* if it is a mathtext (``$...$``) string."""
    if len(s) >= 2 and s.startswith("$") and s.endswith("$"):
        return cbook.strip_math(s)
    return s
def _format_scalarmappable_value(artist, idx):  # matplotlib/matplotlib#12473.
    """
    Format the mapped value at *idx* of *artist*'s array as a bracketed
    string, e.g. ``"[1.23]"``.
    """
    data = artist.get_array()[idx]
    if np.ndim(data) == 0:
        # Scalar (colormapped) value: format it through a colorbar formatter,
        # which scales its output to the data range.
        if not artist.colorbar:
            # No colorbar exists: attach a throwaway one on an off-screen
            # figure just to obtain a properly configured formatter.
            fig = Figure()
            ax = fig.subplots()
            artist.colorbar = fig.colorbar(artist, cax=ax)
            # This hack updates the ticks without actually paying the cost of
            # drawing (RendererBase.draw_path raises NotImplementedError).
            try:
                ax.yaxis.draw(RendererBase())
            except NotImplementedError:
                pass
        fmt = artist.colorbar.formatter.format_data_short
        return "[" + _strip_math(fmt(data).strip()) + "]"
    else:
        # Non-scalar data (presumably RGB(A) -- TODO confirm): delegate to
        # the artist's own cursor-data formatter.
        text = artist.format_cursor_data(data)
        # get_cursor_data changed in Matplotlib 3.
        if not re.match(r"\A\[.*\]\Z", text):
            text = "[{}]".format(text)
        return text
@get_ann_text.register(Line2D)
@get_ann_text.register(LineCollection)
@get_ann_text.register(PatchCollection)
@get_ann_text.register(PathCollection)
@get_ann_text.register(Patch)
@_call_with_selection
def _(sel):
    """Annotation text for lines, collections, and patches: coordinates, plus
    the mapped value for colormapped scatters and the artist label if any."""
    artist = sel.artist
    label = artist.get_label() or ""
    text = _format_coord_unspaced(artist.axes, sel.target)
    if (_is_scatter(artist)
            # Heuristic: is the artist colormapped?
            # Note that this doesn't handle size-mapping (which is more likely
            # to involve an arbitrary scaling).
            and artist.get_array() is not None
            and len(artist.get_array()) == len(artist.get_offsets())):
        value = _format_scalarmappable_value(artist, sel.target.index)
        text = "{}\n{}".format(text, value)
    # Prefix with the label, unless it is empty or starts with an underscore
    # (the regex only matches a non-underscore first character).
    if re.match("[^_]", label):
        text = "{}\n{}".format(label, text)
    return text
# Lightweight namedtuple carrying only the data-coordinate fields of an event.
_Event = namedtuple("_Event", "xdata ydata")
@get_ann_text.register(AxesImage)
@_call_with_selection
def _(sel):
    """Annotation text for images: coordinates plus the value under the cursor."""
    coord_text = _format_coord_unspaced(sel.artist.axes, sel.target)
    value_text = _format_scalarmappable_value(sel.artist, sel.target.index)
    return "{}\n{}".format(coord_text, value_text)
@get_ann_text.register(Barbs)
@_call_with_selection
def _(sel):
    """Annotation text for barbs: coordinates plus the (u, v) components."""
    artist = sel.artist
    uv = (artist.u[sel.target.index], artist.v[sel.target.index])
    return "{}\n{}".format(
        _format_coord_unspaced(artist.axes, sel.target), uv)
@get_ann_text.register(Quiver)
@_call_with_selection
def _(sel):
    """Annotation text for quivers: coordinates plus the (U, V) components."""
    artist = sel.artist
    uv = (artist.U[sel.target.index], artist.V[sel.target.index])
    return "{}\n{}".format(
        _format_coord_unspaced(artist.axes, sel.target), uv)
@get_ann_text.register(ContainerArtist)
@_call_with_selection
def _(sel):
    """Delegate annotation text to the wrapped container."""
    container_sel = sel._replace(artist=sel.artist.container)
    return get_ann_text(*container_sel)
@get_ann_text.register(BarContainer)
@_call_with_selection
def _(sel):
    """Annotation text for bars: just the formatted coordinates."""
    axes = _artist_in_container(sel.artist).axes
    return _format_coord_unspaced(axes, sel.target)
@get_ann_text.register(ErrorbarContainer)
@_call_with_selection
def _(sel):
    """
    Annotation text for errorbars: the data line's text, with each coordinate
    rewritten to include its error (``$\\pm$`` if symmetric, sub/superscript
    bounds otherwise).
    """
    data_line, cap_lines, err_lcs = sel.artist
    ann_text = get_ann_text(*sel._replace(artist=data_line))
    # Only integral indices correspond to an actual data point with errors.
    if isinstance(sel.target.index, Integral):
        err_lcs = iter(err_lcs)
        for idx, (dir, has) in enumerate(
                zip("xy", [sel.artist.has_xerr, sel.artist.has_yerr])):
            if has:
                # Signed low/high error: error-segment endpoints minus the
                # data point, along the relevant coordinate.
                err = (next(err_lcs).get_paths()[sel.target.index].vertices
                       - data_line.get_xydata()[sel.target.index])[:, idx]
                err_s = [getattr(_artist_in_container(sel.artist).axes,
                                 "format_{}data".format(dir))(e).rstrip()
                         for e in err]
                # We'd normally want to check err.sum() == 0, but that can run
                # into fp inaccuracies.
                # Symmetric error (same magnitude up and down): use "±".
                if len({s.lstrip("+-") for s in err_s}) == 1:
                    repl = r"\1=$\2\\pm{}$\3".format(err_s[1])
                else:
                    # Asymmetric: show low/high bounds as sub/superscripts,
                    # forcing an explicit sign on each.
                    err_s = [("+" if not s.startswith(("+", "-")) else "") + s
                             for s in err_s]
                    repl = r"\1=$\2_{{{}}}^{{{}}}$\3".format(*err_s)
                # Rewrite the "x=..."/"y=..." line of the base text in place.
                ann_text = re.sub("({})=(.*)(\n?)".format(dir), repl, ann_text)
    return ann_text
@get_ann_text.register(StemContainer)
@_call_with_selection
def _(sel):
    """Delegate annotation text to the stem container's marker line."""
    marker_sel = sel._replace(artist=sel.artist.markerline)
    return get_ann_text(*marker_sel)
@functools.singledispatch
@_call_with_selection
def move(sel, *, key):
    """
    Move a `Selection` (passed **unpacked**) following a keypress.

    Implements annotation displacement through the keyboard.

    This is a single-dispatch function; implementations for various artist
    classes follow.
    """
    # Default: artist types without a registered implementation don't move.
    return sel
def _move_within_points(sel, xys, *, key):
    """
    Step *sel* to the previous ("left") or next ("right") finite point of
    *xys*, wrapping around; other keys return *sel* unchanged.

    NOTE(review): if every point is non-finite, the loop falls through and
    ``None`` is returned -- confirm callers tolerate that.
    """
    # Avoid infinite loop in case everything became nan at some point.
    for _ in range(len(xys)):
        if key == "left":
            new_idx = int(np.ceil(sel.target.index) - 1) % len(xys)
        elif key == "right":
            new_idx = int(np.floor(sel.target.index) + 1) % len(xys)
        else:
            return sel
        target = _with_attrs(xys[new_idx], index=new_idx)
        # dist=0: the selection now sits exactly on a data point.
        sel = sel._replace(target=target, dist=0)
        if np.isfinite(target).all():
            return sel
@move.register(Line2D)
@_call_with_selection
def _(sel, *, key):
    """Step the selection to the previous/next vertex of the line."""
    data_xy = sel.artist.get_xydata()
    screen_xy = sel.artist.get_transform().transform(data_xy)
    xys = _untransform(data_xy, screen_xy, sel.artist.axes)
    return _move_within_points(sel, xys, key=key)
@move.register(PathCollection)
@_call_with_selection
def _(sel, *, key):
    """Step the selection across scatter points; other collections don't move."""
    if not _is_scatter(sel.artist):
        return sel
    offsets = sel.artist.get_offsets()
    offsets_screen = sel.artist.get_offset_transform().transform(offsets)
    xys = _untransform(offsets, offsets_screen, sel.artist.axes)
    return _move_within_points(sel, xys, key=key)
@move.register(AxesImage)
@_call_with_selection
def _(sel, *, key):
    """
    Move the selection to the neighboring image pixel in direction *key*,
    wrapping around at the image edges.
    """
    # Use only the leading (rows, cols) dimensions: for RGB(A) images
    # `get_array()` is 3-D, and the full shape would make the `% ns` below
    # broadcast a length-2 index against a length-3 shape and fail (the
    # AxesImage `compute_pick` implementation likewise uses `shape[:2]`).
    ns = sel.artist.get_array().shape[:2]
    idxs = (np.asarray(sel.target.index)
            + {"left": [0, -1],
               "right": [0, 1],
               "up": {"lower": [1, 0], "upper": [-1, 0]}[sel.artist.origin],
               "down": {"lower": [-1, 0], "upper": [1, 0]}[sel.artist.origin]}[
                   key]) % ns
    xmin, xmax, ymin, ymax = sel.artist.get_extent()
    # "upper" origin flips the y axis (as in compute_pick).
    if sel.artist.origin == "upper":
        ymin, ymax = ymax, ymin
    low, high = np.array([[xmin, ymin], [xmax, ymax]])
    # Map the (row, col) index back to the data coordinates of the pixel
    # center; the [::-1] flips (row, col) fractions into (x, y) order.
    target = _with_attrs(((idxs + .5) / ns)[::-1] * (high - low) + low,
                         index=tuple(idxs))
    return sel._replace(target=target)
@move.register(ContainerArtist)
@_call_with_selection
def _(sel, *, key):
    """Delegate the move to the wrapped container, reattributing the artist."""
    moved = move(*sel._replace(artist=sel.artist.container), key=key)
    return moved._replace(artist=sel.artist)
@move.register(ErrorbarContainer)
@_call_with_selection
def _(sel, *, key):
    """Step the selection along the errorbar's data line."""
    data_line = sel.artist[0]
    return _move_within_points(sel, data_line.get_xydata(), key=key)
@functools.singledispatch
@_call_with_selection
def make_highlight(sel, *, highlight_kwargs):
    """
    Create a highlight for a `Selection`.

    This is a single-dispatch function; implementations for various artist
    classes follow.
    """
    # Fallback for artist types with no registered implementation.
    warnings.warn(
        "Highlight support for {} is missing".format(type(sel.artist)))
def _set_valid_props(artist, kwargs):
"""Set valid properties for the artist, dropping the others."""
artist.set(**{k: kwargs[k] for k in kwargs if hasattr(artist, "set_" + k)})
return artist
@make_highlight.register(Line2D)
@_call_with_selection
def _(sel, *, highlight_kwargs):
    """Highlight a line by drawing a restyled copy of it."""
    highlight = copy.copy(sel.artist)
    return _set_valid_props(highlight, highlight_kwargs)
@make_highlight.register(PathCollection)
@_call_with_selection
def _(sel, *, highlight_kwargs):
    """Highlight a scatter by copying it and masking out all other points."""
    highlight = copy.copy(sel.artist)
    offsets = highlight.get_offsets()
    # Keep only the selected point; every other offset becomes nan.
    mask = np.arange(len(offsets))[:, None] == sel.target.index
    highlight.set_offsets(np.where(mask, offsets, np.nan))
    return _set_valid_props(highlight, highlight_kwargs)
|
anntzer/mplcursors
|
lib/mplcursors/_pick_info.py
|
_call_with_selection
|
python
|
def _call_with_selection(func):
wrapped_kwonly_params = [
param for param in inspect.signature(func).parameters.values()
if param.kind == param.KEYWORD_ONLY]
sel_sig = inspect.signature(Selection)
default_sel_sig = sel_sig.replace(
parameters=[param.replace(default=None) if param.default is param.empty
else param
for param in sel_sig.parameters.values()])
@functools.wraps(func)
def wrapper(*args, **kwargs):
extra_kw = {param.name: kwargs.pop(param.name)
for param in wrapped_kwonly_params if param.name in kwargs}
ba = default_sel_sig.bind(*args, **kwargs)
# apply_defaults
ba.arguments = ChainMap(
ba.arguments,
{name: param.default
for name, param in default_sel_sig.parameters.items()
if param.default is not param.empty})
sel = Selection(*ba.args, **ba.kwargs)
return func(sel, **extra_kw)
wrapper.__signature__ = Signature(
list(sel_sig.parameters.values()) + wrapped_kwonly_params)
return wrapper
|
Decorator that passes a `Selection` built from the non-kwonly args.
|
train
|
https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_pick_info.py#L481-L508
| null |
# Unsupported Artist classes: subclasses of AxesImage, QuadMesh (upstream could
# have a `format_coord`-like method); PolyCollection (picking is not well
# defined).
from collections import ChainMap, namedtuple
from contextlib import suppress
import copy
import functools
import inspect
from inspect import Signature
import itertools
from numbers import Integral
import re
import warnings
from weakref import WeakSet
from matplotlib import cbook
from matplotlib.axes import Axes
from matplotlib.backend_bases import RendererBase
from matplotlib.collections import (
LineCollection, PatchCollection, PathCollection)
from matplotlib.container import BarContainer, ErrorbarContainer, StemContainer
from matplotlib.figure import Figure
from matplotlib.image import AxesImage
from matplotlib.lines import Line2D
from matplotlib.patches import Patch, PathPatch, Polygon, Rectangle
from matplotlib.quiver import Barbs, Quiver
from matplotlib.text import Text
from matplotlib.transforms import Affine2D
import numpy as np
Integral.register(np.integer) # Back-compatibility for numpy 1.7, 1.8.
PATCH_PICKRADIUS = 5 # FIXME Patches do not provide `pickradius`.
def _register_scatter():
"""
Patch `PathCollection` and `scatter` to register their return values.
This registration allows us to distinguish `PathCollection`s created by
`Axes.scatter`, which should use point-like picking, from others, which
should use path-like picking. The former is more common, so we store the
latter instead; this also lets us guess the type better if this module is
imported late.
"""
@functools.wraps(PathCollection.__init__)
def __init__(self, *args, **kwargs):
_nonscatter_pathcollections.add(self)
return __init__.__wrapped__(self, *args, **kwargs)
PathCollection.__init__ = __init__
@functools.wraps(Axes.scatter)
def scatter(*args, **kwargs):
paths = scatter.__wrapped__(*args, **kwargs)
with suppress(KeyError):
_nonscatter_pathcollections.remove(paths)
return paths
Axes.scatter = scatter
_nonscatter_pathcollections = WeakSet()
_register_scatter()
def _is_scatter(artist):
return (isinstance(artist, PathCollection)
and artist not in _nonscatter_pathcollections)
def _artist_in_container(container):
return next(filter(None, container.get_children()))
class ContainerArtist:
"""Workaround to make containers behave more like artists."""
def __init__(self, container):
self.container = container # Guaranteed to be nonempty.
# We can't weakref the Container (which subclasses tuple), so
# we instead create a reference cycle between the Container and
# the ContainerArtist; as no one else strongly references the
# ContainerArtist, it will get GC'd whenever the Container is.
vars(container).setdefault(
"_{}__keep_alive".format(__class__.__name__), []).append(self)
def __str__(self):
return "<{}({})>".format(type(self).__name__, self.container)
def __repr__(self):
return "<{}({!r})>".format(type(self).__name__, self.container)
figure = property(lambda self: _artist_in_container(self.container).figure)
axes = property(lambda self: _artist_in_container(self.container).axes)
class AttrArray(np.ndarray):
    """An ndarray subclass onto which arbitrary attributes can be set."""

    def __new__(cls, array):
        # View (rather than copy) the input: no data is duplicated.
        return np.asarray(array).view(type=cls)
def _with_attrs(array, **kwargs):
array = AttrArray(array)
for k, v in kwargs.items():
setattr(array, k, v)
return array
Selection = namedtuple("Selection", "artist target dist annotation extras")
# Override equality to identity: Selections should be considered immutable
# (with mutable fields though) and we don't want to trigger casts of array
# equality checks to booleans. We don't need to override comparisons because
# artists are already non-comparable.
Selection.__eq__ = lambda self, other: self is other
Selection.__ne__ = lambda self, other: self is not other
try:
Selection.artist.__doc__ = (
"The selected artist.")
Selection.target.__doc__ = (
"The point picked within the artist, in data coordinates.")
Selection.dist.__doc__ = (
"The distance from the click to the target, in pixels.")
Selection.annotation.__doc__ = (
"The instantiated `matplotlib.text.Annotation`.")
Selection.extras.__doc__ = (
"An additional list of artists (e.g., highlighters) that will be "
"cleared at the same time as the annotation.")
except AttributeError: # Read-only in Py3.4.
pass
@functools.singledispatch
def compute_pick(artist, event):
"""
Find whether *artist* has been picked by *event*.
If it has, return the appropriate `Selection`; otherwise return ``None``.
This is a single-dispatch function; implementations for various artist
classes follow.
"""
warnings.warn("Pick support for {} is missing.".format(type(artist)))
class Index:
def __init__(self, i, x, y):
self.int = i
self.x = x
self.y = y
def floor(self):
return self.int
def ceil(self):
return self.int if max(self.x, self.y) == 0 else self.int + 1
def __format__(self, fmt):
return "{0.int}.(x={0.x:{1}}, y={0.y:{1}})".format(self, fmt)
def __str__(self):
return format(self, "")
@classmethod
def pre_index(cls, n_pts, index):
i, frac = divmod(index, 1)
i, odd = divmod(i, 2)
x, y = (0, frac) if not odd else (frac, 1)
return cls(i, x, y)
@classmethod
def post_index(cls, n_pts, index):
i, frac = divmod(index, 1)
i, odd = divmod(i, 2)
x, y = (frac, 0) if not odd else (1, frac)
return cls(i, x, y)
@classmethod
def mid_index(cls, n_pts, index):
i, frac = divmod(index, 1)
if i == 0:
frac = .5 + frac / 2
elif i == 2 * n_pts - 2: # One less line than points.
frac = frac / 2
quot, odd = divmod(i, 2)
if not odd:
if frac < .5:
i = quot - 1
x, y = frac + .5, 1
else:
i = quot
x, y = frac - .5, 0
else:
i = quot
x, y = .5, frac
return cls(i, x, y)
def _compute_projection_pick(artist, path, xy):
"""
Project *xy* on *path* to obtain a `Selection` for *artist*.
*path* is first transformed to screen coordinates using the artist
transform, and the target of the returned `Selection` is transformed
back to data coordinates using the artist *axes* inverse transform. The
`Selection` `index` is returned as a float. This function returns ``None``
for degenerate inputs.
The caller is responsible for converting the index to the proper class if
needed.
"""
transform = artist.get_transform().frozen()
tpath = (path.cleaned(transform) if transform.is_affine
# `cleaned` only handles affine transforms.
else transform.transform_path(path).cleaned())
# `cleaned` should return a path where the first element is `MOVETO`, the
# following are `LINETO` or `CLOSEPOLY`, and the last one is `STOP`, i.e.
# codes = path.codes
# assert (codes[0], codes[-1]) == (path.MOVETO, path.STOP)
# assert np.in1d(codes[1:-1], [path.LINETO, path.CLOSEPOLY]).all()
vertices = tpath.vertices[:-1]
codes = tpath.codes[:-1]
vertices[codes == tpath.CLOSEPOLY] = vertices[0]
# Unit vectors for each segment.
us = vertices[1:] - vertices[:-1]
ls = np.hypot(*us.T)
with np.errstate(invalid="ignore"):
# Results in 0/0 for repeated consecutive points.
us /= ls[:, None]
# Vectors from each vertex to the event (overwritten below).
vs = xy - vertices[:-1]
# Clipped dot products -- `einsum` cannot be done in place, `clip` can.
# `clip` can trigger invalid comparisons if there are nan points.
with np.errstate(invalid="ignore"):
dot = np.clip(np.einsum("ij,ij->i", vs, us), 0, ls, out=vs[:, 0])
# Projections.
projs = vertices[:-1] + dot[:, None] * us
ds = np.hypot(*(xy - projs).T, out=vs[:, 1])
try:
argmin = np.nanargmin(ds)
dmin = ds[argmin]
except (ValueError, IndexError): # See above re: exceptions caught.
return
else:
target = AttrArray(
artist.axes.transData.inverted().transform_point(projs[argmin]))
target.index = (
(argmin + dot[argmin] / ls[argmin])
/ (path._interpolation_steps / tpath._interpolation_steps))
return Selection(artist, target, dmin, None, None)
def _untransform(orig_xy, screen_xy, ax):
"""
Return data coordinates to place an annotation at screen coordinates
*screen_xy* in axes *ax*.
*orig_xy* are the "original" coordinates as stored by the artist; they are
transformed to *screen_xy* by whatever transform the artist uses. If the
artist uses ``ax.transData``, just return *orig_xy*; else, apply
``ax.transData.inverse()`` to *screen_xy*. (The first case is more
accurate than always applying ``ax.transData.inverse()``.)
"""
tr_xy = ax.transData.transform(orig_xy)
return (
orig_xy
if ((tr_xy == screen_xy) | np.isnan(tr_xy) & np.isnan(screen_xy)).all()
else ax.transData.inverted().transform(screen_xy))
@compute_pick.register(Line2D)
def _(artist, event):
# No need to call `line.contains` as we're going to redo the work anyways
# (also see matplotlib/matplotlib#6645, though that's fixed in mpl2.1).
# Always work in screen coordinates, as this is how we need to compute
# distances. Note that the artist transform may be different from the axes
# transform (e.g., for axvline).
xy = event.x, event.y
data_xy = artist.get_xydata()
data_screen_xy = artist.get_transform().transform(data_xy)
sels = []
# If markers are visible, find the closest vertex.
if artist.get_marker() not in ["None", "none", " ", "", None]:
ds = np.hypot(*(xy - data_screen_xy).T)
try:
argmin = np.nanargmin(ds)
dmin = ds[argmin]
except (ValueError, IndexError):
# numpy 1.7.0's `nanargmin([nan])` returns nan, so
# `ds[argmin]` raises IndexError. In later versions of numpy,
# `nanargmin([nan])` raises ValueError (the release notes for 1.8.0
# are incorrect on this topic).
pass
else:
# More precise than transforming back.
target = _with_attrs(
_untransform(
data_xy[argmin], data_screen_xy[argmin], artist.axes),
index=argmin)
sels.append(Selection(artist, target, dmin, None, None))
# If lines are visible, find the closest projection.
if (artist.get_linestyle() not in ["None", "none", " ", "", None]
and len(artist.get_xydata()) > 1):
sel = _compute_projection_pick(artist, artist.get_path(), xy)
if sel is not None:
sel.target.index = {
"_draw_lines": lambda _, index: index,
"_draw_steps_pre": Index.pre_index,
"_draw_steps_mid": Index.mid_index,
"_draw_steps_post": Index.post_index}[
Line2D.drawStyles[artist.get_drawstyle()]](
len(data_xy), sel.target.index)
sels.append(sel)
sel = min(sels, key=lambda sel: sel.dist, default=None)
return sel if sel and sel.dist < artist.get_pickradius() else None
@compute_pick.register(PathPatch)
@compute_pick.register(Polygon)
@compute_pick.register(Rectangle)
def _(artist, event):
sel = _compute_projection_pick(
artist, artist.get_path(), (event.x, event.y))
if sel and sel.dist < PATCH_PICKRADIUS:
return sel
@compute_pick.register(LineCollection)
@compute_pick.register(PatchCollection)
@compute_pick.register(PathCollection)
def _(artist, event):
# Use the C implementation to prune the list of segments.
contains, info = artist.contains(event)
if not contains:
return
offsets = artist.get_offsets()
paths = artist.get_paths()
if _is_scatter(artist):
inds = info["ind"]
offsets = artist.get_offsets()[inds]
offsets_screen = artist.get_offset_transform().transform(offsets)
ds = np.hypot(*(offsets_screen - [event.x, event.y]).T)
argmin = ds.argmin()
target = _with_attrs(
_untransform(offsets[argmin], offsets_screen[argmin], artist.axes),
index=inds[argmin])
return Selection(artist, target, ds[argmin], None, None)
else:
# Note that this won't select implicitly closed paths.
sels = [
_compute_projection_pick(
artist,
Affine2D().translate(*offsets[ind % len(offsets)])
.transform_path(paths[ind % len(paths)]),
(event.x, event.y))
for ind in info["ind"]]
sel, index = min(
((sel, info["ind"][idx]) for idx, sel in enumerate(sels) if sel),
key=lambda sel_idx: sel_idx[0].dist,
default=(None, None))
if sel:
sel = sel._replace(artist=artist)
sel.target.index = (index, sel.target.index)
if (isinstance(artist, PatchCollection)
and sel.dist >= PATCH_PICKRADIUS):
sel = None
return sel
@compute_pick.register(AxesImage)
def _(artist, event):
    # Map the event's data coordinates back to (row, col) pixel indices.
    if type(artist) != AxesImage:
        # Skip and warn on subclasses (`NonUniformImage`, `PcolorImage`) as
        # they do not implement `contains` correctly. Even if they did, they
        # would not support moving as we do not know where a given index maps
        # back physically.
        return compute_pick.dispatch(object)(artist, event)
    contains, _ = artist.contains(event)
    if not contains:
        return
    ns = np.asarray(artist.get_array().shape[:2])[::-1]  # (y, x) -> (x, y)
    xy = np.array([event.xdata, event.ydata])
    xmin, xmax, ymin, ymax = artist.get_extent()
    # Handling of "upper" origin copied from AxesImage.get_cursor_data.
    if artist.origin == "upper":
        ymin, ymax = ymax, ymin
    low, high = np.array([[xmin, ymin], [xmax, ymax]])
    # Normalized position within the extent, scaled to pixel counts, then
    # reversed back to (row, col) order.
    idxs = ((xy - low) / (high - low) * ns).astype(int)[::-1]
    target = _with_attrs(xy, index=tuple(idxs))
    return Selection(artist, target, 0, None, None)
@compute_pick.register(Barbs)
@compute_pick.register(Quiver)
def _(artist, event):
    """Pick the closest arrow/barb base point, within the pick radius."""
    offsets = artist.get_offsets()
    screen = artist.get_offset_transform().transform(offsets)
    # Screen-space distance from the event to every base point.
    dists = np.hypot(*(screen - [event.x, event.y]).T)
    best = np.nanargmin(dists)
    if dists[best] >= artist.get_pickradius():
        return None
    data_xy = _untransform(offsets[best], screen[best], artist.axes)
    target = _with_attrs(data_xy, index=best)
    return Selection(artist, target, dists[best], None, None)
@compute_pick.register(Text)
def _(artist, event):
return
@compute_pick.register(ContainerArtist)
def _(artist, event):
    """Delegate picking to the wrapped container, reattributing the artist."""
    sel = compute_pick(artist.container, event)
    if sel is None:
        return None
    return sel._replace(artist=artist)
@compute_pick.register(BarContainer)
def _(container, event):
    # Pick a bar only if the event falls within exactly one patch.
    try:
        (idx, patch), = {
            (idx, patch) for idx, patch in enumerate(container.patches)
            if patch.contains(event)[0]}
    except ValueError:  # Zero, or more than one, bar contains the event.
        return
    target = _with_attrs([event.xdata, event.ydata], index=idx)
    # Snap the coordinate along which the bar extends to the bar's free end
    # (the end that is not pinned by a sticky edge).
    if patch.sticky_edges.x:
        target[0], = (
            x for x in [patch.get_x(), patch.get_x() + patch.get_width()]
            if x not in patch.sticky_edges.x)
    if patch.sticky_edges.y:
        target[1], = (
            y for y in [patch.get_y(), patch.get_y() + patch.get_height()]
            if y not in patch.sticky_edges.y)
    return Selection(None, target, 0, None, None)
@compute_pick.register(ErrorbarContainer)
def _(container, event):
    # Prefer picking the data line; otherwise, map a pick on an errorbar
    # back to the corresponding data point.
    data_line, cap_lines, err_lcs = container
    sel_data = compute_pick(data_line, event) if data_line else None
    sel_err = min(
        filter(None, (compute_pick(err_lc, event) for err_lc in err_lcs)),
        key=lambda sel: sel.dist, default=None)
    if (sel_data and sel_data.dist < getattr(sel_err, "dist", np.inf)):
        return sel_data
    elif sel_err:
        # LineCollection picks carry a (segment, fraction) index; the segment
        # number identifies the data point.
        idx, _ = sel_err.target.index
        if data_line:
            target = _with_attrs(data_line.get_xydata()[idx], index=idx)
        else:  # We can't guess the original data in that case!
            return
        return Selection(None, target, 0, None, None)
    else:
        return
@compute_pick.register(StemContainer)
def _(container, event):
    # Prefer the marker line; otherwise pick the nearest stem line and target
    # its tip (the data point, last vertex of the stem).
    sel = compute_pick(container.markerline, event)
    if sel:
        return sel
    idx_sel = min(filter(lambda idx_sel: idx_sel[1] is not None,
                         ((idx, compute_pick(line, event))
                          for idx, line in enumerate(container.stemlines))),
                  key=lambda idx_sel: idx_sel[1].dist, default=None)
    if idx_sel:
        idx, _ = idx_sel
        target = _with_attrs(
            container.stemlines[idx].get_xydata()[-1], index=idx)
        return Selection(None, target, 0, None, None)
def _format_coord_unspaced(ax, xy):
# Un-space-pad, remove empty coordinates from the output of
# `format_{x,y}data`, and rejoin with newlines.
return "\n".join(
line for line, empty in zip(
re.split(",? +", ax.format_coord(*xy)),
itertools.chain(["x=", "y=", "z="], itertools.repeat(None)))
if line != empty).rstrip()
@functools.singledispatch
@_call_with_selection
def get_ann_text(sel):
"""
Compute an annotating text for a `Selection` (passed **unpacked**).
This is a single-dispatch function; implementations for various artist
classes follow.
"""
warnings.warn(
"Annotation support for {} is missing".format(type(sel.artist)))
return ""
def _strip_math(s):
return cbook.strip_math(s) if len(s) >= 2 and s[0] == s[-1] == "$" else s
def _format_scalarmappable_value(artist, idx):  # matplotlib/matplotlib#12473.
    """Format the colormapped value of *artist* at index *idx*."""
    data = artist.get_array()[idx]
    if np.ndim(data) == 0:
        if not artist.colorbar:
            # Create a throwaway colorbar on a throwaway figure, just to get
            # a formatter scaled appropriately for the artist's value range.
            fig = Figure()
            ax = fig.subplots()
            artist.colorbar = fig.colorbar(artist, cax=ax)
            # This hack updates the ticks without actually paying the cost of
            # drawing (RendererBase.draw_path raises NotImplementedError).
            try:
                ax.yaxis.draw(RendererBase())
            except NotImplementedError:
                pass
        fmt = artist.colorbar.formatter.format_data_short
        return "[" + _strip_math(fmt(data).strip()) + "]"
    else:
        # RGB(A) data: rely on the artist's own cursor-data formatting.
        text = artist.format_cursor_data(data)
        # get_cursor_data changed in Matplotlib 3.
        if not re.match(r"\A\[.*\]\Z", text):
            text = "[{}]".format(text)
        return text
@get_ann_text.register(Line2D)
@get_ann_text.register(LineCollection)
@get_ann_text.register(PatchCollection)
@get_ann_text.register(PathCollection)
@get_ann_text.register(Patch)
@_call_with_selection
def _(sel):
artist = sel.artist
label = artist.get_label() or ""
text = _format_coord_unspaced(artist.axes, sel.target)
if (_is_scatter(artist)
# Heuristic: is the artist colormapped?
# Note that this doesn't handle size-mapping (which is more likely
# to involve an arbitrary scaling).
and artist.get_array() is not None
and len(artist.get_array()) == len(artist.get_offsets())):
value = _format_scalarmappable_value(artist, sel.target.index)
text = "{}\n{}".format(text, value)
if re.match("[^_]", label):
text = "{}\n{}".format(label, text)
return text
_Event = namedtuple("_Event", "xdata ydata")
@get_ann_text.register(AxesImage)
@_call_with_selection
def _(sel):
artist = sel.artist
text = _format_coord_unspaced(artist.axes, sel.target)
cursor_text = _format_scalarmappable_value(artist, sel.target.index)
return "{}\n{}".format(text, cursor_text)
@get_ann_text.register(Barbs)
@_call_with_selection
def _(sel):
artist = sel.artist
text = "{}\n{}".format(
_format_coord_unspaced(artist.axes, sel.target),
(artist.u[sel.target.index], artist.v[sel.target.index]))
return text
@get_ann_text.register(Quiver)
@_call_with_selection
def _(sel):
artist = sel.artist
text = "{}\n{}".format(
_format_coord_unspaced(artist.axes, sel.target),
(artist.U[sel.target.index], artist.V[sel.target.index]))
return text
@get_ann_text.register(ContainerArtist)
@_call_with_selection
def _(sel):
return get_ann_text(*sel._replace(artist=sel.artist.container))
@get_ann_text.register(BarContainer)
@_call_with_selection
def _(sel):
return _format_coord_unspaced(
_artist_in_container(sel.artist).axes, sel.target)
@get_ann_text.register(ErrorbarContainer)
@_call_with_selection
def _(sel):
    # Annotate the data point, rewriting the "x=..."/"y=..." entries to also
    # show the error extents (as "±err", or asymmetric sub/superscripts).
    data_line, cap_lines, err_lcs = sel.artist
    ann_text = get_ann_text(*sel._replace(artist=data_line))
    if isinstance(sel.target.index, Integral):
        err_lcs = iter(err_lcs)
        for idx, (dir, has) in enumerate(
                zip("xy", [sel.artist.has_xerr, sel.artist.has_yerr])):
            if has:
                # Signed offsets of the two error endpoints from the data
                # point, along the relevant axis.
                err = (next(err_lcs).get_paths()[sel.target.index].vertices
                       - data_line.get_xydata()[sel.target.index])[:, idx]
                err_s = [getattr(_artist_in_container(sel.artist).axes,
                                 "format_{}data".format(dir))(e).rstrip()
                         for e in err]
                # We'd normally want to check err.sum() == 0, but that can run
                # into fp inaccuracies.
                if len({s.lstrip("+-") for s in err_s}) == 1:
                    # Symmetric error: render as "±err".
                    repl = r"\1=$\2\\pm{}$\3".format(err_s[1])
                else:
                    err_s = [("+" if not s.startswith(("+", "-")) else "") + s
                             for s in err_s]
                    repl = r"\1=$\2_{{{}}}^{{{}}}$\3".format(*err_s)
                ann_text = re.sub("({})=(.*)(\n?)".format(dir), repl, ann_text)
    return ann_text
@get_ann_text.register(StemContainer)
@_call_with_selection
def _(sel):
return get_ann_text(*sel._replace(artist=sel.artist.markerline))
@functools.singledispatch
@_call_with_selection
def move(sel, *, key):
"""
Move a `Selection` (passed **unpacked**) following a keypress.
This function is used to implement annotation displacement through the
keyboard.
This is a single-dispatch function; implementations for various artist
classes follow.
"""
return sel
def _move_within_points(sel, xys, *, key):
    """
    Shift *sel*'s target to the previous ("left") or next ("right") point of
    *xys*, skipping non-finite points; any other key leaves *sel* unchanged.
    """
    n_pts = len(xys)
    # Bound the iteration count so that an all-nan dataset cannot cause an
    # infinite loop.
    for _ in range(n_pts):
        if key == "left":
            new_idx = int(np.ceil(sel.target.index) - 1) % n_pts
        elif key == "right":
            new_idx = int(np.floor(sel.target.index) + 1) % n_pts
        else:
            return sel
        target = _with_attrs(xys[new_idx], index=new_idx)
        sel = sel._replace(target=target, dist=0)
        if np.isfinite(target).all():
            return sel
@move.register(Line2D)
@_call_with_selection
def _(sel, *, key):
data_xy = sel.artist.get_xydata()
return _move_within_points(
sel,
_untransform(data_xy, sel.artist.get_transform().transform(data_xy),
sel.artist.axes),
key=key)
@move.register(PathCollection)
@_call_with_selection
def _(sel, *, key):
if _is_scatter(sel.artist):
offsets = sel.artist.get_offsets()
return _move_within_points(
sel,
_untransform(
offsets, sel.artist.get_offset_transform().transform(offsets),
sel.artist.axes),
key=key)
else:
return sel
@move.register(AxesImage)
@_call_with_selection
def _(sel, *, key):
    """
    Move an `AxesImage` selection to an adjacent pixel following a keypress.

    The (row, col) target index is shifted by one step (wrapping around at
    the image edges) and the target point is recomputed at the center of the
    new pixel.
    """
    # Only the first two dimensions are pixel indices; RGB(A) arrays carry a
    # trailing channel dimension that must not take part in the wrapping
    # (consistent with the picking implementation, which uses shape[:2]).
    ns = sel.artist.get_array().shape[:2]
    # "up"/"down" directions depend on the image origin convention.
    idxs = (np.asarray(sel.target.index)
            + {"left": [0, -1],
               "right": [0, 1],
               "up": {"lower": [1, 0], "upper": [-1, 0]}[sel.artist.origin],
               "down": {"lower": [-1, 0], "upper": [1, 0]}[sel.artist.origin]}[
                key]) % ns
    xmin, xmax, ymin, ymax = sel.artist.get_extent()
    if sel.artist.origin == "upper":
        ymin, ymax = ymax, ymin
    low, high = np.array([[xmin, ymin], [xmax, ymax]])
    # Center of the new pixel; indices are (row, col), hence the [::-1] to
    # obtain (x, y).
    target = _with_attrs(((idxs + .5) / ns)[::-1] * (high - low) + low,
                         index=tuple(idxs))
    return sel._replace(target=target)
@move.register(ContainerArtist)
@_call_with_selection
def _(sel, *, key):
return (move(*sel._replace(artist=sel.artist.container), key=key)
._replace(artist=sel.artist))
@move.register(ErrorbarContainer)
@_call_with_selection
def _(sel, *, key):
data_line, cap_lines, err_lcs = sel.artist
return _move_within_points(sel, data_line.get_xydata(), key=key)
@functools.singledispatch
@_call_with_selection
def make_highlight(sel, *, highlight_kwargs):
"""
Create a highlight for a `Selection`.
This is a single-dispatch function; implementations for various artist
classes follow.
"""
warnings.warn(
"Highlight support for {} is missing".format(type(sel.artist)))
def _set_valid_props(artist, kwargs):
"""Set valid properties for the artist, dropping the others."""
artist.set(**{k: kwargs[k] for k in kwargs if hasattr(artist, "set_" + k)})
return artist
@make_highlight.register(Line2D)
@_call_with_selection
def _(sel, *, highlight_kwargs):
hl = copy.copy(sel.artist)
_set_valid_props(hl, highlight_kwargs)
return hl
@make_highlight.register(PathCollection)
@_call_with_selection
def _(sel, *, highlight_kwargs):
hl = copy.copy(sel.artist)
offsets = hl.get_offsets()
hl.set_offsets(np.where(
np.arange(len(offsets))[:, None] == sel.target.index, offsets, np.nan))
_set_valid_props(hl, highlight_kwargs)
return hl
|
anntzer/mplcursors
|
lib/mplcursors/_pick_info.py
|
_set_valid_props
|
python
|
def _set_valid_props(artist, kwargs):
artist.set(**{k: kwargs[k] for k in kwargs if hasattr(artist, "set_" + k)})
return artist
|
Set valid properties for the artist, dropping the others.
|
train
|
https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_pick_info.py#L765-L768
| null |
# Unsupported Artist classes: subclasses of AxesImage, QuadMesh (upstream could
# have a `format_coord`-like method); PolyCollection (picking is not well
# defined).
from collections import ChainMap, namedtuple
from contextlib import suppress
import copy
import functools
import inspect
from inspect import Signature
import itertools
from numbers import Integral
import re
import warnings
from weakref import WeakSet
from matplotlib import cbook
from matplotlib.axes import Axes
from matplotlib.backend_bases import RendererBase
from matplotlib.collections import (
LineCollection, PatchCollection, PathCollection)
from matplotlib.container import BarContainer, ErrorbarContainer, StemContainer
from matplotlib.figure import Figure
from matplotlib.image import AxesImage
from matplotlib.lines import Line2D
from matplotlib.patches import Patch, PathPatch, Polygon, Rectangle
from matplotlib.quiver import Barbs, Quiver
from matplotlib.text import Text
from matplotlib.transforms import Affine2D
import numpy as np
Integral.register(np.integer) # Back-compatibility for numpy 1.7, 1.8.
PATCH_PICKRADIUS = 5 # FIXME Patches do not provide `pickradius`.
def _register_scatter():
    """
    Patch `PathCollection` and `scatter` to register their return values.
    This registration allows us to distinguish `PathCollection`s created by
    `Axes.scatter`, which should use point-like picking, from others, which
    should use path-like picking. The former is more common, so we store the
    latter instead; this also lets us guess the type better if this module is
    imported late.
    """
    @functools.wraps(PathCollection.__init__)
    def __init__(self, *args, **kwargs):
        # Any directly-constructed PathCollection is recorded as non-scatter;
        # the patched `scatter` below removes its own return value again.
        _nonscatter_pathcollections.add(self)
        return __init__.__wrapped__(self, *args, **kwargs)
    PathCollection.__init__ = __init__
    @functools.wraps(Axes.scatter)
    def scatter(*args, **kwargs):
        paths = scatter.__wrapped__(*args, **kwargs)
        # `paths` was registered by the patched __init__ above; unregister
        # it, as it *is* a scatter plot.
        with suppress(KeyError):
            _nonscatter_pathcollections.remove(paths)
        return paths
    Axes.scatter = scatter
# PathCollections *not* created by Axes.scatter (a WeakSet, so registration
# does not keep collections alive).
_nonscatter_pathcollections = WeakSet()
_register_scatter()
def _is_scatter(artist):
    """Return whether *artist* is a `PathCollection` made by `Axes.scatter`."""
    if not isinstance(artist, PathCollection):
        return False
    return artist not in _nonscatter_pathcollections
def _artist_in_container(container):
return next(filter(None, container.get_children()))
class ContainerArtist:
    """Workaround to make containers behave more like artists."""
    def __init__(self, container):
        self.container = container  # Guaranteed to be nonempty.
        # We can't weakref the Container (which subclasses tuple), so
        # we instead create a reference cycle between the Container and
        # the ContainerArtist; as no one else strongly references the
        # ContainerArtist, it will get GC'd whenever the Container is.
        vars(container).setdefault(
            "_{}__keep_alive".format(__class__.__name__), []).append(self)
    def __str__(self):
        return "<{}({})>".format(type(self).__name__, self.container)
    def __repr__(self):
        return "<{}({!r})>".format(type(self).__name__, self.container)
    # Forward `figure`/`axes` to the first real artist in the container.
    figure = property(lambda self: _artist_in_container(self.container).figure)
    axes = property(lambda self: _artist_in_container(self.container).axes)
class AttrArray(np.ndarray):
    """An array subclass that can store additional attributes."""
    def __new__(cls, array):
        # Viewing through the subclass (rather than copying) gives the array
        # an instance __dict__, which plain ndarrays lack.
        return np.asarray(array).view(cls)
def _with_attrs(array, **kwargs):
    """Return *array* as an `AttrArray` carrying the given extra attributes."""
    wrapped = AttrArray(array)
    for name, value in kwargs.items():
        setattr(wrapped, name, value)
    return wrapped
Selection = namedtuple("Selection", "artist target dist annotation extras")
# Override equality to identity: Selections should be considered immutable
# (with mutable fields though) and we don't want to trigger casts of array
# equality checks to booleans. We don't need to override comparisons because
# artists are already non-comparable.
Selection.__eq__ = lambda self, other: self is other
Selection.__ne__ = lambda self, other: self is not other
# Document each field; namedtuple field docstrings are read-only on Py3.4,
# hence the try/except below.
try:
    Selection.artist.__doc__ = (
        "The selected artist.")
    Selection.target.__doc__ = (
        "The point picked within the artist, in data coordinates.")
    Selection.dist.__doc__ = (
        "The distance from the click to the target, in pixels.")
    Selection.annotation.__doc__ = (
        "The instantiated `matplotlib.text.Annotation`.")
    Selection.extras.__doc__ = (
        "An additional list of artists (e.g., highlighters) that will be "
        "cleared at the same time as the annotation.")
except AttributeError:  # Read-only in Py3.4.
    pass
@functools.singledispatch
def compute_pick(artist, event):
    """
    Find whether *artist* has been picked by *event*.
    If it has, return the appropriate `Selection`; otherwise return ``None``.
    This is a single-dispatch function; implementations for various artist
    classes follow.
    """
    # Default implementation: unsupported artist type -> warn, return None.
    warnings.warn("Pick support for {} is missing.".format(type(artist)))
class Index:
    """
    A fractional index into a step-drawn line.

    ``int`` is the integer segment index; ``x`` and ``y`` are the fractional
    positions (each in [0, 1]) along the horizontal and vertical parts of the
    step.
    """

    def __init__(self, i, x, y):
        self.int = i
        self.x = x
        self.y = y

    def floor(self):
        """Return the integer part of the index."""
        return self.int

    def ceil(self):
        """Return the smallest integer index not less than this index."""
        if max(self.x, self.y) == 0:
            return self.int
        return self.int + 1

    def __format__(self, fmt):
        return "{0.int}.(x={0.x:{1}}, y={0.y:{1}})".format(self, fmt)

    def __str__(self):
        return format(self, "")

    @classmethod
    def pre_index(cls, n_pts, index):
        """Convert a raw path index on a steps-pre line into an `Index`."""
        whole, frac = divmod(index, 1)
        whole, odd = divmod(whole, 2)
        x, y = (frac, 1) if odd else (0, frac)
        return cls(whole, x, y)

    @classmethod
    def post_index(cls, n_pts, index):
        """Convert a raw path index on a steps-post line into an `Index`."""
        whole, frac = divmod(index, 1)
        whole, odd = divmod(whole, 2)
        x, y = (1, frac) if odd else (frac, 0)
        return cls(whole, x, y)

    @classmethod
    def mid_index(cls, n_pts, index):
        """Convert a raw path index on a steps-mid line into an `Index`."""
        whole, frac = divmod(index, 1)
        if whole == 0:
            frac = .5 + frac / 2
        elif whole == 2 * n_pts - 2:  # One less line than points.
            frac = frac / 2
        quot, odd = divmod(whole, 2)
        if odd:
            return cls(quot, .5, frac)
        if frac < .5:
            return cls(quot - 1, frac + .5, 1)
        return cls(quot, frac - .5, 0)
def _compute_projection_pick(artist, path, xy):
    """
    Project *xy* on *path* to obtain a `Selection` for *artist*.
    *path* is first transformed to screen coordinates using the artist
    transform, and the target of the returned `Selection` is transformed
    back to data coordinates using the artist *axes* inverse transform. The
    `Selection` `index` is returned as a float. This function returns ``None``
    for degenerate inputs.
    The caller is responsible for converting the index to the proper class if
    needed.
    """
    transform = artist.get_transform().frozen()
    tpath = (path.cleaned(transform) if transform.is_affine
             # `cleaned` only handles affine transforms.
             else transform.transform_path(path).cleaned())
    # `cleaned` should return a path where the first element is `MOVETO`, the
    # following are `LINETO` or `CLOSEPOLY`, and the last one is `STOP`, i.e.
    #     codes = path.codes
    #     assert (codes[0], codes[-1]) == (path.MOVETO, path.STOP)
    #     assert np.in1d(codes[1:-1], [path.LINETO, path.CLOSEPOLY]).all()
    vertices = tpath.vertices[:-1]
    codes = tpath.codes[:-1]
    # CLOSEPOLY vertices are dummies; close the polygon onto its first vertex.
    vertices[codes == tpath.CLOSEPOLY] = vertices[0]
    # Unit vectors for each segment.
    us = vertices[1:] - vertices[:-1]
    ls = np.hypot(*us.T)
    with np.errstate(invalid="ignore"):
        # Results in 0/0 for repeated consecutive points.
        us /= ls[:, None]
    # Vectors from each vertex to the event (overwritten below).
    vs = xy - vertices[:-1]
    # Clipped dot products -- `einsum` cannot be done in place, `clip` can.
    # `clip` can trigger invalid comparisons if there are nan points.
    with np.errstate(invalid="ignore"):
        dot = np.clip(np.einsum("ij,ij->i", vs, us), 0, ls, out=vs[:, 0])
    # Projections.
    projs = vertices[:-1] + dot[:, None] * us
    # Distances event->projection, written into vs[:, 1] (no longer needed).
    ds = np.hypot(*(xy - projs).T, out=vs[:, 1])
    try:
        argmin = np.nanargmin(ds)
        dmin = ds[argmin]
    except (ValueError, IndexError):  # See above re: exceptions caught.
        return
    else:
        target = AttrArray(
            artist.axes.transData.inverted().transform_point(projs[argmin]))
        # Fractional index along the *original* path, correcting for extra
        # interpolation steps introduced by `cleaned`.
        target.index = (
            (argmin + dot[argmin] / ls[argmin])
            / (path._interpolation_steps / tpath._interpolation_steps))
        return Selection(artist, target, dmin, None, None)
def _untransform(orig_xy, screen_xy, ax):
"""
Return data coordinates to place an annotation at screen coordinates
*screen_xy* in axes *ax*.
*orig_xy* are the "original" coordinates as stored by the artist; they are
transformed to *screen_xy* by whatever transform the artist uses. If the
artist uses ``ax.transData``, just return *orig_xy*; else, apply
``ax.transData.inverse()`` to *screen_xy*. (The first case is more
accurate than always applying ``ax.transData.inverse()``.)
"""
tr_xy = ax.transData.transform(orig_xy)
return (
orig_xy
if ((tr_xy == screen_xy) | np.isnan(tr_xy) & np.isnan(screen_xy)).all()
else ax.transData.inverted().transform(screen_xy))
@compute_pick.register(Line2D)
def _(artist, event):
    # No need to call `line.contains` as we're going to redo the work anyways
    # (also see matplotlib/matplotlib#6645, though that's fixed in mpl2.1).
    # Always work in screen coordinates, as this is how we need to compute
    # distances. Note that the artist transform may be different from the axes
    # transform (e.g., for axvline).
    xy = event.x, event.y
    data_xy = artist.get_xydata()
    data_screen_xy = artist.get_transform().transform(data_xy)
    sels = []
    # If markers are visible, find the closest vertex.
    if artist.get_marker() not in ["None", "none", " ", "", None]:
        ds = np.hypot(*(xy - data_screen_xy).T)
        try:
            argmin = np.nanargmin(ds)
            dmin = ds[argmin]
        except (ValueError, IndexError):
            # numpy 1.7.0's `nanargmin([nan])` returns nan, so
            # `ds[argmin]` raises IndexError. In later versions of numpy,
            # `nanargmin([nan])` raises ValueError (the release notes for 1.8.0
            # are incorrect on this topic).
            pass
        else:
            # More precise than transforming back.
            target = _with_attrs(
                _untransform(
                    data_xy[argmin], data_screen_xy[argmin], artist.axes),
                index=argmin)
            sels.append(Selection(artist, target, dmin, None, None))
    # If lines are visible, find the closest projection.
    if (artist.get_linestyle() not in ["None", "none", " ", "", None]
            and len(artist.get_xydata()) > 1):
        sel = _compute_projection_pick(artist, artist.get_path(), xy)
        if sel is not None:
            # For step drawstyles, convert the raw fractional path index into
            # an `Index` that accounts for the interleaved horizontal/vertical
            # step segments.
            sel.target.index = {
                "_draw_lines": lambda _, index: index,
                "_draw_steps_pre": Index.pre_index,
                "_draw_steps_mid": Index.mid_index,
                "_draw_steps_post": Index.post_index}[
                Line2D.drawStyles[artist.get_drawstyle()]](
                len(data_xy), sel.target.index)
            sels.append(sel)
    # Best (smallest-distance) candidate wins, if within pickradius.
    sel = min(sels, key=lambda sel: sel.dist, default=None)
    return sel if sel and sel.dist < artist.get_pickradius() else None
@compute_pick.register(PathPatch)
@compute_pick.register(Polygon)
@compute_pick.register(Rectangle)
def _(artist, event):
sel = _compute_projection_pick(
artist, artist.get_path(), (event.x, event.y))
if sel and sel.dist < PATCH_PICKRADIUS:
return sel
@compute_pick.register(LineCollection)
@compute_pick.register(PatchCollection)
@compute_pick.register(PathCollection)
def _(artist, event):
# Use the C implementation to prune the list of segments.
contains, info = artist.contains(event)
if not contains:
return
offsets = artist.get_offsets()
paths = artist.get_paths()
if _is_scatter(artist):
inds = info["ind"]
offsets = artist.get_offsets()[inds]
offsets_screen = artist.get_offset_transform().transform(offsets)
ds = np.hypot(*(offsets_screen - [event.x, event.y]).T)
argmin = ds.argmin()
target = _with_attrs(
_untransform(offsets[argmin], offsets_screen[argmin], artist.axes),
index=inds[argmin])
return Selection(artist, target, ds[argmin], None, None)
else:
# Note that this won't select implicitly closed paths.
sels = [
_compute_projection_pick(
artist,
Affine2D().translate(*offsets[ind % len(offsets)])
.transform_path(paths[ind % len(paths)]),
(event.x, event.y))
for ind in info["ind"]]
sel, index = min(
((sel, info["ind"][idx]) for idx, sel in enumerate(sels) if sel),
key=lambda sel_idx: sel_idx[0].dist,
default=(None, None))
if sel:
sel = sel._replace(artist=artist)
sel.target.index = (index, sel.target.index)
if (isinstance(artist, PatchCollection)
and sel.dist >= PATCH_PICKRADIUS):
sel = None
return sel
@compute_pick.register(AxesImage)
def _(artist, event):
if type(artist) != AxesImage:
# Skip and warn on subclasses (`NonUniformImage`, `PcolorImage`) as
# they do not implement `contains` correctly. Even if they did, they
# would not support moving as we do not know where a given index maps
# back physically.
return compute_pick.dispatch(object)(artist, event)
contains, _ = artist.contains(event)
if not contains:
return
ns = np.asarray(artist.get_array().shape[:2])[::-1] # (y, x) -> (x, y)
xy = np.array([event.xdata, event.ydata])
xmin, xmax, ymin, ymax = artist.get_extent()
# Handling of "upper" origin copied from AxesImage.get_cursor_data.
if artist.origin == "upper":
ymin, ymax = ymax, ymin
low, high = np.array([[xmin, ymin], [xmax, ymax]])
idxs = ((xy - low) / (high - low) * ns).astype(int)[::-1]
target = _with_attrs(xy, index=tuple(idxs))
return Selection(artist, target, 0, None, None)
@compute_pick.register(Barbs)
@compute_pick.register(Quiver)
def _(artist, event):
offsets = artist.get_offsets()
offsets_screen = artist.get_offset_transform().transform(offsets)
ds = np.hypot(*(offsets_screen - [event.x, event.y]).T)
argmin = np.nanargmin(ds)
if ds[argmin] < artist.get_pickradius():
target = _with_attrs(
_untransform(offsets[argmin], offsets_screen[argmin], artist.axes),
index=argmin)
return Selection(artist, target, ds[argmin], None, None)
else:
return None
@compute_pick.register(Text)
def _(artist, event):
return
@compute_pick.register(ContainerArtist)
def _(artist, event):
sel = compute_pick(artist.container, event)
if sel:
sel = sel._replace(artist=artist)
return sel
@compute_pick.register(BarContainer)
def _(container, event):
try:
(idx, patch), = {
(idx, patch) for idx, patch in enumerate(container.patches)
if patch.contains(event)[0]}
except ValueError:
return
target = _with_attrs([event.xdata, event.ydata], index=idx)
if patch.sticky_edges.x:
target[0], = (
x for x in [patch.get_x(), patch.get_x() + patch.get_width()]
if x not in patch.sticky_edges.x)
if patch.sticky_edges.y:
target[1], = (
y for y in [patch.get_y(), patch.get_y() + patch.get_height()]
if y not in patch.sticky_edges.y)
return Selection(None, target, 0, None, None)
@compute_pick.register(ErrorbarContainer)
def _(container, event):
data_line, cap_lines, err_lcs = container
sel_data = compute_pick(data_line, event) if data_line else None
sel_err = min(
filter(None, (compute_pick(err_lc, event) for err_lc in err_lcs)),
key=lambda sel: sel.dist, default=None)
if (sel_data and sel_data.dist < getattr(sel_err, "dist", np.inf)):
return sel_data
elif sel_err:
idx, _ = sel_err.target.index
if data_line:
target = _with_attrs(data_line.get_xydata()[idx], index=idx)
else: # We can't guess the original data in that case!
return
return Selection(None, target, 0, None, None)
else:
return
@compute_pick.register(StemContainer)
def _(container, event):
sel = compute_pick(container.markerline, event)
if sel:
return sel
idx_sel = min(filter(lambda idx_sel: idx_sel[1] is not None,
((idx, compute_pick(line, event))
for idx, line in enumerate(container.stemlines))),
key=lambda idx_sel: idx_sel[1].dist, default=None)
if idx_sel:
idx, _ = idx_sel
target = _with_attrs(
container.stemlines[idx].get_xydata()[-1], index=idx)
return Selection(None, target, 0, None, None)
def _call_with_selection(func):
    """Decorator that passes a `Selection` built from the non-kwonly args."""
    # Keyword-only parameters of the wrapped function are forwarded as-is;
    # all other arguments are packed into a Selection.
    wrapped_kwonly_params = [
        param for param in inspect.signature(func).parameters.values()
        if param.kind == param.KEYWORD_ONLY]
    sel_sig = inspect.signature(Selection)
    # Give every Selection field a default of None, so callers may pass only
    # a prefix of the fields.
    default_sel_sig = sel_sig.replace(
        parameters=[param.replace(default=None) if param.default is param.empty
                    else param
                    for param in sel_sig.parameters.values()])
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Pull out the kwonly arguments destined for `func` itself.
        extra_kw = {param.name: kwargs.pop(param.name)
                    for param in wrapped_kwonly_params if param.name in kwargs}
        ba = default_sel_sig.bind(*args, **kwargs)
        # apply_defaults
        ba.arguments = ChainMap(
            ba.arguments,
            {name: param.default
             for name, param in default_sel_sig.parameters.items()
             if param.default is not param.empty})
        sel = Selection(*ba.args, **ba.kwargs)
        return func(sel, **extra_kw)
    # Advertise the combined signature (Selection fields + kwonly params).
    wrapper.__signature__ = Signature(
        list(sel_sig.parameters.values()) + wrapped_kwonly_params)
    return wrapper
def _format_coord_unspaced(ax, xy):
# Un-space-pad, remove empty coordinates from the output of
# `format_{x,y}data`, and rejoin with newlines.
return "\n".join(
line for line, empty in zip(
re.split(",? +", ax.format_coord(*xy)),
itertools.chain(["x=", "y=", "z="], itertools.repeat(None)))
if line != empty).rstrip()
@functools.singledispatch
@_call_with_selection
def get_ann_text(sel):
"""
Compute an annotating text for a `Selection` (passed **unpacked**).
This is a single-dispatch function; implementations for various artist
classes follow.
"""
warnings.warn(
"Annotation support for {} is missing".format(type(sel.artist)))
return ""
def _strip_math(s):
return cbook.strip_math(s) if len(s) >= 2 and s[0] == s[-1] == "$" else s
def _format_scalarmappable_value(artist, idx): # matplotlib/matplotlib#12473.
data = artist.get_array()[idx]
if np.ndim(data) == 0:
if not artist.colorbar:
fig = Figure()
ax = fig.subplots()
artist.colorbar = fig.colorbar(artist, cax=ax)
# This hack updates the ticks without actually paying the cost of
# drawing (RendererBase.draw_path raises NotImplementedError).
try:
ax.yaxis.draw(RendererBase())
except NotImplementedError:
pass
fmt = artist.colorbar.formatter.format_data_short
return "[" + _strip_math(fmt(data).strip()) + "]"
else:
text = artist.format_cursor_data(data)
# get_cursor_data changed in Matplotlib 3.
if not re.match(r"\A\[.*\]\Z", text):
text = "[{}]".format(text)
return text
@get_ann_text.register(Line2D)
@get_ann_text.register(LineCollection)
@get_ann_text.register(PatchCollection)
@get_ann_text.register(PathCollection)
@get_ann_text.register(Patch)
@_call_with_selection
def _(sel):
artist = sel.artist
label = artist.get_label() or ""
text = _format_coord_unspaced(artist.axes, sel.target)
if (_is_scatter(artist)
# Heuristic: is the artist colormapped?
# Note that this doesn't handle size-mapping (which is more likely
# to involve an arbitrary scaling).
and artist.get_array() is not None
and len(artist.get_array()) == len(artist.get_offsets())):
value = _format_scalarmappable_value(artist, sel.target.index)
text = "{}\n{}".format(text, value)
if re.match("[^_]", label):
text = "{}\n{}".format(label, text)
return text
_Event = namedtuple("_Event", "xdata ydata")
@get_ann_text.register(AxesImage)
@_call_with_selection
def _(sel):
artist = sel.artist
text = _format_coord_unspaced(artist.axes, sel.target)
cursor_text = _format_scalarmappable_value(artist, sel.target.index)
return "{}\n{}".format(text, cursor_text)
@get_ann_text.register(Barbs)
@_call_with_selection
def _(sel):
artist = sel.artist
text = "{}\n{}".format(
_format_coord_unspaced(artist.axes, sel.target),
(artist.u[sel.target.index], artist.v[sel.target.index]))
return text
@get_ann_text.register(Quiver)
@_call_with_selection
def _(sel):
artist = sel.artist
text = "{}\n{}".format(
_format_coord_unspaced(artist.axes, sel.target),
(artist.U[sel.target.index], artist.V[sel.target.index]))
return text
@get_ann_text.register(ContainerArtist)
@_call_with_selection
def _(sel):
return get_ann_text(*sel._replace(artist=sel.artist.container))
@get_ann_text.register(BarContainer)
@_call_with_selection
def _(sel):
return _format_coord_unspaced(
_artist_in_container(sel.artist).axes, sel.target)
@get_ann_text.register(ErrorbarContainer)
@_call_with_selection
def _(sel):
data_line, cap_lines, err_lcs = sel.artist
ann_text = get_ann_text(*sel._replace(artist=data_line))
if isinstance(sel.target.index, Integral):
err_lcs = iter(err_lcs)
for idx, (dir, has) in enumerate(
zip("xy", [sel.artist.has_xerr, sel.artist.has_yerr])):
if has:
err = (next(err_lcs).get_paths()[sel.target.index].vertices
- data_line.get_xydata()[sel.target.index])[:, idx]
err_s = [getattr(_artist_in_container(sel.artist).axes,
"format_{}data".format(dir))(e).rstrip()
for e in err]
# We'd normally want to check err.sum() == 0, but that can run
# into fp inaccuracies.
if len({s.lstrip("+-") for s in err_s}) == 1:
repl = r"\1=$\2\\pm{}$\3".format(err_s[1])
else:
err_s = [("+" if not s.startswith(("+", "-")) else "") + s
for s in err_s]
repl = r"\1=$\2_{{{}}}^{{{}}}$\3".format(*err_s)
ann_text = re.sub("({})=(.*)(\n?)".format(dir), repl, ann_text)
return ann_text
@get_ann_text.register(StemContainer)
@_call_with_selection
def _(sel):
return get_ann_text(*sel._replace(artist=sel.artist.markerline))
@functools.singledispatch
@_call_with_selection
def move(sel, *, key):
"""
Move a `Selection` (passed **unpacked**) following a keypress.
This function is used to implement annotation displacement through the
keyboard.
This is a single-dispatch function; implementations for various artist
classes follow.
"""
return sel
def _move_within_points(sel, xys, *, key):
    # Step the selection to the previous/next *finite* point of *xys* for
    # "left"/"right" keypresses; any other key returns the selection as is.
    # Avoid infinite loop in case everything became nan at some point.
    for _ in range(len(xys)):
        if key == "left":
            new_idx = int(np.ceil(sel.target.index) - 1) % len(xys)
        elif key == "right":
            new_idx = int(np.floor(sel.target.index) + 1) % len(xys)
        else:
            return sel
        target = _with_attrs(xys[new_idx], index=new_idx)
        sel = sel._replace(target=target, dist=0)
        if np.isfinite(target).all():
            return sel
    # NOTE(review): if *xys* is empty or every point is non-finite, the loop
    # falls through and None is returned implicitly — confirm callers cope.
@move.register(Line2D)
@_call_with_selection
def _(sel, *, key):
    # Move within the line's data points, expressed in data coordinates.
    data_xy = sel.artist.get_xydata()
    return _move_within_points(
        sel,
        _untransform(data_xy, sel.artist.get_transform().transform(data_xy),
                     sel.artist.axes),
        key=key)
@move.register(PathCollection)
@_call_with_selection
def _(sel, *, key):
    # Only scatter plots have well-defined per-point offsets to step through;
    # other path collections are left unmoved.
    if _is_scatter(sel.artist):
        offsets = sel.artist.get_offsets()
        return _move_within_points(
            sel,
            _untransform(
                offsets, sel.artist.get_offset_transform().transform(offsets),
                sel.artist.axes),
            key=key)
    else:
        return sel
@move.register(AxesImage)
@_call_with_selection
def _(sel, *, key):
    # Shift the selected pixel index by one step, wrapping around the image.
    ns = sel.artist.get_array().shape
    # The vertical step direction depends on the image origin setting.
    idxs = (np.asarray(sel.target.index)
            + {"left": [0, -1],
               "right": [0, 1],
               "up": {"lower": [1, 0], "upper": [-1, 0]}[sel.artist.origin],
               "down": {"lower": [-1, 0], "upper": [1, 0]}[sel.artist.origin]}[
                   key]) % ns
    xmin, xmax, ymin, ymax = sel.artist.get_extent()
    if sel.artist.origin == "upper":
        ymin, ymax = ymax, ymin
    low, high = np.array([[xmin, ymin], [xmax, ymax]])
    # Map the (row, col) index back to data coordinates of the pixel center
    # within the image extent (note the [::-1] to go from row/col to x/y).
    target = _with_attrs(((idxs + .5) / ns)[::-1] * (high - low) + low,
                         index=tuple(idxs))
    return sel._replace(target=target)
@move.register(ContainerArtist)
@_call_with_selection
def _(sel, *, key):
    # Delegate to the wrapped container, then restore the wrapper artist on
    # the resulting selection.
    return (move(*sel._replace(artist=sel.artist.container), key=key)
            ._replace(artist=sel.artist))
@move.register(ErrorbarContainer)
@_call_with_selection
def _(sel, *, key):
    # Only the data line is navigable; caps and error line collections are
    # unpacked but intentionally unused here.
    data_line, cap_lines, err_lcs = sel.artist
    return _move_within_points(sel, data_line.get_xydata(), key=key)
@functools.singledispatch
@_call_with_selection
def make_highlight(sel, *, highlight_kwargs):
    """
    Create a highlight for a `Selection`.
    This is a single-dispatch function; implementations for various artist
    classes follow.
    """
    # Default: no highlight support for this artist type; warn and
    # implicitly return None (callers treat a falsy result as "no highlight").
    warnings.warn(
        "Highlight support for {} is missing".format(type(sel.artist)))
@make_highlight.register(Line2D)
@_call_with_selection
def _(sel, *, highlight_kwargs):
    # Highlight by drawing a restyled shallow copy of the line on top.
    hl = copy.copy(sel.artist)
    _set_valid_props(hl, highlight_kwargs)
    return hl
@make_highlight.register(PathCollection)
@_call_with_selection
def _(sel, *, highlight_kwargs):
    hl = copy.copy(sel.artist)
    offsets = hl.get_offsets()
    # Keep only the selected offset(s); all other points are hidden by
    # replacing their coordinates with nan.
    hl.set_offsets(np.where(
        np.arange(len(offsets))[:, None] == sel.target.index, offsets, np.nan))
    _set_valid_props(hl, highlight_kwargs)
    return hl
|
anntzer/mplcursors
|
lib/mplcursors/_mplcursors.py
|
_get_rounded_intersection_area
|
python
|
def _get_rounded_intersection_area(bbox_1, bbox_2):
# The rounding allows sorting areas without floating point issues.
bbox = bbox_1.intersection(bbox_1, bbox_2)
return round(bbox.width * bbox.height, 8) if bbox else 0
|
Compute the intersection area between two bboxes rounded to 8 digits.
|
train
|
https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_mplcursors.py#L66-L70
| null |
from collections import ChainMap, Counter
from collections.abc import Iterable
from contextlib import suppress
import copy
from functools import partial
import sys
import weakref
from weakref import WeakKeyDictionary
from matplotlib.axes import Axes
from matplotlib.container import Container
from matplotlib.figure import Figure
import numpy as np
from . import _pick_info
# Default key/button bindings; see the *bindings* argument to `Cursor`.
_default_bindings = dict(
    select=1,
    deselect=3,
    left="shift+left",
    right="shift+right",
    up="shift+up",
    down="shift+down",
    toggle_enabled="e",
    toggle_visible="v",
)
# Default keyword arguments for the `annotate` call that creates the tooltip.
_default_annotation_kwargs = dict(
    textcoords="offset points",
    bbox=dict(
        boxstyle="round,pad=.5",
        fc="yellow",
        alpha=.5,
        ec="k",
    ),
    arrowprops=dict(
        arrowstyle="->",
        connectionstyle="arc3",
        shrinkB=0,
        ec="k",
    ),
)
# Candidate annotation placements (offset + alignment), tried in order by the
# automatic positioning logic in `Cursor.add_selection`.
_default_annotation_positions = [
    dict(position=(-15, 15), ha="right", va="bottom"),
    dict(position=(15, 15), ha="left", va="bottom"),
    dict(position=(15, -15), ha="left", va="top"),
    dict(position=(-15, -15), ha="right", va="top"),
]
_default_highlight_kwargs = dict(
    # Only the kwargs corresponding to properties of the artist will be passed.
    # Line2D.
    color="yellow",
    markeredgecolor="yellow",
    linewidth=3,
    markeredgewidth=3,
    # PathCollection.
    facecolor="yellow",
    edgecolor="yellow",
)
class _MarkedStr(str):
    """A string subclass solely for marking purposes."""
    # Used to tag default ha/va alignment values, so that `Cursor` can later
    # tell (via isinstance) whether a callback explicitly overrode them.
def _iter_axes_subartists(ax):
r"""Yield all child `Artist`\s (*not* `Container`\s) of *ax*."""
yield from ax.collections
yield from ax.images
yield from ax.lines
yield from ax.patches
yield from ax.texts
def _is_alive(artist):
    """Check whether *artist* is still present on its parent axes."""
    # A `ContainerArtist` proxy is alive iff its wrapped container is still
    # registered on the axes; any other artist must appear among the axes'
    # sub-artists.  A None artist, or one with no `.axes`, is never alive.
    return bool(artist
                and artist.axes
                and (artist.container in artist.axes.containers
                     if isinstance(artist, _pick_info.ContainerArtist) else
                     artist in _iter_axes_subartists(artist.axes)))
def _reassigned_axes_event(event, ax):
"""Reassign *event* to *ax*."""
event = copy.copy(event)
event.xdata, event.ydata = (
ax.transData.inverted().transform_point((event.x, event.y)))
return event
class Cursor:
    """
    A cursor for selecting Matplotlib artists.
    Attributes
    ----------
    bindings : dict
        See the *bindings* keyword argument to the constructor.
    annotation_kwargs : dict
        See the *annotation_kwargs* keyword argument to the constructor.
    annotation_positions : dict
        See the *annotation_positions* keyword argument to the constructor.
    highlight_kwargs : dict
        See the *highlight_kwargs* keyword argument to the constructor.
    """
    # Maps each selectable artist to the set of cursors attached to it; being
    # a WeakKeyDictionary, entries disappear once the artist itself dies, so
    # cursors are kept alive exactly as long as one of their artists is.
    _keep_alive = WeakKeyDictionary()
    def __init__(self,
                 artists,
                 *,
                 multiple=False,
                 highlight=False,
                 hover=False,
                 bindings=None,
                 annotation_kwargs=None,
                 annotation_positions=None,
                 highlight_kwargs=None):
        """
        Construct a cursor.
        Parameters
        ----------
        artists : List[Artist]
            A list of artists that can be selected by this cursor.
        multiple : bool, optional
            Whether multiple artists can be "on" at the same time (defaults to
            False).
        highlight : bool, optional
            Whether to also highlight the selected artist.  If so,
            "highlighter" artists will be placed as the first item in the
            :attr:`extras` attribute of the `Selection`.
        hover : bool, optional
            Whether to select artists upon hovering instead of by clicking.
            (Hovering over an artist while a button is pressed will not trigger
            a selection; right clicking on an annotation will still remove it.)
        bindings : dict, optional
            A mapping of button and keybindings to actions.  Valid entries are:
            ================ ==================================================
            'select'         mouse button to select an artist
                             (default: 1)
            'deselect'       mouse button to deselect an artist
                             (default: 3)
            'left'           move to the previous point in the selected path,
                             or to the left in the selected image
                             (default: shift+left)
            'right'          move to the next point in the selected path, or to
                             the right in the selected image
                             (default: shift+right)
            'up'             move up in the selected image
                             (default: shift+up)
            'down'           move down in the selected image
                             (default: shift+down)
            'toggle_enabled' toggle whether the cursor is active
                             (default: e)
            'toggle_visible' toggle default cursor visibility and apply it to
                             all cursors (default: v)
            ================ ==================================================
            Missing entries will be set to the defaults.  In order to not
            assign any binding to an action, set it to ``None``.
        annotation_kwargs : dict, optional
            Keyword argments passed to the `annotate
            <matplotlib.axes.Axes.annotate>` call.
        annotation_positions : List[dict], optional
            List of positions tried by the annotation positioning algorithm.
        highlight_kwargs : dict, optional
            Keyword arguments used to create a highlighted artist.
        """
        artists = list(artists)
        # Be careful with GC.
        self._artists = [weakref.ref(artist) for artist in artists]
        for artist in artists:
            type(self)._keep_alive.setdefault(artist, set()).add(self)
        self._multiple = multiple
        self._highlight = highlight
        self._visible = True
        self._enabled = True
        self._selections = []
        self._last_auto_position = None
        self._callbacks = {"add": [], "remove": []}
        # Connect the event handlers on every canvas involved; keep the
        # resulting disconnectors so that `remove` can undo the connections.
        connect_pairs = [("key_press_event", self._on_key_press)]
        if hover:
            if multiple:
                raise ValueError("'hover' and 'multiple' are incompatible")
            connect_pairs += [
                ("motion_notify_event", self._hover_handler),
                ("button_press_event", self._hover_handler)]
        else:
            connect_pairs += [
                ("button_press_event", self._nonhover_handler)]
        self._disconnectors = [
            partial(canvas.mpl_disconnect, canvas.mpl_connect(*pair))
            for pair in connect_pairs
            for canvas in {artist.figure.canvas for artist in artists}]
        # Merge user-supplied bindings over the defaults, then validate that
        # every key is known and no action shares a binding with another.
        bindings = dict(ChainMap(bindings if bindings is not None else {},
                                 _default_bindings))
        unknown_bindings = set(bindings) - set(_default_bindings)
        if unknown_bindings:
            raise ValueError("Unknown binding(s): {}".format(
                ", ".join(sorted(unknown_bindings))))
        duplicate_bindings = [
            k for k, v in Counter(list(bindings.values())).items() if v > 1]
        if duplicate_bindings:
            raise ValueError("Duplicate binding(s): {}".format(
                ", ".join(sorted(map(str, duplicate_bindings)))))
        self.bindings = bindings
        self.annotation_kwargs = (
            annotation_kwargs if annotation_kwargs is not None
            else copy.deepcopy(_default_annotation_kwargs))
        self.annotation_positions = (
            annotation_positions if annotation_positions is not None
            else copy.deepcopy(_default_annotation_positions))
        self.highlight_kwargs = (
            highlight_kwargs if highlight_kwargs is not None
            else copy.deepcopy(_default_highlight_kwargs))
    @property
    def artists(self):
        """The tuple of selectable artists."""
        # Work around matplotlib/matplotlib#6982: `cla()` does not clear
        # `.axes`.
        return tuple(filter(_is_alive, (ref() for ref in self._artists)))
    @property
    def enabled(self):
        """Whether clicks are registered for picking and unpicking events."""
        return self._enabled
    @enabled.setter
    def enabled(self, value):
        self._enabled = value
    @property
    def selections(self):
        r"""The tuple of current `Selection`\s."""
        # Detect annotations removed behind our back (e.g. via ann.remove()).
        for sel in self._selections:
            if sel.annotation.axes is None:
                raise RuntimeError("Annotation unexpectedly removed; "
                                   "use 'cursor.remove_selection' instead")
        return tuple(self._selections)
    @property
    def visible(self):
        """
        Whether selections are visible by default.
        Setting this property also updates the visibility status of current
        selections.
        """
        return self._visible
    @visible.setter
    def visible(self, value):
        self._visible = value
        for sel in self.selections:
            sel.annotation.set_visible(value)
            sel.annotation.figure.canvas.draw_idle()
    def add_selection(self, pi):
        """
        Create an annotation for a `Selection` and register it.
        Returns a new `Selection`, that has been registered by the `Cursor`,
        with the added annotation set in the :attr:`annotation` field and, if
        applicable, the highlighting artist in the :attr:`extras` field.
        Emits the ``"add"`` event with the new `Selection` as argument.  When
        the event is emitted, the position of the annotation is temporarily
        set to ``(nan, nan)``; if this position is not explicitly set by a
        callback, then a suitable position will be automatically computed.
        Likewise, if the text alignment is not explicitly set but the position
        is, then a suitable alignment will be automatically computed.
        """
        # pi: "pick_info", i.e. an incomplete selection.
        # Pre-fetch the figure and axes, as callbacks may actually unset them.
        figure = pi.artist.figure
        axes = pi.artist.axes
        if axes.get_renderer_cache() is None:
            figure.canvas.draw()  # Needed by draw_artist below anyways.
        renderer = pi.artist.axes.get_renderer_cache()
        # ha/va are wrapped in _MarkedStr so we can tell below whether a
        # callback explicitly overrode the alignment.
        ann = pi.artist.axes.annotate(
            _pick_info.get_ann_text(*pi), xy=pi.target,
            xytext=(np.nan, np.nan),
            ha=_MarkedStr("center"), va=_MarkedStr("center"),
            visible=self.visible,
            **self.annotation_kwargs)
        ann.draggable(use_blit=not self._multiple)
        extras = []
        if self._highlight:
            hl = self.add_highlight(*pi)
            if hl:
                extras.append(hl)
        sel = pi._replace(annotation=ann, extras=extras)
        self._selections.append(sel)
        for cb in self._callbacks["add"]:
            cb(sel)
        # Check that `ann.axes` is still set, as callbacks may have removed the
        # annotation.
        if ann.axes and ann.xyann == (np.nan, np.nan):
            # Pick the candidate position maximizing overlap with the figure,
            # then with the axes, breaking ties with the last used position.
            fig_bbox = figure.get_window_extent()
            ax_bbox = axes.get_window_extent()
            overlaps = []
            for idx, annotation_position in enumerate(
                    self.annotation_positions):
                ann.set(**annotation_position)
                # Work around matplotlib/matplotlib#7614: position update is
                # missing.
                ann.update_positions(renderer)
                bbox = ann.get_window_extent(renderer)
                overlaps.append(
                    (_get_rounded_intersection_area(fig_bbox, bbox),
                     _get_rounded_intersection_area(ax_bbox, bbox),
                     # Avoid needlessly jumping around by breaking ties using
                     # the last used position as default.
                     idx == self._last_auto_position))
            auto_position = max(range(len(overlaps)), key=overlaps.__getitem__)
            ann.set(**self.annotation_positions[auto_position])
            self._last_auto_position = auto_position
        else:
            # Callback set a position but not (necessarily) the alignment:
            # align away from the target based on the offset's sign.
            if isinstance(ann.get_ha(), _MarkedStr):
                ann.set_ha({-1: "right", 0: "center", 1: "left"}[
                    np.sign(np.nan_to_num(ann.xyann[0]))])
            if isinstance(ann.get_va(), _MarkedStr):
                ann.set_va({-1: "top", 0: "center", 1: "bottom"}[
                    np.sign(np.nan_to_num(ann.xyann[1]))])
        if (extras
                or len(self.selections) > 1 and not self._multiple
                or not figure.canvas.supports_blit):
            # Either:
            #  - there may be more things to draw, or
            #  - annotation removal will make a full redraw necessary, or
            #  - blitting is not (yet) supported.
            figure.canvas.draw_idle()
        elif ann.axes:
            # Fast path, only needed if the annotation has not been immediately
            # removed.
            figure.draw_artist(ann)
            # Explicit argument needed on MacOSX backend.
            figure.canvas.blit(figure.bbox)
        # Removal comes after addition so that the fast blitting path works.
        if not self._multiple:
            for sel in self.selections[:-1]:
                self.remove_selection(sel)
        return sel
    def add_highlight(self, artist, *args, **kwargs):
        """
        Create, add, and return a highlighting artist.
        This method should be called with an "unpacked" `Selection`,
        possibly with some fields set to None.
        It is up to the caller to register the artist with the proper
        `Selection` (by calling ``sel.extras.append`` on the result of this
        method) in order to ensure cleanup upon deselection.
        """
        hl = _pick_info.make_highlight(
            artist, *args,
            **ChainMap({"highlight_kwargs": self.highlight_kwargs}, kwargs))
        if hl:
            artist.axes.add_artist(hl)
        return hl
    def connect(self, event, func=None):
        """
        Connect a callback to a `Cursor` event; return the callback.
        Two events can be connected to:
        - callbacks connected to the ``"add"`` event are called when a
          `Selection` is added, with that selection as only argument;
        - callbacks connected to the ``"remove"`` event are called when a
          `Selection` is removed, with that selection as only argument.
        This method can also be used as a decorator::
            @cursor.connect("add")
            def on_add(sel):
                ...
        Examples of callbacks::
            # Change the annotation text and alignment:
            lambda sel: sel.annotation.set(
                text=sel.artist.get_label(),  # or use e.g. sel.target.index
                ha="center", va="bottom")
            # Make label non-draggable:
            lambda sel: sel.draggable(False)
        """
        if event not in self._callbacks:
            raise ValueError("{!r} is not a valid cursor event".format(event))
        if func is None:
            # Called with only the event name: act as a decorator factory.
            return partial(self.connect, event)
        self._callbacks[event].append(func)
        return func
    def disconnect(self, event, cb):
        """
        Disconnect a previously connected callback.
        If a callback is connected multiple times, only one connection is
        removed.
        """
        try:
            self._callbacks[event].remove(cb)
        except KeyError:
            # Unknown event name.
            raise ValueError("{!r} is not a valid cursor event".format(event))
        except ValueError:
            # Callback not present in the list for this event.
            raise ValueError("Callback {} is not registered".format(event))
    def remove(self):
        """
        Remove a cursor.
        Remove all `Selection`\\s, disconnect all callbacks, and allow the
        cursor to be garbage collected.
        """
        # Undo the canvas connections made in __init__.
        for disconnectors in self._disconnectors:
            disconnectors()
        for sel in self.selections:
            self.remove_selection(sel)
        # Drop ourselves from the keep-alive registry.
        for s in type(self)._keep_alive.values():
            with suppress(KeyError):
                s.remove(self)
    def _nonhover_handler(self, event):
        if event.name == "button_press_event":
            if event.button == self.bindings["select"]:
                self._on_select_button_press(event)
            if event.button == self.bindings["deselect"]:
                self._on_deselect_button_press(event)
    def _hover_handler(self, event):
        if event.name == "motion_notify_event" and event.button is None:
            # Filter away events where the mouse is pressed, in particular to
            # avoid conflicts between hover and draggable.
            self._on_select_button_press(event)
        elif (event.name == "button_press_event"
                and event.button == self.bindings["deselect"]):
            # Still allow removing the annotation by right clicking.
            self._on_deselect_button_press(event)
    def _filter_mouse_event(self, event):
        # Accept the event iff we are enabled, and either
        #  - no other widget is active, and this is not the second click of a
        #    double click (to prevent double selection), or
        #  - another widget is active, and this is a double click (to bypass
        #    the widget lock).
        return (self.enabled
                and event.canvas.widgetlock.locked() == event.dblclick)
    def _on_select_button_press(self, event):
        if not self._filter_mouse_event(event):
            return
        # Work around lack of support for twinned axes.
        per_axes_event = {ax: _reassigned_axes_event(event, ax)
                          for ax in {artist.axes for artist in self.artists}}
        pis = []
        for artist in self.artists:
            if (artist.axes is None  # Removed or figure-level artist.
                    or event.canvas is not artist.figure.canvas
                    or not artist.axes.contains(event)[0]):  # Cropped by axes.
                continue
            # Skip targets that are already selected.
            pi = _pick_info.compute_pick(artist, per_axes_event[artist.axes])
            if pi and not any((pi.artist, tuple(pi.target))
                              == (other.artist, tuple(other.target))
                              for other in self._selections):
                pis.append(pi)
        if not pis:
            return
        # Select the pick closest to the event.
        self.add_selection(min(pis, key=lambda pi: pi.dist))
    def _on_deselect_button_press(self, event):
        if not self._filter_mouse_event(event):
            return
        for sel in self.selections[::-1]:  # LIFO.
            ann = sel.annotation
            if event.canvas is not ann.figure.canvas:
                continue
            contained, _ = ann.contains(event)
            if contained:
                self.remove_selection(sel)
                break
    def _on_key_press(self, event):
        if event.key == self.bindings["toggle_enabled"]:
            self.enabled = not self.enabled
        elif event.key == self.bindings["toggle_visible"]:
            self.visible = not self.visible
        try:
            sel = self.selections[-1]
        except IndexError:
            return
        # Keyboard motion is implemented as remove + re-add at the new target.
        for key in ["left", "right", "up", "down"]:
            if event.key == self.bindings[key]:
                self.remove_selection(sel)
                self.add_selection(_pick_info.move(*sel, key=key))
                break
    def remove_selection(self, sel):
        """Remove a `Selection`."""
        self._selections.remove(sel)
        # <artist>.figure will be unset so we save them first.
        figures = {artist.figure for artist in [sel.annotation] + sel.extras}
        # ValueError is raised if the artist has already been removed.
        with suppress(ValueError):
            sel.annotation.remove()
        for artist in sel.extras:
            with suppress(ValueError):
                artist.remove()
        for cb in self._callbacks["remove"]:
            cb(sel)
        for figure in figures:
            figure.canvas.draw_idle()
def cursor(pickables=None, **kwargs):
    """
    Create a `Cursor` for a list of artists, containers, and axes.
    Parameters
    ----------
    pickables : Optional[List[Union[Artist, Container, Axes, Figure]]]
        All artists and containers in the list or on any of the axes or
        figures passed in the list are selectable by the constructed `Cursor`.
        Defaults to all artists and containers on any of the figures that
        :mod:`~matplotlib.pyplot` is tracking.  Note that the latter will only
        work when relying on pyplot, not when figures are directly instantiated
        (e.g., when manually embedding Matplotlib in a GUI toolkit).
    **kwargs
        Keyword arguments are passed to the `Cursor` constructor.
    """
    if pickables is None:
        # Do not import pyplot ourselves to avoid forcing the backend.
        plt = sys.modules.get("matplotlib.pyplot")
        pickables = [
            plt.figure(num) for num in plt.get_fignums()] if plt else []
    elif (isinstance(pickables, Container)
          or not isinstance(pickables, Iterable)):
        # A single pickable (note that containers are themselves iterable).
        pickables = [pickables]
    def iter_unpack_figures(pickables):
        # Replace figures by their axes, passing everything else through.
        for entry in pickables:
            if isinstance(entry, Figure):
                yield from entry.axes
            else:
                yield entry
    def iter_unpack_axes(pickables):
        # Replace axes by their sub-artists; containers (from the axes, or
        # given directly) are accumulated into the enclosing `containers`.
        for entry in pickables:
            if isinstance(entry, Axes):
                yield from _iter_axes_subartists(entry)
                containers.extend(entry.containers)
            elif isinstance(entry, Container):
                containers.append(entry)
            else:
                yield entry
    containers = []
    artists = list(iter_unpack_axes(iter_unpack_figures(pickables)))
    for container in containers:
        # Pick containers as a whole: replace their individual children by a
        # single ContainerArtist proxy.
        contained = list(filter(None, container.get_children()))
        for artist in contained:
            with suppress(ValueError):
                artists.remove(artist)
        if contained:
            artists.append(_pick_info.ContainerArtist(container))
    return Cursor(artists, **kwargs)
|
anntzer/mplcursors
|
lib/mplcursors/_mplcursors.py
|
_iter_axes_subartists
|
python
|
def _iter_axes_subartists(ax):
r"""Yield all child `Artist`\s (*not* `Container`\s) of *ax*."""
yield from ax.collections
yield from ax.images
yield from ax.lines
yield from ax.patches
yield from ax.texts
|
r"""Yield all child `Artist`\s (*not* `Container`\s) of *ax*.
|
train
|
https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_mplcursors.py#L73-L79
| null |
from collections import ChainMap, Counter
from collections.abc import Iterable
from contextlib import suppress
import copy
from functools import partial
import sys
import weakref
from weakref import WeakKeyDictionary
from matplotlib.axes import Axes
from matplotlib.container import Container
from matplotlib.figure import Figure
import numpy as np
from . import _pick_info
# Default key/button bindings; see the *bindings* argument to `Cursor`.
_default_bindings = dict(
    select=1,
    deselect=3,
    left="shift+left",
    right="shift+right",
    up="shift+up",
    down="shift+down",
    toggle_enabled="e",
    toggle_visible="v",
)
# Default keyword arguments for the `annotate` call that creates the tooltip.
_default_annotation_kwargs = dict(
    textcoords="offset points",
    bbox=dict(
        boxstyle="round,pad=.5",
        fc="yellow",
        alpha=.5,
        ec="k",
    ),
    arrowprops=dict(
        arrowstyle="->",
        connectionstyle="arc3",
        shrinkB=0,
        ec="k",
    ),
)
# Candidate annotation placements (offset + alignment), tried in order by the
# automatic positioning logic in `Cursor.add_selection`.
_default_annotation_positions = [
    dict(position=(-15, 15), ha="right", va="bottom"),
    dict(position=(15, 15), ha="left", va="bottom"),
    dict(position=(15, -15), ha="left", va="top"),
    dict(position=(-15, -15), ha="right", va="top"),
]
_default_highlight_kwargs = dict(
    # Only the kwargs corresponding to properties of the artist will be passed.
    # Line2D.
    color="yellow",
    markeredgecolor="yellow",
    linewidth=3,
    markeredgewidth=3,
    # PathCollection.
    facecolor="yellow",
    edgecolor="yellow",
)
class _MarkedStr(str):
    """A string subclass solely for marking purposes."""
    # Used to tag default ha/va alignment values, so that `Cursor` can later
    # tell (via isinstance) whether a callback explicitly overrode them.
def _get_rounded_intersection_area(bbox_1, bbox_2):
"""Compute the intersection area between two bboxes rounded to 8 digits."""
# The rounding allows sorting areas without floating point issues.
bbox = bbox_1.intersection(bbox_1, bbox_2)
return round(bbox.width * bbox.height, 8) if bbox else 0
def _is_alive(artist):
    """Check whether *artist* is still present on its parent axes."""
    # A `ContainerArtist` proxy is alive iff its wrapped container is still
    # registered on the axes; any other artist must appear among the axes'
    # sub-artists.  A None artist, or one with no `.axes`, is never alive.
    return bool(artist
                and artist.axes
                and (artist.container in artist.axes.containers
                     if isinstance(artist, _pick_info.ContainerArtist) else
                     artist in _iter_axes_subartists(artist.axes)))
def _reassigned_axes_event(event, ax):
    """Reassign *event* to *ax*."""
    # Work on a shallow copy so the caller's event is left untouched, and
    # recompute the data coordinates in *ax*'s data space.
    event = copy.copy(event)
    event.xdata, event.ydata = (
        ax.transData.inverted().transform_point((event.x, event.y)))
    return event
class Cursor:
"""
A cursor for selecting Matplotlib artists.
Attributes
----------
bindings : dict
See the *bindings* keyword argument to the constructor.
annotation_kwargs : dict
See the *annotation_kwargs* keyword argument to the constructor.
annotation_positions : dict
See the *annotation_positions* keyword argument to the constructor.
highlight_kwargs : dict
See the *highlight_kwargs* keyword argument to the constructor.
"""
_keep_alive = WeakKeyDictionary()
def __init__(self,
artists,
*,
multiple=False,
highlight=False,
hover=False,
bindings=None,
annotation_kwargs=None,
annotation_positions=None,
highlight_kwargs=None):
"""
Construct a cursor.
Parameters
----------
artists : List[Artist]
A list of artists that can be selected by this cursor.
multiple : bool, optional
Whether multiple artists can be "on" at the same time (defaults to
False).
highlight : bool, optional
Whether to also highlight the selected artist. If so,
"highlighter" artists will be placed as the first item in the
:attr:`extras` attribute of the `Selection`.
hover : bool, optional
Whether to select artists upon hovering instead of by clicking.
(Hovering over an artist while a button is pressed will not trigger
a selection; right clicking on an annotation will still remove it.)
bindings : dict, optional
A mapping of button and keybindings to actions. Valid entries are:
================ ==================================================
'select' mouse button to select an artist
(default: 1)
'deselect' mouse button to deselect an artist
(default: 3)
'left' move to the previous point in the selected path,
or to the left in the selected image
(default: shift+left)
'right' move to the next point in the selected path, or to
the right in the selected image
(default: shift+right)
'up' move up in the selected image
(default: shift+up)
'down' move down in the selected image
(default: shift+down)
'toggle_enabled' toggle whether the cursor is active
(default: e)
'toggle_visible' toggle default cursor visibility and apply it to
all cursors (default: v)
================ ==================================================
Missing entries will be set to the defaults. In order to not
assign any binding to an action, set it to ``None``.
annotation_kwargs : dict, optional
Keyword argments passed to the `annotate
<matplotlib.axes.Axes.annotate>` call.
annotation_positions : List[dict], optional
List of positions tried by the annotation positioning algorithm.
highlight_kwargs : dict, optional
Keyword arguments used to create a highlighted artist.
"""
artists = list(artists)
# Be careful with GC.
self._artists = [weakref.ref(artist) for artist in artists]
for artist in artists:
type(self)._keep_alive.setdefault(artist, set()).add(self)
self._multiple = multiple
self._highlight = highlight
self._visible = True
self._enabled = True
self._selections = []
self._last_auto_position = None
self._callbacks = {"add": [], "remove": []}
connect_pairs = [("key_press_event", self._on_key_press)]
if hover:
if multiple:
raise ValueError("'hover' and 'multiple' are incompatible")
connect_pairs += [
("motion_notify_event", self._hover_handler),
("button_press_event", self._hover_handler)]
else:
connect_pairs += [
("button_press_event", self._nonhover_handler)]
self._disconnectors = [
partial(canvas.mpl_disconnect, canvas.mpl_connect(*pair))
for pair in connect_pairs
for canvas in {artist.figure.canvas for artist in artists}]
bindings = dict(ChainMap(bindings if bindings is not None else {},
_default_bindings))
unknown_bindings = set(bindings) - set(_default_bindings)
if unknown_bindings:
raise ValueError("Unknown binding(s): {}".format(
", ".join(sorted(unknown_bindings))))
duplicate_bindings = [
k for k, v in Counter(list(bindings.values())).items() if v > 1]
if duplicate_bindings:
raise ValueError("Duplicate binding(s): {}".format(
", ".join(sorted(map(str, duplicate_bindings)))))
self.bindings = bindings
self.annotation_kwargs = (
annotation_kwargs if annotation_kwargs is not None
else copy.deepcopy(_default_annotation_kwargs))
self.annotation_positions = (
annotation_positions if annotation_positions is not None
else copy.deepcopy(_default_annotation_positions))
self.highlight_kwargs = (
highlight_kwargs if highlight_kwargs is not None
else copy.deepcopy(_default_highlight_kwargs))
@property
def artists(self):
"""The tuple of selectable artists."""
# Work around matplotlib/matplotlib#6982: `cla()` does not clear
# `.axes`.
return tuple(filter(_is_alive, (ref() for ref in self._artists)))
@property
def enabled(self):
"""Whether clicks are registered for picking and unpicking events."""
return self._enabled
@enabled.setter
def enabled(self, value):
self._enabled = value
@property
def selections(self):
r"""The tuple of current `Selection`\s."""
for sel in self._selections:
if sel.annotation.axes is None:
raise RuntimeError("Annotation unexpectedly removed; "
"use 'cursor.remove_selection' instead")
return tuple(self._selections)
@property
def visible(self):
"""
Whether selections are visible by default.
Setting this property also updates the visibility status of current
selections.
"""
return self._visible
@visible.setter
def visible(self, value):
self._visible = value
for sel in self.selections:
sel.annotation.set_visible(value)
sel.annotation.figure.canvas.draw_idle()
def add_selection(self, pi):
"""
Create an annotation for a `Selection` and register it.
Returns a new `Selection`, that has been registered by the `Cursor`,
with the added annotation set in the :attr:`annotation` field and, if
applicable, the highlighting artist in the :attr:`extras` field.
Emits the ``"add"`` event with the new `Selection` as argument. When
the event is emitted, the position of the annotation is temporarily
set to ``(nan, nan)``; if this position is not explicitly set by a
callback, then a suitable position will be automatically computed.
Likewise, if the text alignment is not explicitly set but the position
is, then a suitable alignment will be automatically computed.
"""
# pi: "pick_info", i.e. an incomplete selection.
# Pre-fetch the figure and axes, as callbacks may actually unset them.
figure = pi.artist.figure
axes = pi.artist.axes
if axes.get_renderer_cache() is None:
figure.canvas.draw() # Needed by draw_artist below anyways.
renderer = pi.artist.axes.get_renderer_cache()
ann = pi.artist.axes.annotate(
_pick_info.get_ann_text(*pi), xy=pi.target,
xytext=(np.nan, np.nan),
ha=_MarkedStr("center"), va=_MarkedStr("center"),
visible=self.visible,
**self.annotation_kwargs)
ann.draggable(use_blit=not self._multiple)
extras = []
if self._highlight:
hl = self.add_highlight(*pi)
if hl:
extras.append(hl)
sel = pi._replace(annotation=ann, extras=extras)
self._selections.append(sel)
for cb in self._callbacks["add"]:
cb(sel)
# Check that `ann.axes` is still set, as callbacks may have removed the
# annotation.
if ann.axes and ann.xyann == (np.nan, np.nan):
fig_bbox = figure.get_window_extent()
ax_bbox = axes.get_window_extent()
overlaps = []
for idx, annotation_position in enumerate(
self.annotation_positions):
ann.set(**annotation_position)
# Work around matplotlib/matplotlib#7614: position update is
# missing.
ann.update_positions(renderer)
bbox = ann.get_window_extent(renderer)
overlaps.append(
(_get_rounded_intersection_area(fig_bbox, bbox),
_get_rounded_intersection_area(ax_bbox, bbox),
# Avoid needlessly jumping around by breaking ties using
# the last used position as default.
idx == self._last_auto_position))
auto_position = max(range(len(overlaps)), key=overlaps.__getitem__)
ann.set(**self.annotation_positions[auto_position])
self._last_auto_position = auto_position
else:
if isinstance(ann.get_ha(), _MarkedStr):
ann.set_ha({-1: "right", 0: "center", 1: "left"}[
np.sign(np.nan_to_num(ann.xyann[0]))])
if isinstance(ann.get_va(), _MarkedStr):
ann.set_va({-1: "top", 0: "center", 1: "bottom"}[
np.sign(np.nan_to_num(ann.xyann[1]))])
if (extras
or len(self.selections) > 1 and not self._multiple
or not figure.canvas.supports_blit):
# Either:
# - there may be more things to draw, or
# - annotation removal will make a full redraw necessary, or
# - blitting is not (yet) supported.
figure.canvas.draw_idle()
elif ann.axes:
# Fast path, only needed if the annotation has not been immediately
# removed.
figure.draw_artist(ann)
# Explicit argument needed on MacOSX backend.
figure.canvas.blit(figure.bbox)
# Removal comes after addition so that the fast blitting path works.
if not self._multiple:
for sel in self.selections[:-1]:
self.remove_selection(sel)
return sel
def add_highlight(self, artist, *args, **kwargs):
"""
Create, add, and return a highlighting artist.
This method is should be called with an "unpacked" `Selection`,
possibly with some fields set to None.
It is up to the caller to register the artist with the proper
`Selection` (by calling ``sel.extras.append`` on the result of this
method) in order to ensure cleanup upon deselection.
"""
hl = _pick_info.make_highlight(
artist, *args,
**ChainMap({"highlight_kwargs": self.highlight_kwargs}, kwargs))
if hl:
artist.axes.add_artist(hl)
return hl
def connect(self, event, func=None):
    """
    Connect a callback to a `Cursor` event; return the callback.

    Two events can be connected to:

    - callbacks connected to the ``"add"`` event are called when a
      `Selection` is added, with that selection as only argument;
    - callbacks connected to the ``"remove"`` event are called when a
      `Selection` is removed, with that selection as only argument.

    This method can also be used as a decorator::

        @cursor.connect("add")
        def on_add(sel):
            ...

    Examples of callbacks::

        # Change the annotation text and alignment:
        lambda sel: sel.annotation.set(
            text=sel.artist.get_label(),  # or use e.g. sel.target.index
            ha="center", va="bottom")

        # Make label non-draggable:
        lambda sel: sel.draggable(False)
    """
    try:
        registered = self._callbacks[event]
    except KeyError:
        raise ValueError("{!r} is not a valid cursor event".format(event))
    if func is None:
        # Decorator usage: defer registration until the function is known.
        return partial(self.connect, event)
    registered.append(func)
    return func
def disconnect(self, event, cb):
    """
    Disconnect a previously connected callback.

    If a callback is connected multiple times, only one connection is
    removed.

    :raises ValueError: if *event* is not a valid cursor event name, or
        if *cb* is not registered for *event*.
    """
    try:
        self._callbacks[event].remove(cb)
    except KeyError:
        # Unknown event name: self._callbacks has no such key.
        raise ValueError("{!r} is not a valid cursor event".format(event))
    except ValueError:
        # list.remove failed: the callback was never registered (or was
        # already removed).  Bug fix: report the *callback* in the
        # message, not the event name as the original did.
        raise ValueError("Callback {} is not registered".format(cb))
def remove(self):
    """
    Remove a cursor.

    Remove all `Selection`\\s, disconnect all callbacks, and allow the
    cursor to be garbage collected.
    """
    # Undo every canvas.mpl_connect made at construction time.
    for disconnectors in self._disconnectors:
        disconnectors()
    # `selections` is a snapshot tuple, so mutating the underlying list
    # through remove_selection while iterating is safe.
    for sel in self.selections:
        self.remove_selection(sel)
    # Drop this cursor from the class-level keep-alive registry so the
    # registered artists no longer pin it; suppress(KeyError) covers
    # sets that never contained this cursor.
    for s in type(self)._keep_alive.values():
        with suppress(KeyError):
            s.remove(self)
def _nonhover_handler(self, event):
if event.name == "button_press_event":
if event.button == self.bindings["select"]:
self._on_select_button_press(event)
if event.button == self.bindings["deselect"]:
self._on_deselect_button_press(event)
def _hover_handler(self, event):
if event.name == "motion_notify_event" and event.button is None:
# Filter away events where the mouse is pressed, in particular to
# avoid conflicts between hover and draggable.
self._on_select_button_press(event)
elif (event.name == "button_press_event"
and event.button == self.bindings["deselect"]):
# Still allow removing the annotation by right clicking.
self._on_deselect_button_press(event)
def _filter_mouse_event(self, event):
# Accept the event iff we are enabled, and either
# - no other widget is active, and this is not the second click of a
# double click (to prevent double selection), or
# - another widget is active, and this is a double click (to bypass
# the widget lock).
return (self.enabled
and event.canvas.widgetlock.locked() == event.dblclick)
def _on_select_button_press(self, event):
    if not self._filter_mouse_event(event):
        return
    # Work around lack of support for twinned axes: precompute, once per
    # axes in use, a copy of the event with data coordinates expressed
    # in that axes' transform.
    per_axes_event = {ax: _reassigned_axes_event(event, ax)
                      for ax in {artist.axes for artist in self.artists}}
    pis = []
    for artist in self.artists:
        if (artist.axes is None  # Removed or figure-level artist.
                or event.canvas is not artist.figure.canvas
                or not artist.axes.contains(event)[0]):  # Cropped by axes.
            continue
        pi = _pick_info.compute_pick(artist, per_axes_event[artist.axes])
        # Skip picks that duplicate an existing selection (same artist
        # and same target point).
        if pi and not any((pi.artist, tuple(pi.target))
                          == (other.artist, tuple(other.target))
                          for other in self._selections):
            pis.append(pi)
    if not pis:
        return
    # Select the candidate pick closest to the mouse position.
    self.add_selection(min(pis, key=lambda pi: pi.dist))
def _on_deselect_button_press(self, event):
    if not self._filter_mouse_event(event):
        return
    # Iterate in reverse so the most recently added selection under the
    # mouse is removed first (LIFO); only one removal per press.
    for sel in self.selections[::-1]:
        ann = sel.annotation
        # Ignore annotations living on a different canvas.
        if event.canvas is not ann.figure.canvas:
            continue
        contained, _ = ann.contains(event)
        if contained:
            self.remove_selection(sel)
            break
def _on_key_press(self, event):
    # Global toggles are handled first and do not require a selection.
    if event.key == self.bindings["toggle_enabled"]:
        self.enabled = not self.enabled
    elif event.key == self.bindings["toggle_visible"]:
        self.visible = not self.visible
    # The directional bindings act on the most recent selection, if any.
    try:
        sel = self.selections[-1]
    except IndexError:
        return
    for key in ["left", "right", "up", "down"]:
        if event.key == self.bindings[key]:
            # "Moving" is implemented as removing the selection and
            # re-adding it at the target computed by _pick_info.move.
            self.remove_selection(sel)
            self.add_selection(_pick_info.move(*sel, key=key))
            break
def remove_selection(self, sel):
    """Remove a `Selection`."""
    self._selections.remove(sel)
    # <artist>.figure will be unset by .remove() below, so save the
    # figures first in order to redraw them afterwards.
    figures = {artist.figure for artist in [sel.annotation] + sel.extras}
    # ValueError is raised if the artist has already been removed.
    with suppress(ValueError):
        sel.annotation.remove()
    for artist in sel.extras:
        with suppress(ValueError):
            artist.remove()
    # Notify "remove" callbacks only after the artists are gone.
    for cb in self._callbacks["remove"]:
        cb(sel)
    for figure in figures:
        figure.canvas.draw_idle()
def cursor(pickables=None, **kwargs):
    """
    Create a `Cursor` for a list of artists, containers, and axes.

    Parameters
    ----------
    pickables : Optional[List[Union[Artist, Container, Axes, Figure]]]
        All artists and containers in the list or on any of the axes or
        figures passed in the list are selectable by the constructed `Cursor`.
        Defaults to all artists and containers on any of the figures that
        :mod:`~matplotlib.pyplot` is tracking.  Note that the latter will only
        work when relying on pyplot, not when figures are directly instantiated
        (e.g., when manually embedding Matplotlib in a GUI toolkit).
    **kwargs
        Keyword arguments are passed to the `Cursor` constructor.
    """
    if pickables is None:
        # Do not import pyplot ourselves to avoid forcing the backend.
        plt = sys.modules.get("matplotlib.pyplot")
        pickables = [
            plt.figure(num) for num in plt.get_fignums()] if plt else []
    elif (isinstance(pickables, Container)
          or not isinstance(pickables, Iterable)):
        # A single object was passed; Containers are themselves iterable,
        # hence the explicit check before wrapping it in a list.
        pickables = [pickables]

    def iter_unpack_figures(pickables):
        # Replace figures by their axes; pass everything else through.
        for entry in pickables:
            if isinstance(entry, Figure):
                yield from entry.axes
            else:
                yield entry

    def iter_unpack_axes(pickables):
        # Replace axes by their child artists; containers are collected
        # on the side (handled separately below) rather than yielded.
        for entry in pickables:
            if isinstance(entry, Axes):
                yield from _iter_axes_subartists(entry)
                containers.extend(entry.containers)
            elif isinstance(entry, Container):
                containers.append(entry)
            else:
                yield entry

    containers = []
    artists = list(iter_unpack_axes(iter_unpack_figures(pickables)))
    for container in containers:
        # Replace the individual (non-None) children of each container
        # by a single ContainerArtist wrapper, so the container is
        # picked as a unit.
        contained = list(filter(None, container.get_children()))
        for artist in contained:
            with suppress(ValueError):
                artists.remove(artist)
        if contained:
            artists.append(_pick_info.ContainerArtist(container))
    return Cursor(artists, **kwargs)
|
anntzer/mplcursors
|
lib/mplcursors/_mplcursors.py
|
_is_alive
|
python
|
def _is_alive(artist):
return bool(artist
and artist.axes
and (artist.container in artist.axes.containers
if isinstance(artist, _pick_info.ContainerArtist) else
artist in _iter_axes_subartists(artist.axes)))
|
Check whether *artist* is still present on its parent axes.
|
train
|
https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_mplcursors.py#L82-L88
| null |
from collections import ChainMap, Counter
from collections.abc import Iterable
from contextlib import suppress
import copy
from functools import partial
import sys
import weakref
from weakref import WeakKeyDictionary
from matplotlib.axes import Axes
from matplotlib.container import Container
from matplotlib.figure import Figure
import numpy as np
from . import _pick_info
_default_bindings = dict(
select=1,
deselect=3,
left="shift+left",
right="shift+right",
up="shift+up",
down="shift+down",
toggle_enabled="e",
toggle_visible="v",
)
_default_annotation_kwargs = dict(
textcoords="offset points",
bbox=dict(
boxstyle="round,pad=.5",
fc="yellow",
alpha=.5,
ec="k",
),
arrowprops=dict(
arrowstyle="->",
connectionstyle="arc3",
shrinkB=0,
ec="k",
),
)
_default_annotation_positions = [
dict(position=(-15, 15), ha="right", va="bottom"),
dict(position=(15, 15), ha="left", va="bottom"),
dict(position=(15, -15), ha="left", va="top"),
dict(position=(-15, -15), ha="right", va="top"),
]
_default_highlight_kwargs = dict(
# Only the kwargs corresponding to properties of the artist will be passed.
# Line2D.
color="yellow",
markeredgecolor="yellow",
linewidth=3,
markeredgewidth=3,
# PathCollection.
facecolor="yellow",
edgecolor="yellow",
)
class _MarkedStr(str):
    """
    A string subclass solely for marking purposes.

    Used to tag default alignment values passed to ``annotate`` so that
    `Cursor.add_selection` can tell defaults apart from values set by an
    "add" callback (via its ``isinstance`` checks); the value itself
    behaves exactly like a plain `str`.
    """
def _get_rounded_intersection_area(bbox_1, bbox_2):
"""Compute the intersection area between two bboxes rounded to 8 digits."""
# The rounding allows sorting areas without floating point issues.
bbox = bbox_1.intersection(bbox_1, bbox_2)
return round(bbox.width * bbox.height, 8) if bbox else 0
def _iter_axes_subartists(ax):
r"""Yield all child `Artist`\s (*not* `Container`\s) of *ax*."""
yield from ax.collections
yield from ax.images
yield from ax.lines
yield from ax.patches
yield from ax.texts
def _reassigned_axes_event(event, ax):
"""Reassign *event* to *ax*."""
event = copy.copy(event)
event.xdata, event.ydata = (
ax.transData.inverted().transform_point((event.x, event.y)))
return event
class Cursor:
"""
A cursor for selecting Matplotlib artists.
Attributes
----------
bindings : dict
See the *bindings* keyword argument to the constructor.
annotation_kwargs : dict
See the *annotation_kwargs* keyword argument to the constructor.
annotation_positions : dict
See the *annotation_positions* keyword argument to the constructor.
highlight_kwargs : dict
See the *highlight_kwargs* keyword argument to the constructor.
"""
_keep_alive = WeakKeyDictionary()
def __init__(self,
artists,
*,
multiple=False,
highlight=False,
hover=False,
bindings=None,
annotation_kwargs=None,
annotation_positions=None,
highlight_kwargs=None):
"""
Construct a cursor.
Parameters
----------
artists : List[Artist]
A list of artists that can be selected by this cursor.
multiple : bool, optional
Whether multiple artists can be "on" at the same time (defaults to
False).
highlight : bool, optional
Whether to also highlight the selected artist. If so,
"highlighter" artists will be placed as the first item in the
:attr:`extras` attribute of the `Selection`.
hover : bool, optional
Whether to select artists upon hovering instead of by clicking.
(Hovering over an artist while a button is pressed will not trigger
a selection; right clicking on an annotation will still remove it.)
bindings : dict, optional
A mapping of button and keybindings to actions. Valid entries are:
================ ==================================================
'select' mouse button to select an artist
(default: 1)
'deselect' mouse button to deselect an artist
(default: 3)
'left' move to the previous point in the selected path,
or to the left in the selected image
(default: shift+left)
'right' move to the next point in the selected path, or to
the right in the selected image
(default: shift+right)
'up' move up in the selected image
(default: shift+up)
'down' move down in the selected image
(default: shift+down)
'toggle_enabled' toggle whether the cursor is active
(default: e)
'toggle_visible' toggle default cursor visibility and apply it to
all cursors (default: v)
================ ==================================================
Missing entries will be set to the defaults. In order to not
assign any binding to an action, set it to ``None``.
annotation_kwargs : dict, optional
Keyword argments passed to the `annotate
<matplotlib.axes.Axes.annotate>` call.
annotation_positions : List[dict], optional
List of positions tried by the annotation positioning algorithm.
highlight_kwargs : dict, optional
Keyword arguments used to create a highlighted artist.
"""
artists = list(artists)
# Be careful with GC.
self._artists = [weakref.ref(artist) for artist in artists]
for artist in artists:
type(self)._keep_alive.setdefault(artist, set()).add(self)
self._multiple = multiple
self._highlight = highlight
self._visible = True
self._enabled = True
self._selections = []
self._last_auto_position = None
self._callbacks = {"add": [], "remove": []}
connect_pairs = [("key_press_event", self._on_key_press)]
if hover:
if multiple:
raise ValueError("'hover' and 'multiple' are incompatible")
connect_pairs += [
("motion_notify_event", self._hover_handler),
("button_press_event", self._hover_handler)]
else:
connect_pairs += [
("button_press_event", self._nonhover_handler)]
self._disconnectors = [
partial(canvas.mpl_disconnect, canvas.mpl_connect(*pair))
for pair in connect_pairs
for canvas in {artist.figure.canvas for artist in artists}]
bindings = dict(ChainMap(bindings if bindings is not None else {},
_default_bindings))
unknown_bindings = set(bindings) - set(_default_bindings)
if unknown_bindings:
raise ValueError("Unknown binding(s): {}".format(
", ".join(sorted(unknown_bindings))))
duplicate_bindings = [
k for k, v in Counter(list(bindings.values())).items() if v > 1]
if duplicate_bindings:
raise ValueError("Duplicate binding(s): {}".format(
", ".join(sorted(map(str, duplicate_bindings)))))
self.bindings = bindings
self.annotation_kwargs = (
annotation_kwargs if annotation_kwargs is not None
else copy.deepcopy(_default_annotation_kwargs))
self.annotation_positions = (
annotation_positions if annotation_positions is not None
else copy.deepcopy(_default_annotation_positions))
self.highlight_kwargs = (
highlight_kwargs if highlight_kwargs is not None
else copy.deepcopy(_default_highlight_kwargs))
@property
def artists(self):
"""The tuple of selectable artists."""
# Work around matplotlib/matplotlib#6982: `cla()` does not clear
# `.axes`.
return tuple(filter(_is_alive, (ref() for ref in self._artists)))
@property
def enabled(self):
"""Whether clicks are registered for picking and unpicking events."""
return self._enabled
@enabled.setter
def enabled(self, value):
self._enabled = value
@property
def selections(self):
r"""The tuple of current `Selection`\s."""
for sel in self._selections:
if sel.annotation.axes is None:
raise RuntimeError("Annotation unexpectedly removed; "
"use 'cursor.remove_selection' instead")
return tuple(self._selections)
@property
def visible(self):
"""
Whether selections are visible by default.
Setting this property also updates the visibility status of current
selections.
"""
return self._visible
@visible.setter
def visible(self, value):
self._visible = value
for sel in self.selections:
sel.annotation.set_visible(value)
sel.annotation.figure.canvas.draw_idle()
def add_selection(self, pi):
"""
Create an annotation for a `Selection` and register it.
Returns a new `Selection`, that has been registered by the `Cursor`,
with the added annotation set in the :attr:`annotation` field and, if
applicable, the highlighting artist in the :attr:`extras` field.
Emits the ``"add"`` event with the new `Selection` as argument. When
the event is emitted, the position of the annotation is temporarily
set to ``(nan, nan)``; if this position is not explicitly set by a
callback, then a suitable position will be automatically computed.
Likewise, if the text alignment is not explicitly set but the position
is, then a suitable alignment will be automatically computed.
"""
# pi: "pick_info", i.e. an incomplete selection.
# Pre-fetch the figure and axes, as callbacks may actually unset them.
figure = pi.artist.figure
axes = pi.artist.axes
if axes.get_renderer_cache() is None:
figure.canvas.draw() # Needed by draw_artist below anyways.
renderer = pi.artist.axes.get_renderer_cache()
ann = pi.artist.axes.annotate(
_pick_info.get_ann_text(*pi), xy=pi.target,
xytext=(np.nan, np.nan),
ha=_MarkedStr("center"), va=_MarkedStr("center"),
visible=self.visible,
**self.annotation_kwargs)
ann.draggable(use_blit=not self._multiple)
extras = []
if self._highlight:
hl = self.add_highlight(*pi)
if hl:
extras.append(hl)
sel = pi._replace(annotation=ann, extras=extras)
self._selections.append(sel)
for cb in self._callbacks["add"]:
cb(sel)
# Check that `ann.axes` is still set, as callbacks may have removed the
# annotation.
if ann.axes and ann.xyann == (np.nan, np.nan):
fig_bbox = figure.get_window_extent()
ax_bbox = axes.get_window_extent()
overlaps = []
for idx, annotation_position in enumerate(
self.annotation_positions):
ann.set(**annotation_position)
# Work around matplotlib/matplotlib#7614: position update is
# missing.
ann.update_positions(renderer)
bbox = ann.get_window_extent(renderer)
overlaps.append(
(_get_rounded_intersection_area(fig_bbox, bbox),
_get_rounded_intersection_area(ax_bbox, bbox),
# Avoid needlessly jumping around by breaking ties using
# the last used position as default.
idx == self._last_auto_position))
auto_position = max(range(len(overlaps)), key=overlaps.__getitem__)
ann.set(**self.annotation_positions[auto_position])
self._last_auto_position = auto_position
else:
if isinstance(ann.get_ha(), _MarkedStr):
ann.set_ha({-1: "right", 0: "center", 1: "left"}[
np.sign(np.nan_to_num(ann.xyann[0]))])
if isinstance(ann.get_va(), _MarkedStr):
ann.set_va({-1: "top", 0: "center", 1: "bottom"}[
np.sign(np.nan_to_num(ann.xyann[1]))])
if (extras
or len(self.selections) > 1 and not self._multiple
or not figure.canvas.supports_blit):
# Either:
# - there may be more things to draw, or
# - annotation removal will make a full redraw necessary, or
# - blitting is not (yet) supported.
figure.canvas.draw_idle()
elif ann.axes:
# Fast path, only needed if the annotation has not been immediately
# removed.
figure.draw_artist(ann)
# Explicit argument needed on MacOSX backend.
figure.canvas.blit(figure.bbox)
# Removal comes after addition so that the fast blitting path works.
if not self._multiple:
for sel in self.selections[:-1]:
self.remove_selection(sel)
return sel
def add_highlight(self, artist, *args, **kwargs):
"""
Create, add, and return a highlighting artist.
This method is should be called with an "unpacked" `Selection`,
possibly with some fields set to None.
It is up to the caller to register the artist with the proper
`Selection` (by calling ``sel.extras.append`` on the result of this
method) in order to ensure cleanup upon deselection.
"""
hl = _pick_info.make_highlight(
artist, *args,
**ChainMap({"highlight_kwargs": self.highlight_kwargs}, kwargs))
if hl:
artist.axes.add_artist(hl)
return hl
def connect(self, event, func=None):
"""
Connect a callback to a `Cursor` event; return the callback.
Two events can be connected to:
- callbacks connected to the ``"add"`` event are called when a
`Selection` is added, with that selection as only argument;
- callbacks connected to the ``"remove"`` event are called when a
`Selection` is removed, with that selection as only argument.
This method can also be used as a decorator::
@cursor.connect("add")
def on_add(sel):
...
Examples of callbacks::
# Change the annotation text and alignment:
lambda sel: sel.annotation.set(
text=sel.artist.get_label(), # or use e.g. sel.target.index
ha="center", va="bottom")
# Make label non-draggable:
lambda sel: sel.draggable(False)
"""
if event not in self._callbacks:
raise ValueError("{!r} is not a valid cursor event".format(event))
if func is None:
return partial(self.connect, event)
self._callbacks[event].append(func)
return func
def disconnect(self, event, cb):
"""
Disconnect a previously connected callback.
If a callback is connected multiple times, only one connection is
removed.
"""
try:
self._callbacks[event].remove(cb)
except KeyError:
raise ValueError("{!r} is not a valid cursor event".format(event))
except ValueError:
raise ValueError("Callback {} is not registered".format(event))
def remove(self):
"""
Remove a cursor.
Remove all `Selection`\\s, disconnect all callbacks, and allow the
cursor to be garbage collected.
"""
for disconnectors in self._disconnectors:
disconnectors()
for sel in self.selections:
self.remove_selection(sel)
for s in type(self)._keep_alive.values():
with suppress(KeyError):
s.remove(self)
def _nonhover_handler(self, event):
if event.name == "button_press_event":
if event.button == self.bindings["select"]:
self._on_select_button_press(event)
if event.button == self.bindings["deselect"]:
self._on_deselect_button_press(event)
def _hover_handler(self, event):
if event.name == "motion_notify_event" and event.button is None:
# Filter away events where the mouse is pressed, in particular to
# avoid conflicts between hover and draggable.
self._on_select_button_press(event)
elif (event.name == "button_press_event"
and event.button == self.bindings["deselect"]):
# Still allow removing the annotation by right clicking.
self._on_deselect_button_press(event)
def _filter_mouse_event(self, event):
# Accept the event iff we are enabled, and either
# - no other widget is active, and this is not the second click of a
# double click (to prevent double selection), or
# - another widget is active, and this is a double click (to bypass
# the widget lock).
return (self.enabled
and event.canvas.widgetlock.locked() == event.dblclick)
def _on_select_button_press(self, event):
if not self._filter_mouse_event(event):
return
# Work around lack of support for twinned axes.
per_axes_event = {ax: _reassigned_axes_event(event, ax)
for ax in {artist.axes for artist in self.artists}}
pis = []
for artist in self.artists:
if (artist.axes is None # Removed or figure-level artist.
or event.canvas is not artist.figure.canvas
or not artist.axes.contains(event)[0]): # Cropped by axes.
continue
pi = _pick_info.compute_pick(artist, per_axes_event[artist.axes])
if pi and not any((pi.artist, tuple(pi.target))
== (other.artist, tuple(other.target))
for other in self._selections):
pis.append(pi)
if not pis:
return
self.add_selection(min(pis, key=lambda pi: pi.dist))
def _on_deselect_button_press(self, event):
if not self._filter_mouse_event(event):
return
for sel in self.selections[::-1]: # LIFO.
ann = sel.annotation
if event.canvas is not ann.figure.canvas:
continue
contained, _ = ann.contains(event)
if contained:
self.remove_selection(sel)
break
def _on_key_press(self, event):
if event.key == self.bindings["toggle_enabled"]:
self.enabled = not self.enabled
elif event.key == self.bindings["toggle_visible"]:
self.visible = not self.visible
try:
sel = self.selections[-1]
except IndexError:
return
for key in ["left", "right", "up", "down"]:
if event.key == self.bindings[key]:
self.remove_selection(sel)
self.add_selection(_pick_info.move(*sel, key=key))
break
def remove_selection(self, sel):
"""Remove a `Selection`."""
self._selections.remove(sel)
# <artist>.figure will be unset so we save them first.
figures = {artist.figure for artist in [sel.annotation] + sel.extras}
# ValueError is raised if the artist has already been removed.
with suppress(ValueError):
sel.annotation.remove()
for artist in sel.extras:
with suppress(ValueError):
artist.remove()
for cb in self._callbacks["remove"]:
cb(sel)
for figure in figures:
figure.canvas.draw_idle()
def cursor(pickables=None, **kwargs):
"""
Create a `Cursor` for a list of artists, containers, and axes.
Parameters
----------
pickables : Optional[List[Union[Artist, Container, Axes, Figure]]]
All artists and containers in the list or on any of the axes or
figures passed in the list are selectable by the constructed `Cursor`.
Defaults to all artists and containers on any of the figures that
:mod:`~matplotlib.pyplot` is tracking. Note that the latter will only
work when relying on pyplot, not when figures are directly instantiated
(e.g., when manually embedding Matplotlib in a GUI toolkit).
**kwargs
Keyword arguments are passed to the `Cursor` constructor.
"""
if pickables is None:
# Do not import pyplot ourselves to avoid forcing the backend.
plt = sys.modules.get("matplotlib.pyplot")
pickables = [
plt.figure(num) for num in plt.get_fignums()] if plt else []
elif (isinstance(pickables, Container)
or not isinstance(pickables, Iterable)):
pickables = [pickables]
def iter_unpack_figures(pickables):
for entry in pickables:
if isinstance(entry, Figure):
yield from entry.axes
else:
yield entry
def iter_unpack_axes(pickables):
for entry in pickables:
if isinstance(entry, Axes):
yield from _iter_axes_subartists(entry)
containers.extend(entry.containers)
elif isinstance(entry, Container):
containers.append(entry)
else:
yield entry
containers = []
artists = list(iter_unpack_axes(iter_unpack_figures(pickables)))
for container in containers:
contained = list(filter(None, container.get_children()))
for artist in contained:
with suppress(ValueError):
artists.remove(artist)
if contained:
artists.append(_pick_info.ContainerArtist(container))
return Cursor(artists, **kwargs)
|
anntzer/mplcursors
|
lib/mplcursors/_mplcursors.py
|
_reassigned_axes_event
|
python
|
def _reassigned_axes_event(event, ax):
event = copy.copy(event)
event.xdata, event.ydata = (
ax.transData.inverted().transform_point((event.x, event.y)))
return event
|
Reassign *event* to *ax*.
|
train
|
https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_mplcursors.py#L91-L96
| null |
from collections import ChainMap, Counter
from collections.abc import Iterable
from contextlib import suppress
import copy
from functools import partial
import sys
import weakref
from weakref import WeakKeyDictionary
from matplotlib.axes import Axes
from matplotlib.container import Container
from matplotlib.figure import Figure
import numpy as np
from . import _pick_info
_default_bindings = dict(
select=1,
deselect=3,
left="shift+left",
right="shift+right",
up="shift+up",
down="shift+down",
toggle_enabled="e",
toggle_visible="v",
)
_default_annotation_kwargs = dict(
textcoords="offset points",
bbox=dict(
boxstyle="round,pad=.5",
fc="yellow",
alpha=.5,
ec="k",
),
arrowprops=dict(
arrowstyle="->",
connectionstyle="arc3",
shrinkB=0,
ec="k",
),
)
_default_annotation_positions = [
dict(position=(-15, 15), ha="right", va="bottom"),
dict(position=(15, 15), ha="left", va="bottom"),
dict(position=(15, -15), ha="left", va="top"),
dict(position=(-15, -15), ha="right", va="top"),
]
_default_highlight_kwargs = dict(
# Only the kwargs corresponding to properties of the artist will be passed.
# Line2D.
color="yellow",
markeredgecolor="yellow",
linewidth=3,
markeredgewidth=3,
# PathCollection.
facecolor="yellow",
edgecolor="yellow",
)
class _MarkedStr(str):
"""A string subclass solely for marking purposes."""
def _get_rounded_intersection_area(bbox_1, bbox_2):
"""Compute the intersection area between two bboxes rounded to 8 digits."""
# The rounding allows sorting areas without floating point issues.
bbox = bbox_1.intersection(bbox_1, bbox_2)
return round(bbox.width * bbox.height, 8) if bbox else 0
def _iter_axes_subartists(ax):
r"""Yield all child `Artist`\s (*not* `Container`\s) of *ax*."""
yield from ax.collections
yield from ax.images
yield from ax.lines
yield from ax.patches
yield from ax.texts
def _is_alive(artist):
    """Check whether *artist* is still present on its parent axes."""
    # Dead if the reference is gone or the artist has no parent axes.
    if not artist or not artist.axes:
        return False
    # ContainerArtist wrappers are alive iff their container is still
    # registered on the axes; plain artists iff they are still among the
    # axes' child artists.
    if isinstance(artist, _pick_info.ContainerArtist):
        return artist.container in artist.axes.containers
    return artist in _iter_axes_subartists(artist.axes)
class Cursor:
"""
A cursor for selecting Matplotlib artists.
Attributes
----------
bindings : dict
See the *bindings* keyword argument to the constructor.
annotation_kwargs : dict
See the *annotation_kwargs* keyword argument to the constructor.
annotation_positions : dict
See the *annotation_positions* keyword argument to the constructor.
highlight_kwargs : dict
See the *highlight_kwargs* keyword argument to the constructor.
"""
_keep_alive = WeakKeyDictionary()
def __init__(self,
artists,
*,
multiple=False,
highlight=False,
hover=False,
bindings=None,
annotation_kwargs=None,
annotation_positions=None,
highlight_kwargs=None):
"""
Construct a cursor.
Parameters
----------
artists : List[Artist]
A list of artists that can be selected by this cursor.
multiple : bool, optional
Whether multiple artists can be "on" at the same time (defaults to
False).
highlight : bool, optional
Whether to also highlight the selected artist. If so,
"highlighter" artists will be placed as the first item in the
:attr:`extras` attribute of the `Selection`.
hover : bool, optional
Whether to select artists upon hovering instead of by clicking.
(Hovering over an artist while a button is pressed will not trigger
a selection; right clicking on an annotation will still remove it.)
bindings : dict, optional
A mapping of button and keybindings to actions. Valid entries are:
================ ==================================================
'select' mouse button to select an artist
(default: 1)
'deselect' mouse button to deselect an artist
(default: 3)
'left' move to the previous point in the selected path,
or to the left in the selected image
(default: shift+left)
'right' move to the next point in the selected path, or to
the right in the selected image
(default: shift+right)
'up' move up in the selected image
(default: shift+up)
'down' move down in the selected image
(default: shift+down)
'toggle_enabled' toggle whether the cursor is active
(default: e)
'toggle_visible' toggle default cursor visibility and apply it to
all cursors (default: v)
================ ==================================================
Missing entries will be set to the defaults. In order to not
assign any binding to an action, set it to ``None``.
annotation_kwargs : dict, optional
Keyword argments passed to the `annotate
<matplotlib.axes.Axes.annotate>` call.
annotation_positions : List[dict], optional
List of positions tried by the annotation positioning algorithm.
highlight_kwargs : dict, optional
Keyword arguments used to create a highlighted artist.
"""
artists = list(artists)
# Be careful with GC.
self._artists = [weakref.ref(artist) for artist in artists]
for artist in artists:
type(self)._keep_alive.setdefault(artist, set()).add(self)
self._multiple = multiple
self._highlight = highlight
self._visible = True
self._enabled = True
self._selections = []
self._last_auto_position = None
self._callbacks = {"add": [], "remove": []}
connect_pairs = [("key_press_event", self._on_key_press)]
if hover:
if multiple:
raise ValueError("'hover' and 'multiple' are incompatible")
connect_pairs += [
("motion_notify_event", self._hover_handler),
("button_press_event", self._hover_handler)]
else:
connect_pairs += [
("button_press_event", self._nonhover_handler)]
self._disconnectors = [
partial(canvas.mpl_disconnect, canvas.mpl_connect(*pair))
for pair in connect_pairs
for canvas in {artist.figure.canvas for artist in artists}]
bindings = dict(ChainMap(bindings if bindings is not None else {},
_default_bindings))
unknown_bindings = set(bindings) - set(_default_bindings)
if unknown_bindings:
raise ValueError("Unknown binding(s): {}".format(
", ".join(sorted(unknown_bindings))))
duplicate_bindings = [
k for k, v in Counter(list(bindings.values())).items() if v > 1]
if duplicate_bindings:
raise ValueError("Duplicate binding(s): {}".format(
", ".join(sorted(map(str, duplicate_bindings)))))
self.bindings = bindings
self.annotation_kwargs = (
annotation_kwargs if annotation_kwargs is not None
else copy.deepcopy(_default_annotation_kwargs))
self.annotation_positions = (
annotation_positions if annotation_positions is not None
else copy.deepcopy(_default_annotation_positions))
self.highlight_kwargs = (
highlight_kwargs if highlight_kwargs is not None
else copy.deepcopy(_default_highlight_kwargs))
@property
def artists(self):
"""The tuple of selectable artists."""
# Work around matplotlib/matplotlib#6982: `cla()` does not clear
# `.axes`.
return tuple(filter(_is_alive, (ref() for ref in self._artists)))
@property
def enabled(self):
"""Whether clicks are registered for picking and unpicking events."""
return self._enabled
@enabled.setter
def enabled(self, value):
self._enabled = value
@property
def selections(self):
r"""The tuple of current `Selection`\s."""
for sel in self._selections:
if sel.annotation.axes is None:
raise RuntimeError("Annotation unexpectedly removed; "
"use 'cursor.remove_selection' instead")
return tuple(self._selections)
@property
def visible(self):
"""
Whether selections are visible by default.
Setting this property also updates the visibility status of current
selections.
"""
return self._visible
@visible.setter
def visible(self, value):
self._visible = value
for sel in self.selections:
sel.annotation.set_visible(value)
sel.annotation.figure.canvas.draw_idle()
def add_selection(self, pi):
    """
    Create an annotation for a `Selection` and register it.

    Returns a new `Selection`, that has been registered by the `Cursor`,
    with the added annotation set in the :attr:`annotation` field and, if
    applicable, the highlighting artist in the :attr:`extras` field.

    Emits the ``"add"`` event with the new `Selection` as argument.  When
    the event is emitted, the position of the annotation is temporarily
    set to ``(nan, nan)``; if this position is not explicitly set by a
    callback, then a suitable position will be automatically computed.
    Likewise, if the text alignment is not explicitly set but the position
    is, then a suitable alignment will be automatically computed.
    """
    # pi: "pick_info", i.e. an incomplete selection.
    # Pre-fetch the figure and axes, as callbacks may actually unset them.
    figure = pi.artist.figure
    axes = pi.artist.axes
    # A cached renderer is required both for the automatic placement
    # below (get_window_extent) and for the blitting fast path.
    if axes.get_renderer_cache() is None:
        figure.canvas.draw()  # Needed by draw_artist below anyways.
    renderer = pi.artist.axes.get_renderer_cache()
    # Sentinels: (nan, nan) marks a position not yet chosen, and
    # `_MarkedStr` marks alignments still at their defaults, so that we
    # can tell below whether an "add" callback explicitly set them.
    ann = pi.artist.axes.annotate(
        _pick_info.get_ann_text(*pi), xy=pi.target,
        xytext=(np.nan, np.nan),
        ha=_MarkedStr("center"), va=_MarkedStr("center"),
        visible=self.visible,
        **self.annotation_kwargs)
    ann.draggable(use_blit=not self._multiple)
    extras = []
    if self._highlight:
        hl = self.add_highlight(*pi)
        if hl:
            extras.append(hl)
    sel = pi._replace(annotation=ann, extras=extras)
    self._selections.append(sel)
    # Notify "add" callbacks; they may reposition, realign, or even
    # remove the annotation.
    for cb in self._callbacks["add"]:
        cb(sel)
    # Check that `ann.axes` is still set, as callbacks may have removed the
    # annotation.
    if ann.axes and ann.xyann == (np.nan, np.nan):
        # No callback chose a position: try each candidate position and
        # keep the one whose text box overlaps the figure/axes the most.
        fig_bbox = figure.get_window_extent()
        ax_bbox = axes.get_window_extent()
        overlaps = []
        for idx, annotation_position in enumerate(
                self.annotation_positions):
            ann.set(**annotation_position)
            # Work around matplotlib/matplotlib#7614: position update is
            # missing.
            ann.update_positions(renderer)
            bbox = ann.get_window_extent(renderer)
            overlaps.append(
                (_get_rounded_intersection_area(fig_bbox, bbox),
                 _get_rounded_intersection_area(ax_bbox, bbox),
                 # Avoid needlessly jumping around by breaking ties using
                 # the last used position as default.
                 idx == self._last_auto_position))
        auto_position = max(range(len(overlaps)), key=overlaps.__getitem__)
        ann.set(**self.annotation_positions[auto_position])
        self._last_auto_position = auto_position
    else:
        # A callback set the position; fill in any alignment still at its
        # sentinel default based on which side of the target the text is.
        if isinstance(ann.get_ha(), _MarkedStr):
            ann.set_ha({-1: "right", 0: "center", 1: "left"}[
                np.sign(np.nan_to_num(ann.xyann[0]))])
        if isinstance(ann.get_va(), _MarkedStr):
            ann.set_va({-1: "top", 0: "center", 1: "bottom"}[
                np.sign(np.nan_to_num(ann.xyann[1]))])
    if (extras
            or len(self.selections) > 1 and not self._multiple
            or not figure.canvas.supports_blit):
        # Either:
        # - there may be more things to draw, or
        # - annotation removal will make a full redraw necessary, or
        # - blitting is not (yet) supported.
        figure.canvas.draw_idle()
    elif ann.axes:
        # Fast path, only needed if the annotation has not been immediately
        # removed.
        figure.draw_artist(ann)
        # Explicit argument needed on MacOSX backend.
        figure.canvas.blit(figure.bbox)
    # Removal comes after addition so that the fast blitting path works.
    if not self._multiple:
        for sel in self.selections[:-1]:
            self.remove_selection(sel)
    return sel
def add_highlight(self, artist, *args, **kwargs):
    """
    Create, add, and return a highlighting artist.

    This method should be called with an "unpacked" `Selection`, possibly
    with some fields set to None.

    It is up to the caller to register the artist with the proper
    `Selection` (by calling ``sel.extras.append`` on the result of this
    method) in order to ensure cleanup upon deselection.
    """
    # The cursor-wide "highlight_kwargs" entry takes precedence over a
    # same-named entry in `kwargs` (first mapping of the ChainMap wins);
    # all other entries of `kwargs` are passed through.
    options = ChainMap({"highlight_kwargs": self.highlight_kwargs}, kwargs)
    hl = _pick_info.make_highlight(artist, *args, **options)
    if not hl:
        # Nothing to highlight for this artist type.
        return None
    artist.axes.add_artist(hl)
    return hl
def connect(self, event, func=None):
"""
Connect a callback to a `Cursor` event; return the callback.
Two events can be connected to:
- callbacks connected to the ``"add"`` event are called when a
`Selection` is added, with that selection as only argument;
- callbacks connected to the ``"remove"`` event are called when a
`Selection` is removed, with that selection as only argument.
This method can also be used as a decorator::
@cursor.connect("add")
def on_add(sel):
...
Examples of callbacks::
# Change the annotation text and alignment:
lambda sel: sel.annotation.set(
text=sel.artist.get_label(), # or use e.g. sel.target.index
ha="center", va="bottom")
# Make label non-draggable:
lambda sel: sel.draggable(False)
"""
if event not in self._callbacks:
raise ValueError("{!r} is not a valid cursor event".format(event))
if func is None:
return partial(self.connect, event)
self._callbacks[event].append(func)
return func
def disconnect(self, event, cb):
"""
Disconnect a previously connected callback.
If a callback is connected multiple times, only one connection is
removed.
"""
try:
self._callbacks[event].remove(cb)
except KeyError:
raise ValueError("{!r} is not a valid cursor event".format(event))
except ValueError:
raise ValueError("Callback {} is not registered".format(event))
def remove(self):
    """
    Remove a cursor.

    Remove all `Selection`\\s, disconnect all callbacks, and allow the
    cursor to be garbage collected.
    """
    # Undo every mpl_connect made at construction time.
    for disconnectors in self._disconnectors:
        disconnectors()
    # `selections` returns a tuple, so removing during iteration is safe.
    for sel in self.selections:
        self.remove_selection(sel)
    # Drop ourselves from the per-artist keep-alive sets; `set.remove`
    # raises KeyError for sets we are not a member of, hence the suppress.
    for s in type(self)._keep_alive.values():
        with suppress(KeyError):
            s.remove(self)
def _nonhover_handler(self, event):
if event.name == "button_press_event":
if event.button == self.bindings["select"]:
self._on_select_button_press(event)
if event.button == self.bindings["deselect"]:
self._on_deselect_button_press(event)
def _hover_handler(self, event):
if event.name == "motion_notify_event" and event.button is None:
# Filter away events where the mouse is pressed, in particular to
# avoid conflicts between hover and draggable.
self._on_select_button_press(event)
elif (event.name == "button_press_event"
and event.button == self.bindings["deselect"]):
# Still allow removing the annotation by right clicking.
self._on_deselect_button_press(event)
def _filter_mouse_event(self, event):
# Accept the event iff we are enabled, and either
# - no other widget is active, and this is not the second click of a
# double click (to prevent double selection), or
# - another widget is active, and this is a double click (to bypass
# the widget lock).
return (self.enabled
and event.canvas.widgetlock.locked() == event.dblclick)
def _on_select_button_press(self, event):
    """Try to create a selection for a (filtered) select mouse press."""
    if not self._filter_mouse_event(event):
        return
    # Work around lack of support for twinned axes: recompute the event's
    # data coordinates once per axes, so each artist sees the event in
    # its own coordinate system.
    per_axes_event = {ax: _reassigned_axes_event(event, ax)
                      for ax in {artist.axes for artist in self.artists}}
    pis = []
    for artist in self.artists:
        if (artist.axes is None  # Removed or figure-level artist.
                or event.canvas is not artist.figure.canvas
                or not artist.axes.contains(event)[0]):  # Cropped by axes.
            continue
        pi = _pick_info.compute_pick(artist, per_axes_event[artist.axes])
        # Skip picks whose target duplicates an existing selection.
        if pi and not any((pi.artist, tuple(pi.target))
                          == (other.artist, tuple(other.target))
                          for other in self._selections):
            pis.append(pi)
    if not pis:
        return
    # Among all candidate picks, select the one closest to the mouse.
    self.add_selection(min(pis, key=lambda pi: pi.dist))
def _on_deselect_button_press(self, event):
if not self._filter_mouse_event(event):
return
for sel in self.selections[::-1]: # LIFO.
ann = sel.annotation
if event.canvas is not ann.figure.canvas:
continue
contained, _ = ann.contains(event)
if contained:
self.remove_selection(sel)
break
def _on_key_press(self, event):
    """Handle the toggle and selection-motion keybindings."""
    if event.key == self.bindings["toggle_enabled"]:
        self.enabled = not self.enabled
    elif event.key == self.bindings["toggle_visible"]:
        self.visible = not self.visible
    # The motion keys act on the most recently added selection, if any.
    try:
        sel = self.selections[-1]
    except IndexError:
        return
    for key in ["left", "right", "up", "down"]:
        if event.key == self.bindings[key]:
            # Moving is implemented as remove + re-add at the new target.
            self.remove_selection(sel)
            self.add_selection(_pick_info.move(*sel, key=key))
            break
def remove_selection(self, sel):
    """Remove a `Selection`, its artists, and trigger "remove" callbacks."""
    self._selections.remove(sel)
    # <artist>.figure will be unset so we save them first.
    figures = {artist.figure for artist in [sel.annotation] + sel.extras}
    # ValueError is raised if the artist has already been removed.
    with suppress(ValueError):
        sel.annotation.remove()
    for artist in sel.extras:
        with suppress(ValueError):
            artist.remove()
    # Notify callbacks after the artists are gone, then redraw each
    # affected figure once.
    for cb in self._callbacks["remove"]:
        cb(sel)
    for figure in figures:
        figure.canvas.draw_idle()
def cursor(pickables=None, **kwargs):
    """
    Create a `Cursor` for a list of artists, containers, and axes.

    Parameters
    ----------
    pickables : Optional[List[Union[Artist, Container, Axes, Figure]]]
        All artists and containers in the list or on any of the axes or
        figures passed in the list are selectable by the constructed
        `Cursor`.  Defaults to all artists and containers on any of the
        figures that :mod:`~matplotlib.pyplot` is tracking.  Note that the
        latter will only work when relying on pyplot, not when figures are
        directly instantiated (e.g., when manually embedding Matplotlib in
        a GUI toolkit).
    **kwargs
        Keyword arguments are passed to the `Cursor` constructor.
    """
    if pickables is None:
        # Do not import pyplot ourselves to avoid forcing the backend.
        plt = sys.modules.get("matplotlib.pyplot")
        pickables = [
            plt.figure(num) for num in plt.get_fignums()] if plt else []
    elif (isinstance(pickables, Container)
          or not isinstance(pickables, Iterable)):
        # Wrap a single artist/container.  A Container is itself iterable,
        # so it must be special-cased before the Iterable check.
        pickables = [pickables]

    def iter_unpack_figures(pickables):
        # Replace figures by their axes; pass everything else through.
        for entry in pickables:
            if isinstance(entry, Figure):
                yield from entry.axes
            else:
                yield entry

    def iter_unpack_axes(pickables):
        # Replace axes by their subartists, accumulating containers on the
        # side (via the `containers` closure) for the dedup pass below.
        for entry in pickables:
            if isinstance(entry, Axes):
                yield from _iter_axes_subartists(entry)
                containers.extend(entry.containers)
            elif isinstance(entry, Container):
                containers.append(entry)
            else:
                yield entry

    containers = []
    artists = list(iter_unpack_axes(iter_unpack_figures(pickables)))
    for container in containers:
        # Pick containers as a whole: drop their children from `artists`
        # and wrap each container in a single ContainerArtist instead.
        contained = list(filter(None, container.get_children()))
        for artist in contained:
            with suppress(ValueError):
                artists.remove(artist)
        if contained:
            artists.append(_pick_info.ContainerArtist(container))
    return Cursor(artists, **kwargs)
|
anntzer/mplcursors
|
lib/mplcursors/_mplcursors.py
|
cursor
|
python
|
def cursor(pickables=None, **kwargs):
if pickables is None:
# Do not import pyplot ourselves to avoid forcing the backend.
plt = sys.modules.get("matplotlib.pyplot")
pickables = [
plt.figure(num) for num in plt.get_fignums()] if plt else []
elif (isinstance(pickables, Container)
or not isinstance(pickables, Iterable)):
pickables = [pickables]
def iter_unpack_figures(pickables):
for entry in pickables:
if isinstance(entry, Figure):
yield from entry.axes
else:
yield entry
def iter_unpack_axes(pickables):
for entry in pickables:
if isinstance(entry, Axes):
yield from _iter_axes_subartists(entry)
containers.extend(entry.containers)
elif isinstance(entry, Container):
containers.append(entry)
else:
yield entry
containers = []
artists = list(iter_unpack_axes(iter_unpack_figures(pickables)))
for container in containers:
contained = list(filter(None, container.get_children()))
for artist in contained:
with suppress(ValueError):
artists.remove(artist)
if contained:
artists.append(_pick_info.ContainerArtist(container))
return Cursor(artists, **kwargs)
|
Create a `Cursor` for a list of artists, containers, and axes.
Parameters
----------
pickables : Optional[List[Union[Artist, Container, Axes, Figure]]]
All artists and containers in the list or on any of the axes or
figures passed in the list are selectable by the constructed `Cursor`.
Defaults to all artists and containers on any of the figures that
:mod:`~matplotlib.pyplot` is tracking. Note that the latter will only
work when relying on pyplot, not when figures are directly instantiated
(e.g., when manually embedding Matplotlib in a GUI toolkit).
**kwargs
Keyword arguments are passed to the `Cursor` constructor.
|
train
|
https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_mplcursors.py#L546-L601
|
[
"def iter_unpack_figures(pickables):\n for entry in pickables:\n if isinstance(entry, Figure):\n yield from entry.axes\n else:\n yield entry\n",
"def iter_unpack_axes(pickables):\n for entry in pickables:\n if isinstance(entry, Axes):\n yield from _iter_axes_subartists(entry)\n containers.extend(entry.containers)\n elif isinstance(entry, Container):\n containers.append(entry)\n else:\n yield entry\n"
] |
from collections import ChainMap, Counter
from collections.abc import Iterable
from contextlib import suppress
import copy
from functools import partial
import sys
import weakref
from weakref import WeakKeyDictionary
from matplotlib.axes import Axes
from matplotlib.container import Container
from matplotlib.figure import Figure
import numpy as np
from . import _pick_info
# Default action-to-button/key map; see the `bindings` argument to `Cursor`.
_default_bindings = dict(
    select=1,
    deselect=3,
    left="shift+left",
    right="shift+right",
    up="shift+up",
    down="shift+down",
    toggle_enabled="e",
    toggle_visible="v",
)
# Defaults forwarded to `Axes.annotate` when creating a selection's label.
_default_annotation_kwargs = dict(
    textcoords="offset points",
    bbox=dict(
        boxstyle="round,pad=.5",
        fc="yellow",
        alpha=.5,
        ec="k",
    ),
    arrowprops=dict(
        arrowstyle="->",
        connectionstyle="arc3",
        shrinkB=0,
        ec="k",
    ),
)
# Candidate offsets/alignments tried by the automatic annotation placement
# in `Cursor.add_selection` (one per quadrant around the target point).
_default_annotation_positions = [
    dict(position=(-15, 15), ha="right", va="bottom"),
    dict(position=(15, 15), ha="left", va="bottom"),
    dict(position=(15, -15), ha="left", va="top"),
    dict(position=(-15, -15), ha="right", va="top"),
]
_default_highlight_kwargs = dict(
    # Only the kwargs corresponding to properties of the artist will be passed.
    # Line2D.
    color="yellow",
    markeredgecolor="yellow",
    linewidth=3,
    markeredgewidth=3,
    # PathCollection.
    facecolor="yellow",
    edgecolor="yellow",
)
class _MarkedStr(str):
    """A string subclass solely for marking purposes."""
    # Used by `Cursor.add_selection` to tag the default "center" alignment
    # values, so that it can later tell whether an "add" callback
    # explicitly overrode the alignment (plain str) or left the default
    # in place (_MarkedStr).
def _get_rounded_intersection_area(bbox_1, bbox_2):
"""Compute the intersection area between two bboxes rounded to 8 digits."""
# The rounding allows sorting areas without floating point issues.
bbox = bbox_1.intersection(bbox_1, bbox_2)
return round(bbox.width * bbox.height, 8) if bbox else 0
def _iter_axes_subartists(ax):
r"""Yield all child `Artist`\s (*not* `Container`\s) of *ax*."""
yield from ax.collections
yield from ax.images
yield from ax.lines
yield from ax.patches
yield from ax.texts
def _is_alive(artist):
    """Check whether *artist* is still present on its parent axes."""
    # Dead weakref (None) or detached artist: definitely not alive.
    if not artist or not artist.axes:
        return False
    # Wrapped containers are tracked via their axes' container list...
    if isinstance(artist, _pick_info.ContainerArtist):
        return artist.container in artist.axes.containers
    # ...plain artists via the axes' regular child lists.
    return artist in _iter_axes_subartists(artist.axes)
def _reassigned_axes_event(event, ax):
"""Reassign *event* to *ax*."""
event = copy.copy(event)
event.xdata, event.ydata = (
ax.transData.inverted().transform_point((event.x, event.y)))
return event
class Cursor:
"""
A cursor for selecting Matplotlib artists.
Attributes
----------
bindings : dict
See the *bindings* keyword argument to the constructor.
annotation_kwargs : dict
See the *annotation_kwargs* keyword argument to the constructor.
annotation_positions : dict
See the *annotation_positions* keyword argument to the constructor.
highlight_kwargs : dict
See the *highlight_kwargs* keyword argument to the constructor.
"""
_keep_alive = WeakKeyDictionary()
def __init__(self,
artists,
*,
multiple=False,
highlight=False,
hover=False,
bindings=None,
annotation_kwargs=None,
annotation_positions=None,
highlight_kwargs=None):
"""
Construct a cursor.
Parameters
----------
artists : List[Artist]
A list of artists that can be selected by this cursor.
multiple : bool, optional
Whether multiple artists can be "on" at the same time (defaults to
False).
highlight : bool, optional
Whether to also highlight the selected artist. If so,
"highlighter" artists will be placed as the first item in the
:attr:`extras` attribute of the `Selection`.
hover : bool, optional
Whether to select artists upon hovering instead of by clicking.
(Hovering over an artist while a button is pressed will not trigger
a selection; right clicking on an annotation will still remove it.)
bindings : dict, optional
A mapping of button and keybindings to actions. Valid entries are:
================ ==================================================
'select' mouse button to select an artist
(default: 1)
'deselect' mouse button to deselect an artist
(default: 3)
'left' move to the previous point in the selected path,
or to the left in the selected image
(default: shift+left)
'right' move to the next point in the selected path, or to
the right in the selected image
(default: shift+right)
'up' move up in the selected image
(default: shift+up)
'down' move down in the selected image
(default: shift+down)
'toggle_enabled' toggle whether the cursor is active
(default: e)
'toggle_visible' toggle default cursor visibility and apply it to
all cursors (default: v)
================ ==================================================
Missing entries will be set to the defaults. In order to not
assign any binding to an action, set it to ``None``.
annotation_kwargs : dict, optional
Keyword argments passed to the `annotate
<matplotlib.axes.Axes.annotate>` call.
annotation_positions : List[dict], optional
List of positions tried by the annotation positioning algorithm.
highlight_kwargs : dict, optional
Keyword arguments used to create a highlighted artist.
"""
artists = list(artists)
# Be careful with GC.
self._artists = [weakref.ref(artist) for artist in artists]
for artist in artists:
type(self)._keep_alive.setdefault(artist, set()).add(self)
self._multiple = multiple
self._highlight = highlight
self._visible = True
self._enabled = True
self._selections = []
self._last_auto_position = None
self._callbacks = {"add": [], "remove": []}
connect_pairs = [("key_press_event", self._on_key_press)]
if hover:
if multiple:
raise ValueError("'hover' and 'multiple' are incompatible")
connect_pairs += [
("motion_notify_event", self._hover_handler),
("button_press_event", self._hover_handler)]
else:
connect_pairs += [
("button_press_event", self._nonhover_handler)]
self._disconnectors = [
partial(canvas.mpl_disconnect, canvas.mpl_connect(*pair))
for pair in connect_pairs
for canvas in {artist.figure.canvas for artist in artists}]
bindings = dict(ChainMap(bindings if bindings is not None else {},
_default_bindings))
unknown_bindings = set(bindings) - set(_default_bindings)
if unknown_bindings:
raise ValueError("Unknown binding(s): {}".format(
", ".join(sorted(unknown_bindings))))
duplicate_bindings = [
k for k, v in Counter(list(bindings.values())).items() if v > 1]
if duplicate_bindings:
raise ValueError("Duplicate binding(s): {}".format(
", ".join(sorted(map(str, duplicate_bindings)))))
self.bindings = bindings
self.annotation_kwargs = (
annotation_kwargs if annotation_kwargs is not None
else copy.deepcopy(_default_annotation_kwargs))
self.annotation_positions = (
annotation_positions if annotation_positions is not None
else copy.deepcopy(_default_annotation_positions))
self.highlight_kwargs = (
highlight_kwargs if highlight_kwargs is not None
else copy.deepcopy(_default_highlight_kwargs))
@property
def artists(self):
"""The tuple of selectable artists."""
# Work around matplotlib/matplotlib#6982: `cla()` does not clear
# `.axes`.
return tuple(filter(_is_alive, (ref() for ref in self._artists)))
@property
def enabled(self):
"""Whether clicks are registered for picking and unpicking events."""
return self._enabled
@enabled.setter
def enabled(self, value):
self._enabled = value
@property
def selections(self):
r"""The tuple of current `Selection`\s."""
for sel in self._selections:
if sel.annotation.axes is None:
raise RuntimeError("Annotation unexpectedly removed; "
"use 'cursor.remove_selection' instead")
return tuple(self._selections)
@property
def visible(self):
"""
Whether selections are visible by default.
Setting this property also updates the visibility status of current
selections.
"""
return self._visible
@visible.setter
def visible(self, value):
self._visible = value
for sel in self.selections:
sel.annotation.set_visible(value)
sel.annotation.figure.canvas.draw_idle()
def add_selection(self, pi):
"""
Create an annotation for a `Selection` and register it.
Returns a new `Selection`, that has been registered by the `Cursor`,
with the added annotation set in the :attr:`annotation` field and, if
applicable, the highlighting artist in the :attr:`extras` field.
Emits the ``"add"`` event with the new `Selection` as argument. When
the event is emitted, the position of the annotation is temporarily
set to ``(nan, nan)``; if this position is not explicitly set by a
callback, then a suitable position will be automatically computed.
Likewise, if the text alignment is not explicitly set but the position
is, then a suitable alignment will be automatically computed.
"""
# pi: "pick_info", i.e. an incomplete selection.
# Pre-fetch the figure and axes, as callbacks may actually unset them.
figure = pi.artist.figure
axes = pi.artist.axes
if axes.get_renderer_cache() is None:
figure.canvas.draw() # Needed by draw_artist below anyways.
renderer = pi.artist.axes.get_renderer_cache()
ann = pi.artist.axes.annotate(
_pick_info.get_ann_text(*pi), xy=pi.target,
xytext=(np.nan, np.nan),
ha=_MarkedStr("center"), va=_MarkedStr("center"),
visible=self.visible,
**self.annotation_kwargs)
ann.draggable(use_blit=not self._multiple)
extras = []
if self._highlight:
hl = self.add_highlight(*pi)
if hl:
extras.append(hl)
sel = pi._replace(annotation=ann, extras=extras)
self._selections.append(sel)
for cb in self._callbacks["add"]:
cb(sel)
# Check that `ann.axes` is still set, as callbacks may have removed the
# annotation.
if ann.axes and ann.xyann == (np.nan, np.nan):
fig_bbox = figure.get_window_extent()
ax_bbox = axes.get_window_extent()
overlaps = []
for idx, annotation_position in enumerate(
self.annotation_positions):
ann.set(**annotation_position)
# Work around matplotlib/matplotlib#7614: position update is
# missing.
ann.update_positions(renderer)
bbox = ann.get_window_extent(renderer)
overlaps.append(
(_get_rounded_intersection_area(fig_bbox, bbox),
_get_rounded_intersection_area(ax_bbox, bbox),
# Avoid needlessly jumping around by breaking ties using
# the last used position as default.
idx == self._last_auto_position))
auto_position = max(range(len(overlaps)), key=overlaps.__getitem__)
ann.set(**self.annotation_positions[auto_position])
self._last_auto_position = auto_position
else:
if isinstance(ann.get_ha(), _MarkedStr):
ann.set_ha({-1: "right", 0: "center", 1: "left"}[
np.sign(np.nan_to_num(ann.xyann[0]))])
if isinstance(ann.get_va(), _MarkedStr):
ann.set_va({-1: "top", 0: "center", 1: "bottom"}[
np.sign(np.nan_to_num(ann.xyann[1]))])
if (extras
or len(self.selections) > 1 and not self._multiple
or not figure.canvas.supports_blit):
# Either:
# - there may be more things to draw, or
# - annotation removal will make a full redraw necessary, or
# - blitting is not (yet) supported.
figure.canvas.draw_idle()
elif ann.axes:
# Fast path, only needed if the annotation has not been immediately
# removed.
figure.draw_artist(ann)
# Explicit argument needed on MacOSX backend.
figure.canvas.blit(figure.bbox)
# Removal comes after addition so that the fast blitting path works.
if not self._multiple:
for sel in self.selections[:-1]:
self.remove_selection(sel)
return sel
def add_highlight(self, artist, *args, **kwargs):
"""
Create, add, and return a highlighting artist.
This method is should be called with an "unpacked" `Selection`,
possibly with some fields set to None.
It is up to the caller to register the artist with the proper
`Selection` (by calling ``sel.extras.append`` on the result of this
method) in order to ensure cleanup upon deselection.
"""
hl = _pick_info.make_highlight(
artist, *args,
**ChainMap({"highlight_kwargs": self.highlight_kwargs}, kwargs))
if hl:
artist.axes.add_artist(hl)
return hl
def connect(self, event, func=None):
"""
Connect a callback to a `Cursor` event; return the callback.
Two events can be connected to:
- callbacks connected to the ``"add"`` event are called when a
`Selection` is added, with that selection as only argument;
- callbacks connected to the ``"remove"`` event are called when a
`Selection` is removed, with that selection as only argument.
This method can also be used as a decorator::
@cursor.connect("add")
def on_add(sel):
...
Examples of callbacks::
# Change the annotation text and alignment:
lambda sel: sel.annotation.set(
text=sel.artist.get_label(), # or use e.g. sel.target.index
ha="center", va="bottom")
# Make label non-draggable:
lambda sel: sel.draggable(False)
"""
if event not in self._callbacks:
raise ValueError("{!r} is not a valid cursor event".format(event))
if func is None:
return partial(self.connect, event)
self._callbacks[event].append(func)
return func
def disconnect(self, event, cb):
"""
Disconnect a previously connected callback.
If a callback is connected multiple times, only one connection is
removed.
"""
try:
self._callbacks[event].remove(cb)
except KeyError:
raise ValueError("{!r} is not a valid cursor event".format(event))
except ValueError:
raise ValueError("Callback {} is not registered".format(event))
def remove(self):
"""
Remove a cursor.
Remove all `Selection`\\s, disconnect all callbacks, and allow the
cursor to be garbage collected.
"""
for disconnectors in self._disconnectors:
disconnectors()
for sel in self.selections:
self.remove_selection(sel)
for s in type(self)._keep_alive.values():
with suppress(KeyError):
s.remove(self)
def _nonhover_handler(self, event):
if event.name == "button_press_event":
if event.button == self.bindings["select"]:
self._on_select_button_press(event)
if event.button == self.bindings["deselect"]:
self._on_deselect_button_press(event)
def _hover_handler(self, event):
if event.name == "motion_notify_event" and event.button is None:
# Filter away events where the mouse is pressed, in particular to
# avoid conflicts between hover and draggable.
self._on_select_button_press(event)
elif (event.name == "button_press_event"
and event.button == self.bindings["deselect"]):
# Still allow removing the annotation by right clicking.
self._on_deselect_button_press(event)
def _filter_mouse_event(self, event):
# Accept the event iff we are enabled, and either
# - no other widget is active, and this is not the second click of a
# double click (to prevent double selection), or
# - another widget is active, and this is a double click (to bypass
# the widget lock).
return (self.enabled
and event.canvas.widgetlock.locked() == event.dblclick)
def _on_select_button_press(self, event):
if not self._filter_mouse_event(event):
return
# Work around lack of support for twinned axes.
per_axes_event = {ax: _reassigned_axes_event(event, ax)
for ax in {artist.axes for artist in self.artists}}
pis = []
for artist in self.artists:
if (artist.axes is None # Removed or figure-level artist.
or event.canvas is not artist.figure.canvas
or not artist.axes.contains(event)[0]): # Cropped by axes.
continue
pi = _pick_info.compute_pick(artist, per_axes_event[artist.axes])
if pi and not any((pi.artist, tuple(pi.target))
== (other.artist, tuple(other.target))
for other in self._selections):
pis.append(pi)
if not pis:
return
self.add_selection(min(pis, key=lambda pi: pi.dist))
def _on_deselect_button_press(self, event):
if not self._filter_mouse_event(event):
return
for sel in self.selections[::-1]: # LIFO.
ann = sel.annotation
if event.canvas is not ann.figure.canvas:
continue
contained, _ = ann.contains(event)
if contained:
self.remove_selection(sel)
break
def _on_key_press(self, event):
if event.key == self.bindings["toggle_enabled"]:
self.enabled = not self.enabled
elif event.key == self.bindings["toggle_visible"]:
self.visible = not self.visible
try:
sel = self.selections[-1]
except IndexError:
return
for key in ["left", "right", "up", "down"]:
if event.key == self.bindings[key]:
self.remove_selection(sel)
self.add_selection(_pick_info.move(*sel, key=key))
break
def remove_selection(self, sel):
"""Remove a `Selection`."""
self._selections.remove(sel)
# <artist>.figure will be unset so we save them first.
figures = {artist.figure for artist in [sel.annotation] + sel.extras}
# ValueError is raised if the artist has already been removed.
with suppress(ValueError):
sel.annotation.remove()
for artist in sel.extras:
with suppress(ValueError):
artist.remove()
for cb in self._callbacks["remove"]:
cb(sel)
for figure in figures:
figure.canvas.draw_idle()
|
anntzer/mplcursors
|
lib/mplcursors/_mplcursors.py
|
Cursor.selections
|
python
|
def selections(self):
r"""The tuple of current `Selection`\s."""
for sel in self._selections:
if sel.annotation.axes is None:
raise RuntimeError("Annotation unexpectedly removed; "
"use 'cursor.remove_selection' instead")
return tuple(self._selections)
|
r"""The tuple of current `Selection`\s.
|
train
|
https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_mplcursors.py#L259-L265
| null |
class Cursor:
"""
A cursor for selecting Matplotlib artists.
Attributes
----------
bindings : dict
See the *bindings* keyword argument to the constructor.
annotation_kwargs : dict
See the *annotation_kwargs* keyword argument to the constructor.
annotation_positions : dict
See the *annotation_positions* keyword argument to the constructor.
highlight_kwargs : dict
See the *highlight_kwargs* keyword argument to the constructor.
"""
_keep_alive = WeakKeyDictionary()
def __init__(self,
artists,
*,
multiple=False,
highlight=False,
hover=False,
bindings=None,
annotation_kwargs=None,
annotation_positions=None,
highlight_kwargs=None):
"""
Construct a cursor.
Parameters
----------
artists : List[Artist]
A list of artists that can be selected by this cursor.
multiple : bool, optional
Whether multiple artists can be "on" at the same time (defaults to
False).
highlight : bool, optional
Whether to also highlight the selected artist. If so,
"highlighter" artists will be placed as the first item in the
:attr:`extras` attribute of the `Selection`.
hover : bool, optional
Whether to select artists upon hovering instead of by clicking.
(Hovering over an artist while a button is pressed will not trigger
a selection; right clicking on an annotation will still remove it.)
bindings : dict, optional
A mapping of button and keybindings to actions. Valid entries are:
================ ==================================================
'select' mouse button to select an artist
(default: 1)
'deselect' mouse button to deselect an artist
(default: 3)
'left' move to the previous point in the selected path,
or to the left in the selected image
(default: shift+left)
'right' move to the next point in the selected path, or to
the right in the selected image
(default: shift+right)
'up' move up in the selected image
(default: shift+up)
'down' move down in the selected image
(default: shift+down)
'toggle_enabled' toggle whether the cursor is active
(default: e)
'toggle_visible' toggle default cursor visibility and apply it to
all cursors (default: v)
================ ==================================================
Missing entries will be set to the defaults. In order to not
assign any binding to an action, set it to ``None``.
annotation_kwargs : dict, optional
Keyword argments passed to the `annotate
<matplotlib.axes.Axes.annotate>` call.
annotation_positions : List[dict], optional
List of positions tried by the annotation positioning algorithm.
highlight_kwargs : dict, optional
Keyword arguments used to create a highlighted artist.
"""
artists = list(artists)
# Be careful with GC.
self._artists = [weakref.ref(artist) for artist in artists]
for artist in artists:
type(self)._keep_alive.setdefault(artist, set()).add(self)
self._multiple = multiple
self._highlight = highlight
self._visible = True
self._enabled = True
self._selections = []
self._last_auto_position = None
self._callbacks = {"add": [], "remove": []}
connect_pairs = [("key_press_event", self._on_key_press)]
if hover:
if multiple:
raise ValueError("'hover' and 'multiple' are incompatible")
connect_pairs += [
("motion_notify_event", self._hover_handler),
("button_press_event", self._hover_handler)]
else:
connect_pairs += [
("button_press_event", self._nonhover_handler)]
self._disconnectors = [
partial(canvas.mpl_disconnect, canvas.mpl_connect(*pair))
for pair in connect_pairs
for canvas in {artist.figure.canvas for artist in artists}]
bindings = dict(ChainMap(bindings if bindings is not None else {},
_default_bindings))
unknown_bindings = set(bindings) - set(_default_bindings)
if unknown_bindings:
raise ValueError("Unknown binding(s): {}".format(
", ".join(sorted(unknown_bindings))))
duplicate_bindings = [
k for k, v in Counter(list(bindings.values())).items() if v > 1]
if duplicate_bindings:
raise ValueError("Duplicate binding(s): {}".format(
", ".join(sorted(map(str, duplicate_bindings)))))
self.bindings = bindings
self.annotation_kwargs = (
annotation_kwargs if annotation_kwargs is not None
else copy.deepcopy(_default_annotation_kwargs))
self.annotation_positions = (
annotation_positions if annotation_positions is not None
else copy.deepcopy(_default_annotation_positions))
self.highlight_kwargs = (
highlight_kwargs if highlight_kwargs is not None
else copy.deepcopy(_default_highlight_kwargs))
@property
def artists(self):
"""The tuple of selectable artists."""
# Work around matplotlib/matplotlib#6982: `cla()` does not clear
# `.axes`.
return tuple(filter(_is_alive, (ref() for ref in self._artists)))
@property
def enabled(self):
"""Whether clicks are registered for picking and unpicking events."""
return self._enabled
@enabled.setter
def enabled(self, value):
self._enabled = value
@property
@property
def visible(self):
"""
Whether selections are visible by default.
Setting this property also updates the visibility status of current
selections.
"""
return self._visible
@visible.setter
def visible(self, value):
self._visible = value
for sel in self.selections:
sel.annotation.set_visible(value)
sel.annotation.figure.canvas.draw_idle()
def add_selection(self, pi):
"""
Create an annotation for a `Selection` and register it.
Returns a new `Selection`, that has been registered by the `Cursor`,
with the added annotation set in the :attr:`annotation` field and, if
applicable, the highlighting artist in the :attr:`extras` field.
Emits the ``"add"`` event with the new `Selection` as argument. When
the event is emitted, the position of the annotation is temporarily
set to ``(nan, nan)``; if this position is not explicitly set by a
callback, then a suitable position will be automatically computed.
Likewise, if the text alignment is not explicitly set but the position
is, then a suitable alignment will be automatically computed.
"""
# pi: "pick_info", i.e. an incomplete selection.
# Pre-fetch the figure and axes, as callbacks may actually unset them.
figure = pi.artist.figure
axes = pi.artist.axes
if axes.get_renderer_cache() is None:
figure.canvas.draw() # Needed by draw_artist below anyways.
renderer = pi.artist.axes.get_renderer_cache()
ann = pi.artist.axes.annotate(
_pick_info.get_ann_text(*pi), xy=pi.target,
xytext=(np.nan, np.nan),
ha=_MarkedStr("center"), va=_MarkedStr("center"),
visible=self.visible,
**self.annotation_kwargs)
ann.draggable(use_blit=not self._multiple)
extras = []
if self._highlight:
hl = self.add_highlight(*pi)
if hl:
extras.append(hl)
sel = pi._replace(annotation=ann, extras=extras)
self._selections.append(sel)
for cb in self._callbacks["add"]:
cb(sel)
# Check that `ann.axes` is still set, as callbacks may have removed the
# annotation.
if ann.axes and ann.xyann == (np.nan, np.nan):
fig_bbox = figure.get_window_extent()
ax_bbox = axes.get_window_extent()
overlaps = []
for idx, annotation_position in enumerate(
self.annotation_positions):
ann.set(**annotation_position)
# Work around matplotlib/matplotlib#7614: position update is
# missing.
ann.update_positions(renderer)
bbox = ann.get_window_extent(renderer)
overlaps.append(
(_get_rounded_intersection_area(fig_bbox, bbox),
_get_rounded_intersection_area(ax_bbox, bbox),
# Avoid needlessly jumping around by breaking ties using
# the last used position as default.
idx == self._last_auto_position))
auto_position = max(range(len(overlaps)), key=overlaps.__getitem__)
ann.set(**self.annotation_positions[auto_position])
self._last_auto_position = auto_position
else:
if isinstance(ann.get_ha(), _MarkedStr):
ann.set_ha({-1: "right", 0: "center", 1: "left"}[
np.sign(np.nan_to_num(ann.xyann[0]))])
if isinstance(ann.get_va(), _MarkedStr):
ann.set_va({-1: "top", 0: "center", 1: "bottom"}[
np.sign(np.nan_to_num(ann.xyann[1]))])
if (extras
or len(self.selections) > 1 and not self._multiple
or not figure.canvas.supports_blit):
# Either:
# - there may be more things to draw, or
# - annotation removal will make a full redraw necessary, or
# - blitting is not (yet) supported.
figure.canvas.draw_idle()
elif ann.axes:
# Fast path, only needed if the annotation has not been immediately
# removed.
figure.draw_artist(ann)
# Explicit argument needed on MacOSX backend.
figure.canvas.blit(figure.bbox)
# Removal comes after addition so that the fast blitting path works.
if not self._multiple:
for sel in self.selections[:-1]:
self.remove_selection(sel)
return sel
def add_highlight(self, artist, *args, **kwargs):
"""
Create, add, and return a highlighting artist.
This method should be called with an "unpacked" `Selection`,
possibly with some fields set to None.
It is up to the caller to register the artist with the proper
`Selection` (by calling ``sel.extras.append`` on the result of this
method) in order to ensure cleanup upon deselection.
"""
hl = _pick_info.make_highlight(
artist, *args,
**ChainMap({"highlight_kwargs": self.highlight_kwargs}, kwargs))
if hl:
artist.axes.add_artist(hl)
return hl
def connect(self, event, func=None):
"""
Connect a callback to a `Cursor` event; return the callback.
Two events can be connected to:
- callbacks connected to the ``"add"`` event are called when a
`Selection` is added, with that selection as only argument;
- callbacks connected to the ``"remove"`` event are called when a
`Selection` is removed, with that selection as only argument.
This method can also be used as a decorator::
@cursor.connect("add")
def on_add(sel):
...
Examples of callbacks::
# Change the annotation text and alignment:
lambda sel: sel.annotation.set(
text=sel.artist.get_label(), # or use e.g. sel.target.index
ha="center", va="bottom")
# Make label non-draggable:
lambda sel: sel.draggable(False)
"""
if event not in self._callbacks:
raise ValueError("{!r} is not a valid cursor event".format(event))
if func is None:
return partial(self.connect, event)
self._callbacks[event].append(func)
return func
def disconnect(self, event, cb):
"""
Disconnect a previously connected callback.
If a callback is connected multiple times, only one connection is
removed.
"""
try:
self._callbacks[event].remove(cb)
except KeyError:
raise ValueError("{!r} is not a valid cursor event".format(event))
except ValueError:
raise ValueError("Callback {} is not registered".format(event))
def remove(self):
"""
Remove a cursor.
Remove all `Selection`\\s, disconnect all callbacks, and allow the
cursor to be garbage collected.
"""
for disconnectors in self._disconnectors:
disconnectors()
for sel in self.selections:
self.remove_selection(sel)
for s in type(self)._keep_alive.values():
with suppress(KeyError):
s.remove(self)
def _nonhover_handler(self, event):
if event.name == "button_press_event":
if event.button == self.bindings["select"]:
self._on_select_button_press(event)
if event.button == self.bindings["deselect"]:
self._on_deselect_button_press(event)
def _hover_handler(self, event):
if event.name == "motion_notify_event" and event.button is None:
# Filter away events where the mouse is pressed, in particular to
# avoid conflicts between hover and draggable.
self._on_select_button_press(event)
elif (event.name == "button_press_event"
and event.button == self.bindings["deselect"]):
# Still allow removing the annotation by right clicking.
self._on_deselect_button_press(event)
def _filter_mouse_event(self, event):
# Accept the event iff we are enabled, and either
# - no other widget is active, and this is not the second click of a
# double click (to prevent double selection), or
# - another widget is active, and this is a double click (to bypass
# the widget lock).
return (self.enabled
and event.canvas.widgetlock.locked() == event.dblclick)
def _on_select_button_press(self, event):
if not self._filter_mouse_event(event):
return
# Work around lack of support for twinned axes.
per_axes_event = {ax: _reassigned_axes_event(event, ax)
for ax in {artist.axes for artist in self.artists}}
pis = []
for artist in self.artists:
if (artist.axes is None # Removed or figure-level artist.
or event.canvas is not artist.figure.canvas
or not artist.axes.contains(event)[0]): # Cropped by axes.
continue
pi = _pick_info.compute_pick(artist, per_axes_event[artist.axes])
if pi and not any((pi.artist, tuple(pi.target))
== (other.artist, tuple(other.target))
for other in self._selections):
pis.append(pi)
if not pis:
return
self.add_selection(min(pis, key=lambda pi: pi.dist))
def _on_deselect_button_press(self, event):
if not self._filter_mouse_event(event):
return
for sel in self.selections[::-1]: # LIFO.
ann = sel.annotation
if event.canvas is not ann.figure.canvas:
continue
contained, _ = ann.contains(event)
if contained:
self.remove_selection(sel)
break
def _on_key_press(self, event):
if event.key == self.bindings["toggle_enabled"]:
self.enabled = not self.enabled
elif event.key == self.bindings["toggle_visible"]:
self.visible = not self.visible
try:
sel = self.selections[-1]
except IndexError:
return
for key in ["left", "right", "up", "down"]:
if event.key == self.bindings[key]:
self.remove_selection(sel)
self.add_selection(_pick_info.move(*sel, key=key))
break
def remove_selection(self, sel):
"""Remove a `Selection`."""
self._selections.remove(sel)
# <artist>.figure will be unset so we save them first.
figures = {artist.figure for artist in [sel.annotation] + sel.extras}
# ValueError is raised if the artist has already been removed.
with suppress(ValueError):
sel.annotation.remove()
for artist in sel.extras:
with suppress(ValueError):
artist.remove()
for cb in self._callbacks["remove"]:
cb(sel)
for figure in figures:
figure.canvas.draw_idle()
|
anntzer/mplcursors
|
lib/mplcursors/_mplcursors.py
|
Cursor.add_selection
|
python
|
def add_selection(self, pi):
    """
    Create an annotation for a `Selection` and register it.

    Returns a new `Selection`, that has been registered by the `Cursor`,
    with the added annotation set in the :attr:`annotation` field and, if
    applicable, the highlighting artist in the :attr:`extras` field.

    Emits the ``"add"`` event with the new `Selection` as argument.  When
    the event is emitted, the position of the annotation is temporarily
    set to ``(nan, nan)``; if this position is not explicitly set by a
    callback, then a suitable position will be automatically computed.

    Likewise, if the text alignment is not explicitly set but the position
    is, then a suitable alignment will be automatically computed.
    """
    # pi: "pick_info", i.e. an incomplete selection.
    # Pre-fetch the figure and axes, as callbacks may actually unset them.
    figure = pi.artist.figure
    axes = pi.artist.axes
    if axes.get_renderer_cache() is None:
        figure.canvas.draw()  # Needed by draw_artist below anyways.
    renderer = pi.artist.axes.get_renderer_cache()
    ann = pi.artist.axes.annotate(
        _pick_info.get_ann_text(*pi), xy=pi.target,
        # (nan, nan) is the sentinel meaning "position not set by callback".
        xytext=(np.nan, np.nan),
        # _MarkedStr sentinels likewise flag "alignment not set by callback".
        ha=_MarkedStr("center"), va=_MarkedStr("center"),
        visible=self.visible,
        **self.annotation_kwargs)
    ann.draggable(use_blit=not self._multiple)
    extras = []
    if self._highlight:
        hl = self.add_highlight(*pi)
        if hl:
            extras.append(hl)
    sel = pi._replace(annotation=ann, extras=extras)
    self._selections.append(sel)
    for cb in self._callbacks["add"]:
        cb(sel)
    # Check that `ann.axes` is still set, as callbacks may have removed the
    # annotation.
    if ann.axes and ann.xyann == (np.nan, np.nan):
        # No callback moved the annotation: pick, among the candidate
        # positions, the one maximizing overlap with figure and axes.
        fig_bbox = figure.get_window_extent()
        ax_bbox = axes.get_window_extent()
        overlaps = []
        for idx, annotation_position in enumerate(
                self.annotation_positions):
            ann.set(**annotation_position)
            # Work around matplotlib/matplotlib#7614: position update is
            # missing.
            ann.update_positions(renderer)
            bbox = ann.get_window_extent(renderer)
            overlaps.append(
                (_get_rounded_intersection_area(fig_bbox, bbox),
                 _get_rounded_intersection_area(ax_bbox, bbox),
                 # Avoid needlessly jumping around by breaking ties using
                 # the last used position as default.
                 idx == self._last_auto_position))
        auto_position = max(range(len(overlaps)), key=overlaps.__getitem__)
        ann.set(**self.annotation_positions[auto_position])
        self._last_auto_position = auto_position
    else:
        # A callback set the position; derive any alignment it left at the
        # _MarkedStr sentinel from the sign of the text offset.
        if isinstance(ann.get_ha(), _MarkedStr):
            ann.set_ha({-1: "right", 0: "center", 1: "left"}[
                np.sign(np.nan_to_num(ann.xyann[0]))])
        if isinstance(ann.get_va(), _MarkedStr):
            ann.set_va({-1: "top", 0: "center", 1: "bottom"}[
                np.sign(np.nan_to_num(ann.xyann[1]))])
    if (extras
            or len(self.selections) > 1 and not self._multiple
            or not figure.canvas.supports_blit):
        # Either:
        # - there may be more things to draw, or
        # - annotation removal will make a full redraw necessary, or
        # - blitting is not (yet) supported.
        figure.canvas.draw_idle()
    elif ann.axes:
        # Fast path, only needed if the annotation has not been immediately
        # removed.
        figure.draw_artist(ann)
        # Explicit argument needed on MacOSX backend.
        figure.canvas.blit(figure.bbox)
    # Removal comes after addition so that the fast blitting path works.
    if not self._multiple:
        for sel in self.selections[:-1]:
            self.remove_selection(sel)
    return sel
|
Create an annotation for a `Selection` and register it.
Returns a new `Selection`, that has been registered by the `Cursor`,
with the added annotation set in the :attr:`annotation` field and, if
applicable, the highlighting artist in the :attr:`extras` field.
Emits the ``"add"`` event with the new `Selection` as argument. When
the event is emitted, the position of the annotation is temporarily
set to ``(nan, nan)``; if this position is not explicitly set by a
callback, then a suitable position will be automatically computed.
Likewise, if the text alignment is not explicitly set but the position
is, then a suitable alignment will be automatically computed.
|
train
|
https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_mplcursors.py#L284-L372
|
[
"def _get_rounded_intersection_area(bbox_1, bbox_2):\n \"\"\"Compute the intersection area between two bboxes rounded to 8 digits.\"\"\"\n # The rounding allows sorting areas without floating point issues.\n bbox = bbox_1.intersection(bbox_1, bbox_2)\n return round(bbox.width * bbox.height, 8) if bbox else 0\n",
"def add_highlight(self, artist, *args, **kwargs):\n \"\"\"\n Create, add, and return a highlighting artist.\n\n This method is should be called with an \"unpacked\" `Selection`,\n possibly with some fields set to None.\n\n It is up to the caller to register the artist with the proper\n `Selection` (by calling ``sel.extras.append`` on the result of this\n method) in order to ensure cleanup upon deselection.\n \"\"\"\n hl = _pick_info.make_highlight(\n artist, *args,\n **ChainMap({\"highlight_kwargs\": self.highlight_kwargs}, kwargs))\n if hl:\n artist.axes.add_artist(hl)\n return hl\n",
"def remove_selection(self, sel):\n \"\"\"Remove a `Selection`.\"\"\"\n self._selections.remove(sel)\n # <artist>.figure will be unset so we save them first.\n figures = {artist.figure for artist in [sel.annotation] + sel.extras}\n # ValueError is raised if the artist has already been removed.\n with suppress(ValueError):\n sel.annotation.remove()\n for artist in sel.extras:\n with suppress(ValueError):\n artist.remove()\n for cb in self._callbacks[\"remove\"]:\n cb(sel)\n for figure in figures:\n figure.canvas.draw_idle()\n"
] |
class Cursor:
    """
    A cursor for selecting Matplotlib artists.

    Attributes
    ----------
    bindings : dict
        See the *bindings* keyword argument to the constructor.
    annotation_kwargs : dict
        See the *annotation_kwargs* keyword argument to the constructor.
    annotation_positions : List[dict]
        See the *annotation_positions* keyword argument to the constructor.
    highlight_kwargs : dict
        See the *highlight_kwargs* keyword argument to the constructor.
    """

    # Maps each selectable artist to the set of cursors watching it, so that
    # a cursor stays alive as long as at least one of its artists does.
    _keep_alive = WeakKeyDictionary()

    def __init__(self,
                 artists,
                 *,
                 multiple=False,
                 highlight=False,
                 hover=False,
                 bindings=None,
                 annotation_kwargs=None,
                 annotation_positions=None,
                 highlight_kwargs=None):
        """
        Construct a cursor.

        Parameters
        ----------
        artists : List[Artist]
            A list of artists that can be selected by this cursor.
        multiple : bool, optional
            Whether multiple artists can be "on" at the same time (defaults to
            False).
        highlight : bool, optional
            Whether to also highlight the selected artist.  If so,
            "highlighter" artists will be placed as the first item in the
            :attr:`extras` attribute of the `Selection`.
        hover : bool, optional
            Whether to select artists upon hovering instead of by clicking.
            (Hovering over an artist while a button is pressed will not trigger
            a selection; right clicking on an annotation will still remove it.)
        bindings : dict, optional
            A mapping of button and keybindings to actions.  Valid entries are:

            ================ ==================================================
            'select'         mouse button to select an artist
                             (default: 1)
            'deselect'       mouse button to deselect an artist
                             (default: 3)
            'left'           move to the previous point in the selected path,
                             or to the left in the selected image
                             (default: shift+left)
            'right'          move to the next point in the selected path, or to
                             the right in the selected image
                             (default: shift+right)
            'up'             move up in the selected image
                             (default: shift+up)
            'down'           move down in the selected image
                             (default: shift+down)
            'toggle_enabled' toggle whether the cursor is active
                             (default: e)
            'toggle_visible' toggle default cursor visibility and apply it to
                             all cursors (default: v)
            ================ ==================================================

            Missing entries will be set to the defaults.  In order to not
            assign any binding to an action, set it to ``None``.
        annotation_kwargs : dict, optional
            Keyword arguments passed to the `annotate
            <matplotlib.axes.Axes.annotate>` call.
        annotation_positions : List[dict], optional
            List of positions tried by the annotation positioning algorithm.
        highlight_kwargs : dict, optional
            Keyword arguments used to create a highlighted artist.
        """
        artists = list(artists)
        # Be careful with GC: hold only weak references to the artists, and
        # tie this cursor's lifetime to theirs via _keep_alive.
        self._artists = [weakref.ref(artist) for artist in artists]
        for artist in artists:
            type(self)._keep_alive.setdefault(artist, set()).add(self)
        self._multiple = multiple
        self._highlight = highlight
        self._visible = True
        self._enabled = True
        self._selections = []
        self._last_auto_position = None
        self._callbacks = {"add": [], "remove": []}

        connect_pairs = [("key_press_event", self._on_key_press)]
        if hover:
            if multiple:
                raise ValueError("'hover' and 'multiple' are incompatible")
            connect_pairs += [
                ("motion_notify_event", self._hover_handler),
                ("button_press_event", self._hover_handler)]
        else:
            connect_pairs += [
                ("button_press_event", self._nonhover_handler)]
        # Save one disconnector per (event, canvas) pair so remove() can
        # cleanly undo every mpl_connect made here.
        self._disconnectors = [
            partial(canvas.mpl_disconnect, canvas.mpl_connect(*pair))
            for pair in connect_pairs
            for canvas in {artist.figure.canvas for artist in artists}]

        # User-supplied bindings override the defaults; missing keys fall
        # through to _default_bindings.
        bindings = dict(ChainMap(bindings if bindings is not None else {},
                                 _default_bindings))
        unknown_bindings = set(bindings) - set(_default_bindings)
        if unknown_bindings:
            raise ValueError("Unknown binding(s): {}".format(
                ", ".join(sorted(unknown_bindings))))
        # `None` disables a binding, so multiple `None`s are not duplicates.
        duplicate_bindings = [
            k for k, v in Counter(list(bindings.values())).items()
            if v > 1 and k is not None]
        if duplicate_bindings:
            raise ValueError("Duplicate binding(s): {}".format(
                ", ".join(sorted(map(str, duplicate_bindings)))))
        self.bindings = bindings

        self.annotation_kwargs = (
            annotation_kwargs if annotation_kwargs is not None
            else copy.deepcopy(_default_annotation_kwargs))
        self.annotation_positions = (
            annotation_positions if annotation_positions is not None
            else copy.deepcopy(_default_annotation_positions))
        self.highlight_kwargs = (
            highlight_kwargs if highlight_kwargs is not None
            else copy.deepcopy(_default_highlight_kwargs))

    @property
    def artists(self):
        """The tuple of selectable artists."""
        # Work around matplotlib/matplotlib#6982: `cla()` does not clear
        # `.axes`.
        return tuple(filter(_is_alive, (ref() for ref in self._artists)))

    @property
    def enabled(self):
        """Whether clicks are registered for picking and unpicking events."""
        return self._enabled

    @enabled.setter
    def enabled(self, value):
        self._enabled = value

    @property
    def selections(self):
        r"""The tuple of current `Selection`\s."""
        for sel in self._selections:
            if sel.annotation.axes is None:
                raise RuntimeError("Annotation unexpectedly removed; "
                                   "use 'cursor.remove_selection' instead")
        return tuple(self._selections)

    @property
    def visible(self):
        """
        Whether selections are visible by default.

        Setting this property also updates the visibility status of current
        selections.
        """
        return self._visible

    @visible.setter
    def visible(self, value):
        self._visible = value
        for sel in self.selections:
            sel.annotation.set_visible(value)
            sel.annotation.figure.canvas.draw_idle()

    def add_highlight(self, artist, *args, **kwargs):
        """
        Create, add, and return a highlighting artist.

        This method should be called with an "unpacked" `Selection`,
        possibly with some fields set to None.

        It is up to the caller to register the artist with the proper
        `Selection` (by calling ``sel.extras.append`` on the result of this
        method) in order to ensure cleanup upon deselection.
        """
        # Explicit kwargs take precedence over the cursor-wide defaults.
        hl = _pick_info.make_highlight(
            artist, *args,
            **ChainMap({"highlight_kwargs": self.highlight_kwargs}, kwargs))
        if hl:
            artist.axes.add_artist(hl)
        return hl

    def connect(self, event, func=None):
        """
        Connect a callback to a `Cursor` event; return the callback.

        Two events can be connected to:

        - callbacks connected to the ``"add"`` event are called when a
          `Selection` is added, with that selection as only argument;
        - callbacks connected to the ``"remove"`` event are called when a
          `Selection` is removed, with that selection as only argument.

        This method can also be used as a decorator::

            @cursor.connect("add")
            def on_add(sel):
                ...

        Examples of callbacks::

            # Change the annotation text and alignment:
            lambda sel: sel.annotation.set(
                text=sel.artist.get_label(),  # or use e.g. sel.target.index
                ha="center", va="bottom")

            # Make label non-draggable:
            lambda sel: sel.draggable(False)
        """
        if event not in self._callbacks:
            raise ValueError("{!r} is not a valid cursor event".format(event))
        if func is None:
            # Decorator usage: return a partial awaiting the function.
            return partial(self.connect, event)
        self._callbacks[event].append(func)
        return func

    def disconnect(self, event, cb):
        """
        Disconnect a previously connected callback.

        If a callback is connected multiple times, only one connection is
        removed.
        """
        try:
            self._callbacks[event].remove(cb)
        except KeyError:
            raise ValueError("{!r} is not a valid cursor event".format(event))
        except ValueError:
            # list.remove failed, i.e. this callback was never registered.
            raise ValueError("Callback {} is not registered".format(cb))

    def remove(self):
        """
        Remove a cursor.

        Remove all `Selection`\\s, disconnect all callbacks, and allow the
        cursor to be garbage collected.
        """
        for disconnector in self._disconnectors:
            disconnector()
        for sel in self.selections:
            self.remove_selection(sel)
        for s in type(self)._keep_alive.values():
            with suppress(KeyError):
                s.remove(self)

    def _nonhover_handler(self, event):
        if event.name == "button_press_event":
            if event.button == self.bindings["select"]:
                self._on_select_button_press(event)
            if event.button == self.bindings["deselect"]:
                self._on_deselect_button_press(event)

    def _hover_handler(self, event):
        if event.name == "motion_notify_event" and event.button is None:
            # Filter away events where the mouse is pressed, in particular to
            # avoid conflicts between hover and draggable.
            self._on_select_button_press(event)
        elif (event.name == "button_press_event"
              and event.button == self.bindings["deselect"]):
            # Still allow removing the annotation by right clicking.
            self._on_deselect_button_press(event)

    def _filter_mouse_event(self, event):
        # Accept the event iff we are enabled, and either
        # - no other widget is active, and this is not the second click of a
        #   double click (to prevent double selection), or
        # - another widget is active, and this is a double click (to bypass
        #   the widget lock).
        return (self.enabled
                and event.canvas.widgetlock.locked() == event.dblclick)

    def _on_select_button_press(self, event):
        if not self._filter_mouse_event(event):
            return
        # Work around lack of support for twinned axes.
        per_axes_event = {ax: _reassigned_axes_event(event, ax)
                          for ax in {artist.axes for artist in self.artists}}
        pis = []
        for artist in self.artists:
            if (artist.axes is None  # Removed or figure-level artist.
                    or event.canvas is not artist.figure.canvas
                    or not artist.axes.contains(event)[0]):  # Cropped by axes.
                continue
            pi = _pick_info.compute_pick(artist, per_axes_event[artist.axes])
            # Skip picks that duplicate an existing selection.
            if pi and not any((pi.artist, tuple(pi.target))
                              == (other.artist, tuple(other.target))
                              for other in self._selections):
                pis.append(pi)
        if not pis:
            return
        self.add_selection(min(pis, key=lambda pi: pi.dist))

    def _on_deselect_button_press(self, event):
        if not self._filter_mouse_event(event):
            return
        for sel in self.selections[::-1]:  # LIFO.
            ann = sel.annotation
            if event.canvas is not ann.figure.canvas:
                continue
            contained, _ = ann.contains(event)
            if contained:
                self.remove_selection(sel)
                break

    def _on_key_press(self, event):
        if event.key == self.bindings["toggle_enabled"]:
            self.enabled = not self.enabled
        elif event.key == self.bindings["toggle_visible"]:
            self.visible = not self.visible
        try:
            sel = self.selections[-1]
        except IndexError:
            return
        for key in ["left", "right", "up", "down"]:
            if event.key == self.bindings[key]:
                # Moving a selection is implemented as remove + re-add.
                self.remove_selection(sel)
                self.add_selection(_pick_info.move(*sel, key=key))
                break

    def remove_selection(self, sel):
        """Remove a `Selection`."""
        self._selections.remove(sel)
        # <artist>.figure will be unset so we save them first.
        figures = {artist.figure for artist in [sel.annotation] + sel.extras}
        # ValueError is raised if the artist has already been removed.
        with suppress(ValueError):
            sel.annotation.remove()
        for artist in sel.extras:
            with suppress(ValueError):
                artist.remove()
        for cb in self._callbacks["remove"]:
            cb(sel)
        for figure in figures:
            figure.canvas.draw_idle()
|
anntzer/mplcursors
|
lib/mplcursors/_mplcursors.py
|
Cursor.add_highlight
|
python
|
def add_highlight(self, artist, *args, **kwargs):
    """
    Create, add, and return a highlighting artist.

    This method should be called with an "unpacked" `Selection`,
    possibly with some fields set to None.

    It is up to the caller to register the artist with the proper
    `Selection` (by calling ``sel.extras.append`` on the result of this
    method) in order to ensure cleanup upon deselection.
    """
    # Explicit kwargs take precedence over the cursor-wide defaults.
    hl = _pick_info.make_highlight(
        artist, *args,
        **ChainMap({"highlight_kwargs": self.highlight_kwargs}, kwargs))
    if hl:
        artist.axes.add_artist(hl)
    return hl
|
Create, add, and return a highlighting artist.
This method should be called with an "unpacked" `Selection`,
possibly with some fields set to None.
It is up to the caller to register the artist with the proper
`Selection` (by calling ``sel.extras.append`` on the result of this
method) in order to ensure cleanup upon deselection.
|
train
|
https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_mplcursors.py#L374-L390
| null |
class Cursor:
"""
A cursor for selecting Matplotlib artists.
Attributes
----------
bindings : dict
See the *bindings* keyword argument to the constructor.
annotation_kwargs : dict
See the *annotation_kwargs* keyword argument to the constructor.
annotation_positions : List[dict]
See the *annotation_positions* keyword argument to the constructor.
highlight_kwargs : dict
See the *highlight_kwargs* keyword argument to the constructor.
"""
_keep_alive = WeakKeyDictionary()
def __init__(self,
artists,
*,
multiple=False,
highlight=False,
hover=False,
bindings=None,
annotation_kwargs=None,
annotation_positions=None,
highlight_kwargs=None):
"""
Construct a cursor.
Parameters
----------
artists : List[Artist]
A list of artists that can be selected by this cursor.
multiple : bool, optional
Whether multiple artists can be "on" at the same time (defaults to
False).
highlight : bool, optional
Whether to also highlight the selected artist. If so,
"highlighter" artists will be placed as the first item in the
:attr:`extras` attribute of the `Selection`.
hover : bool, optional
Whether to select artists upon hovering instead of by clicking.
(Hovering over an artist while a button is pressed will not trigger
a selection; right clicking on an annotation will still remove it.)
bindings : dict, optional
A mapping of button and keybindings to actions. Valid entries are:
================ ==================================================
'select' mouse button to select an artist
(default: 1)
'deselect' mouse button to deselect an artist
(default: 3)
'left' move to the previous point in the selected path,
or to the left in the selected image
(default: shift+left)
'right' move to the next point in the selected path, or to
the right in the selected image
(default: shift+right)
'up' move up in the selected image
(default: shift+up)
'down' move down in the selected image
(default: shift+down)
'toggle_enabled' toggle whether the cursor is active
(default: e)
'toggle_visible' toggle default cursor visibility and apply it to
all cursors (default: v)
================ ==================================================
Missing entries will be set to the defaults. In order to not
assign any binding to an action, set it to ``None``.
annotation_kwargs : dict, optional
Keyword arguments passed to the `annotate
<matplotlib.axes.Axes.annotate>` call.
annotation_positions : List[dict], optional
List of positions tried by the annotation positioning algorithm.
highlight_kwargs : dict, optional
Keyword arguments used to create a highlighted artist.
"""
artists = list(artists)
# Be careful with GC.
self._artists = [weakref.ref(artist) for artist in artists]
for artist in artists:
type(self)._keep_alive.setdefault(artist, set()).add(self)
self._multiple = multiple
self._highlight = highlight
self._visible = True
self._enabled = True
self._selections = []
self._last_auto_position = None
self._callbacks = {"add": [], "remove": []}
connect_pairs = [("key_press_event", self._on_key_press)]
if hover:
if multiple:
raise ValueError("'hover' and 'multiple' are incompatible")
connect_pairs += [
("motion_notify_event", self._hover_handler),
("button_press_event", self._hover_handler)]
else:
connect_pairs += [
("button_press_event", self._nonhover_handler)]
self._disconnectors = [
partial(canvas.mpl_disconnect, canvas.mpl_connect(*pair))
for pair in connect_pairs
for canvas in {artist.figure.canvas for artist in artists}]
bindings = dict(ChainMap(bindings if bindings is not None else {},
_default_bindings))
unknown_bindings = set(bindings) - set(_default_bindings)
if unknown_bindings:
raise ValueError("Unknown binding(s): {}".format(
", ".join(sorted(unknown_bindings))))
duplicate_bindings = [
k for k, v in Counter(list(bindings.values())).items() if v > 1]
if duplicate_bindings:
raise ValueError("Duplicate binding(s): {}".format(
", ".join(sorted(map(str, duplicate_bindings)))))
self.bindings = bindings
self.annotation_kwargs = (
annotation_kwargs if annotation_kwargs is not None
else copy.deepcopy(_default_annotation_kwargs))
self.annotation_positions = (
annotation_positions if annotation_positions is not None
else copy.deepcopy(_default_annotation_positions))
self.highlight_kwargs = (
highlight_kwargs if highlight_kwargs is not None
else copy.deepcopy(_default_highlight_kwargs))
@property
def artists(self):
"""The tuple of selectable artists."""
# Work around matplotlib/matplotlib#6982: `cla()` does not clear
# `.axes`.
return tuple(filter(_is_alive, (ref() for ref in self._artists)))
@property
def enabled(self):
"""Whether clicks are registered for picking and unpicking events."""
return self._enabled
@enabled.setter
def enabled(self, value):
self._enabled = value
@property
def selections(self):
r"""The tuple of current `Selection`\s."""
for sel in self._selections:
if sel.annotation.axes is None:
raise RuntimeError("Annotation unexpectedly removed; "
"use 'cursor.remove_selection' instead")
return tuple(self._selections)
@property
def visible(self):
"""
Whether selections are visible by default.
Setting this property also updates the visibility status of current
selections.
"""
return self._visible
@visible.setter
def visible(self, value):
self._visible = value
for sel in self.selections:
sel.annotation.set_visible(value)
sel.annotation.figure.canvas.draw_idle()
def add_selection(self, pi):
"""
Create an annotation for a `Selection` and register it.
Returns a new `Selection`, that has been registered by the `Cursor`,
with the added annotation set in the :attr:`annotation` field and, if
applicable, the highlighting artist in the :attr:`extras` field.
Emits the ``"add"`` event with the new `Selection` as argument. When
the event is emitted, the position of the annotation is temporarily
set to ``(nan, nan)``; if this position is not explicitly set by a
callback, then a suitable position will be automatically computed.
Likewise, if the text alignment is not explicitly set but the position
is, then a suitable alignment will be automatically computed.
"""
# pi: "pick_info", i.e. an incomplete selection.
# Pre-fetch the figure and axes, as callbacks may actually unset them.
figure = pi.artist.figure
axes = pi.artist.axes
if axes.get_renderer_cache() is None:
figure.canvas.draw() # Needed by draw_artist below anyways.
renderer = pi.artist.axes.get_renderer_cache()
ann = pi.artist.axes.annotate(
_pick_info.get_ann_text(*pi), xy=pi.target,
xytext=(np.nan, np.nan),
ha=_MarkedStr("center"), va=_MarkedStr("center"),
visible=self.visible,
**self.annotation_kwargs)
ann.draggable(use_blit=not self._multiple)
extras = []
if self._highlight:
hl = self.add_highlight(*pi)
if hl:
extras.append(hl)
sel = pi._replace(annotation=ann, extras=extras)
self._selections.append(sel)
for cb in self._callbacks["add"]:
cb(sel)
# Check that `ann.axes` is still set, as callbacks may have removed the
# annotation.
if ann.axes and ann.xyann == (np.nan, np.nan):
fig_bbox = figure.get_window_extent()
ax_bbox = axes.get_window_extent()
overlaps = []
for idx, annotation_position in enumerate(
self.annotation_positions):
ann.set(**annotation_position)
# Work around matplotlib/matplotlib#7614: position update is
# missing.
ann.update_positions(renderer)
bbox = ann.get_window_extent(renderer)
overlaps.append(
(_get_rounded_intersection_area(fig_bbox, bbox),
_get_rounded_intersection_area(ax_bbox, bbox),
# Avoid needlessly jumping around by breaking ties using
# the last used position as default.
idx == self._last_auto_position))
auto_position = max(range(len(overlaps)), key=overlaps.__getitem__)
ann.set(**self.annotation_positions[auto_position])
self._last_auto_position = auto_position
else:
if isinstance(ann.get_ha(), _MarkedStr):
ann.set_ha({-1: "right", 0: "center", 1: "left"}[
np.sign(np.nan_to_num(ann.xyann[0]))])
if isinstance(ann.get_va(), _MarkedStr):
ann.set_va({-1: "top", 0: "center", 1: "bottom"}[
np.sign(np.nan_to_num(ann.xyann[1]))])
if (extras
or len(self.selections) > 1 and not self._multiple
or not figure.canvas.supports_blit):
# Either:
# - there may be more things to draw, or
# - annotation removal will make a full redraw necessary, or
# - blitting is not (yet) supported.
figure.canvas.draw_idle()
elif ann.axes:
# Fast path, only needed if the annotation has not been immediately
# removed.
figure.draw_artist(ann)
# Explicit argument needed on MacOSX backend.
figure.canvas.blit(figure.bbox)
# Removal comes after addition so that the fast blitting path works.
if not self._multiple:
for sel in self.selections[:-1]:
self.remove_selection(sel)
return sel
def connect(self, event, func=None):
"""
Connect a callback to a `Cursor` event; return the callback.
Two events can be connected to:
- callbacks connected to the ``"add"`` event are called when a
`Selection` is added, with that selection as only argument;
- callbacks connected to the ``"remove"`` event are called when a
`Selection` is removed, with that selection as only argument.
This method can also be used as a decorator::
@cursor.connect("add")
def on_add(sel):
...
Examples of callbacks::
# Change the annotation text and alignment:
lambda sel: sel.annotation.set(
text=sel.artist.get_label(), # or use e.g. sel.target.index
ha="center", va="bottom")
# Make label non-draggable:
lambda sel: sel.draggable(False)
"""
if event not in self._callbacks:
raise ValueError("{!r} is not a valid cursor event".format(event))
if func is None:
return partial(self.connect, event)
self._callbacks[event].append(func)
return func
def disconnect(self, event, cb):
"""
Disconnect a previously connected callback.
If a callback is connected multiple times, only one connection is
removed.
"""
try:
self._callbacks[event].remove(cb)
except KeyError:
raise ValueError("{!r} is not a valid cursor event".format(event))
except ValueError:
raise ValueError("Callback {} is not registered".format(event))
def remove(self):
"""
Remove a cursor.
Remove all `Selection`\\s, disconnect all callbacks, and allow the
cursor to be garbage collected.
"""
for disconnectors in self._disconnectors:
disconnectors()
for sel in self.selections:
self.remove_selection(sel)
for s in type(self)._keep_alive.values():
with suppress(KeyError):
s.remove(self)
def _nonhover_handler(self, event):
if event.name == "button_press_event":
if event.button == self.bindings["select"]:
self._on_select_button_press(event)
if event.button == self.bindings["deselect"]:
self._on_deselect_button_press(event)
def _hover_handler(self, event):
if event.name == "motion_notify_event" and event.button is None:
# Filter away events where the mouse is pressed, in particular to
# avoid conflicts between hover and draggable.
self._on_select_button_press(event)
elif (event.name == "button_press_event"
and event.button == self.bindings["deselect"]):
# Still allow removing the annotation by right clicking.
self._on_deselect_button_press(event)
def _filter_mouse_event(self, event):
# Accept the event iff we are enabled, and either
# - no other widget is active, and this is not the second click of a
# double click (to prevent double selection), or
# - another widget is active, and this is a double click (to bypass
# the widget lock).
return (self.enabled
and event.canvas.widgetlock.locked() == event.dblclick)
def _on_select_button_press(self, event):
if not self._filter_mouse_event(event):
return
# Work around lack of support for twinned axes.
per_axes_event = {ax: _reassigned_axes_event(event, ax)
for ax in {artist.axes for artist in self.artists}}
pis = []
for artist in self.artists:
if (artist.axes is None # Removed or figure-level artist.
or event.canvas is not artist.figure.canvas
or not artist.axes.contains(event)[0]): # Cropped by axes.
continue
pi = _pick_info.compute_pick(artist, per_axes_event[artist.axes])
if pi and not any((pi.artist, tuple(pi.target))
== (other.artist, tuple(other.target))
for other in self._selections):
pis.append(pi)
if not pis:
return
self.add_selection(min(pis, key=lambda pi: pi.dist))
def _on_deselect_button_press(self, event):
if not self._filter_mouse_event(event):
return
for sel in self.selections[::-1]: # LIFO.
ann = sel.annotation
if event.canvas is not ann.figure.canvas:
continue
contained, _ = ann.contains(event)
if contained:
self.remove_selection(sel)
break
def _on_key_press(self, event):
if event.key == self.bindings["toggle_enabled"]:
self.enabled = not self.enabled
elif event.key == self.bindings["toggle_visible"]:
self.visible = not self.visible
try:
sel = self.selections[-1]
except IndexError:
return
for key in ["left", "right", "up", "down"]:
if event.key == self.bindings[key]:
self.remove_selection(sel)
self.add_selection(_pick_info.move(*sel, key=key))
break
def remove_selection(self, sel):
"""Remove a `Selection`."""
self._selections.remove(sel)
# <artist>.figure will be unset so we save them first.
figures = {artist.figure for artist in [sel.annotation] + sel.extras}
# ValueError is raised if the artist has already been removed.
with suppress(ValueError):
sel.annotation.remove()
for artist in sel.extras:
with suppress(ValueError):
artist.remove()
for cb in self._callbacks["remove"]:
cb(sel)
for figure in figures:
figure.canvas.draw_idle()
|
anntzer/mplcursors
|
lib/mplcursors/_mplcursors.py
|
Cursor.connect
|
python
|
def connect(self, event, func=None):
if event not in self._callbacks:
raise ValueError("{!r} is not a valid cursor event".format(event))
if func is None:
return partial(self.connect, event)
self._callbacks[event].append(func)
return func
|
Connect a callback to a `Cursor` event; return the callback.
Two events can be connected to:
- callbacks connected to the ``"add"`` event are called when a
`Selection` is added, with that selection as only argument;
- callbacks connected to the ``"remove"`` event are called when a
`Selection` is removed, with that selection as only argument.
This method can also be used as a decorator::
@cursor.connect("add")
def on_add(sel):
...
Examples of callbacks::
# Change the annotation text and alignment:
lambda sel: sel.annotation.set(
text=sel.artist.get_label(), # or use e.g. sel.target.index
ha="center", va="bottom")
# Make label non-draggable:
lambda sel: sel.draggable(False)
|
train
|
https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_mplcursors.py#L392-L424
| null |
class Cursor:
"""
A cursor for selecting Matplotlib artists.
Attributes
----------
bindings : dict
See the *bindings* keyword argument to the constructor.
annotation_kwargs : dict
See the *annotation_kwargs* keyword argument to the constructor.
annotation_positions : dict
See the *annotation_positions* keyword argument to the constructor.
highlight_kwargs : dict
See the *highlight_kwargs* keyword argument to the constructor.
"""
_keep_alive = WeakKeyDictionary()
def __init__(self,
artists,
*,
multiple=False,
highlight=False,
hover=False,
bindings=None,
annotation_kwargs=None,
annotation_positions=None,
highlight_kwargs=None):
"""
Construct a cursor.
Parameters
----------
artists : List[Artist]
A list of artists that can be selected by this cursor.
multiple : bool, optional
Whether multiple artists can be "on" at the same time (defaults to
False).
highlight : bool, optional
Whether to also highlight the selected artist. If so,
"highlighter" artists will be placed as the first item in the
:attr:`extras` attribute of the `Selection`.
hover : bool, optional
Whether to select artists upon hovering instead of by clicking.
(Hovering over an artist while a button is pressed will not trigger
a selection; right clicking on an annotation will still remove it.)
bindings : dict, optional
A mapping of button and keybindings to actions. Valid entries are:
================ ==================================================
'select' mouse button to select an artist
(default: 1)
'deselect' mouse button to deselect an artist
(default: 3)
'left' move to the previous point in the selected path,
or to the left in the selected image
(default: shift+left)
'right' move to the next point in the selected path, or to
the right in the selected image
(default: shift+right)
'up' move up in the selected image
(default: shift+up)
'down' move down in the selected image
(default: shift+down)
'toggle_enabled' toggle whether the cursor is active
(default: e)
'toggle_visible' toggle default cursor visibility and apply it to
all cursors (default: v)
================ ==================================================
Missing entries will be set to the defaults. In order to not
assign any binding to an action, set it to ``None``.
annotation_kwargs : dict, optional
Keyword argments passed to the `annotate
<matplotlib.axes.Axes.annotate>` call.
annotation_positions : List[dict], optional
List of positions tried by the annotation positioning algorithm.
highlight_kwargs : dict, optional
Keyword arguments used to create a highlighted artist.
"""
artists = list(artists)
# Be careful with GC.
self._artists = [weakref.ref(artist) for artist in artists]
for artist in artists:
type(self)._keep_alive.setdefault(artist, set()).add(self)
self._multiple = multiple
self._highlight = highlight
self._visible = True
self._enabled = True
self._selections = []
self._last_auto_position = None
self._callbacks = {"add": [], "remove": []}
connect_pairs = [("key_press_event", self._on_key_press)]
if hover:
if multiple:
raise ValueError("'hover' and 'multiple' are incompatible")
connect_pairs += [
("motion_notify_event", self._hover_handler),
("button_press_event", self._hover_handler)]
else:
connect_pairs += [
("button_press_event", self._nonhover_handler)]
self._disconnectors = [
partial(canvas.mpl_disconnect, canvas.mpl_connect(*pair))
for pair in connect_pairs
for canvas in {artist.figure.canvas for artist in artists}]
bindings = dict(ChainMap(bindings if bindings is not None else {},
_default_bindings))
unknown_bindings = set(bindings) - set(_default_bindings)
if unknown_bindings:
raise ValueError("Unknown binding(s): {}".format(
", ".join(sorted(unknown_bindings))))
duplicate_bindings = [
k for k, v in Counter(list(bindings.values())).items() if v > 1]
if duplicate_bindings:
raise ValueError("Duplicate binding(s): {}".format(
", ".join(sorted(map(str, duplicate_bindings)))))
self.bindings = bindings
self.annotation_kwargs = (
annotation_kwargs if annotation_kwargs is not None
else copy.deepcopy(_default_annotation_kwargs))
self.annotation_positions = (
annotation_positions if annotation_positions is not None
else copy.deepcopy(_default_annotation_positions))
self.highlight_kwargs = (
highlight_kwargs if highlight_kwargs is not None
else copy.deepcopy(_default_highlight_kwargs))
@property
def artists(self):
"""The tuple of selectable artists."""
# Work around matplotlib/matplotlib#6982: `cla()` does not clear
# `.axes`.
return tuple(filter(_is_alive, (ref() for ref in self._artists)))
@property
def enabled(self):
"""Whether clicks are registered for picking and unpicking events."""
return self._enabled
@enabled.setter
def enabled(self, value):
self._enabled = value
@property
def selections(self):
r"""The tuple of current `Selection`\s."""
for sel in self._selections:
if sel.annotation.axes is None:
raise RuntimeError("Annotation unexpectedly removed; "
"use 'cursor.remove_selection' instead")
return tuple(self._selections)
@property
def visible(self):
"""
Whether selections are visible by default.
Setting this property also updates the visibility status of current
selections.
"""
return self._visible
@visible.setter
def visible(self, value):
self._visible = value
for sel in self.selections:
sel.annotation.set_visible(value)
sel.annotation.figure.canvas.draw_idle()
def add_selection(self, pi):
"""
Create an annotation for a `Selection` and register it.
Returns a new `Selection`, that has been registered by the `Cursor`,
with the added annotation set in the :attr:`annotation` field and, if
applicable, the highlighting artist in the :attr:`extras` field.
Emits the ``"add"`` event with the new `Selection` as argument. When
the event is emitted, the position of the annotation is temporarily
set to ``(nan, nan)``; if this position is not explicitly set by a
callback, then a suitable position will be automatically computed.
Likewise, if the text alignment is not explicitly set but the position
is, then a suitable alignment will be automatically computed.
"""
# pi: "pick_info", i.e. an incomplete selection.
# Pre-fetch the figure and axes, as callbacks may actually unset them.
figure = pi.artist.figure
axes = pi.artist.axes
if axes.get_renderer_cache() is None:
figure.canvas.draw() # Needed by draw_artist below anyways.
renderer = pi.artist.axes.get_renderer_cache()
ann = pi.artist.axes.annotate(
_pick_info.get_ann_text(*pi), xy=pi.target,
xytext=(np.nan, np.nan),
ha=_MarkedStr("center"), va=_MarkedStr("center"),
visible=self.visible,
**self.annotation_kwargs)
ann.draggable(use_blit=not self._multiple)
extras = []
if self._highlight:
hl = self.add_highlight(*pi)
if hl:
extras.append(hl)
sel = pi._replace(annotation=ann, extras=extras)
self._selections.append(sel)
for cb in self._callbacks["add"]:
cb(sel)
# Check that `ann.axes` is still set, as callbacks may have removed the
# annotation.
if ann.axes and ann.xyann == (np.nan, np.nan):
fig_bbox = figure.get_window_extent()
ax_bbox = axes.get_window_extent()
overlaps = []
for idx, annotation_position in enumerate(
self.annotation_positions):
ann.set(**annotation_position)
# Work around matplotlib/matplotlib#7614: position update is
# missing.
ann.update_positions(renderer)
bbox = ann.get_window_extent(renderer)
overlaps.append(
(_get_rounded_intersection_area(fig_bbox, bbox),
_get_rounded_intersection_area(ax_bbox, bbox),
# Avoid needlessly jumping around by breaking ties using
# the last used position as default.
idx == self._last_auto_position))
auto_position = max(range(len(overlaps)), key=overlaps.__getitem__)
ann.set(**self.annotation_positions[auto_position])
self._last_auto_position = auto_position
else:
if isinstance(ann.get_ha(), _MarkedStr):
ann.set_ha({-1: "right", 0: "center", 1: "left"}[
np.sign(np.nan_to_num(ann.xyann[0]))])
if isinstance(ann.get_va(), _MarkedStr):
ann.set_va({-1: "top", 0: "center", 1: "bottom"}[
np.sign(np.nan_to_num(ann.xyann[1]))])
if (extras
or len(self.selections) > 1 and not self._multiple
or not figure.canvas.supports_blit):
# Either:
# - there may be more things to draw, or
# - annotation removal will make a full redraw necessary, or
# - blitting is not (yet) supported.
figure.canvas.draw_idle()
elif ann.axes:
# Fast path, only needed if the annotation has not been immediately
# removed.
figure.draw_artist(ann)
# Explicit argument needed on MacOSX backend.
figure.canvas.blit(figure.bbox)
# Removal comes after addition so that the fast blitting path works.
if not self._multiple:
for sel in self.selections[:-1]:
self.remove_selection(sel)
return sel
def add_highlight(self, artist, *args, **kwargs):
"""
Create, add, and return a highlighting artist.
This method is should be called with an "unpacked" `Selection`,
possibly with some fields set to None.
It is up to the caller to register the artist with the proper
`Selection` (by calling ``sel.extras.append`` on the result of this
method) in order to ensure cleanup upon deselection.
"""
hl = _pick_info.make_highlight(
artist, *args,
**ChainMap({"highlight_kwargs": self.highlight_kwargs}, kwargs))
if hl:
artist.axes.add_artist(hl)
return hl
def disconnect(self, event, cb):
"""
Disconnect a previously connected callback.
If a callback is connected multiple times, only one connection is
removed.
"""
try:
self._callbacks[event].remove(cb)
except KeyError:
raise ValueError("{!r} is not a valid cursor event".format(event))
except ValueError:
raise ValueError("Callback {} is not registered".format(event))
def remove(self):
"""
Remove a cursor.
Remove all `Selection`\\s, disconnect all callbacks, and allow the
cursor to be garbage collected.
"""
for disconnectors in self._disconnectors:
disconnectors()
for sel in self.selections:
self.remove_selection(sel)
for s in type(self)._keep_alive.values():
with suppress(KeyError):
s.remove(self)
def _nonhover_handler(self, event):
if event.name == "button_press_event":
if event.button == self.bindings["select"]:
self._on_select_button_press(event)
if event.button == self.bindings["deselect"]:
self._on_deselect_button_press(event)
def _hover_handler(self, event):
if event.name == "motion_notify_event" and event.button is None:
# Filter away events where the mouse is pressed, in particular to
# avoid conflicts between hover and draggable.
self._on_select_button_press(event)
elif (event.name == "button_press_event"
and event.button == self.bindings["deselect"]):
# Still allow removing the annotation by right clicking.
self._on_deselect_button_press(event)
def _filter_mouse_event(self, event):
# Accept the event iff we are enabled, and either
# - no other widget is active, and this is not the second click of a
# double click (to prevent double selection), or
# - another widget is active, and this is a double click (to bypass
# the widget lock).
return (self.enabled
and event.canvas.widgetlock.locked() == event.dblclick)
def _on_select_button_press(self, event):
if not self._filter_mouse_event(event):
return
# Work around lack of support for twinned axes.
per_axes_event = {ax: _reassigned_axes_event(event, ax)
for ax in {artist.axes for artist in self.artists}}
pis = []
for artist in self.artists:
if (artist.axes is None # Removed or figure-level artist.
or event.canvas is not artist.figure.canvas
or not artist.axes.contains(event)[0]): # Cropped by axes.
continue
pi = _pick_info.compute_pick(artist, per_axes_event[artist.axes])
if pi and not any((pi.artist, tuple(pi.target))
== (other.artist, tuple(other.target))
for other in self._selections):
pis.append(pi)
if not pis:
return
self.add_selection(min(pis, key=lambda pi: pi.dist))
def _on_deselect_button_press(self, event):
if not self._filter_mouse_event(event):
return
for sel in self.selections[::-1]: # LIFO.
ann = sel.annotation
if event.canvas is not ann.figure.canvas:
continue
contained, _ = ann.contains(event)
if contained:
self.remove_selection(sel)
break
def _on_key_press(self, event):
if event.key == self.bindings["toggle_enabled"]:
self.enabled = not self.enabled
elif event.key == self.bindings["toggle_visible"]:
self.visible = not self.visible
try:
sel = self.selections[-1]
except IndexError:
return
for key in ["left", "right", "up", "down"]:
if event.key == self.bindings[key]:
self.remove_selection(sel)
self.add_selection(_pick_info.move(*sel, key=key))
break
def remove_selection(self, sel):
"""Remove a `Selection`."""
self._selections.remove(sel)
# <artist>.figure will be unset so we save them first.
figures = {artist.figure for artist in [sel.annotation] + sel.extras}
# ValueError is raised if the artist has already been removed.
with suppress(ValueError):
sel.annotation.remove()
for artist in sel.extras:
with suppress(ValueError):
artist.remove()
for cb in self._callbacks["remove"]:
cb(sel)
for figure in figures:
figure.canvas.draw_idle()
|
anntzer/mplcursors
|
lib/mplcursors/_mplcursors.py
|
Cursor.disconnect
|
python
|
def disconnect(self, event, cb):
try:
self._callbacks[event].remove(cb)
except KeyError:
raise ValueError("{!r} is not a valid cursor event".format(event))
except ValueError:
raise ValueError("Callback {} is not registered".format(event))
|
Disconnect a previously connected callback.
If a callback is connected multiple times, only one connection is
removed.
|
train
|
https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_mplcursors.py#L426-L438
| null |
class Cursor:
"""
A cursor for selecting Matplotlib artists.
Attributes
----------
bindings : dict
See the *bindings* keyword argument to the constructor.
annotation_kwargs : dict
See the *annotation_kwargs* keyword argument to the constructor.
annotation_positions : dict
See the *annotation_positions* keyword argument to the constructor.
highlight_kwargs : dict
See the *highlight_kwargs* keyword argument to the constructor.
"""
_keep_alive = WeakKeyDictionary()
def __init__(self,
artists,
*,
multiple=False,
highlight=False,
hover=False,
bindings=None,
annotation_kwargs=None,
annotation_positions=None,
highlight_kwargs=None):
"""
Construct a cursor.
Parameters
----------
artists : List[Artist]
A list of artists that can be selected by this cursor.
multiple : bool, optional
Whether multiple artists can be "on" at the same time (defaults to
False).
highlight : bool, optional
Whether to also highlight the selected artist. If so,
"highlighter" artists will be placed as the first item in the
:attr:`extras` attribute of the `Selection`.
hover : bool, optional
Whether to select artists upon hovering instead of by clicking.
(Hovering over an artist while a button is pressed will not trigger
a selection; right clicking on an annotation will still remove it.)
bindings : dict, optional
A mapping of button and keybindings to actions. Valid entries are:
================ ==================================================
'select' mouse button to select an artist
(default: 1)
'deselect' mouse button to deselect an artist
(default: 3)
'left' move to the previous point in the selected path,
or to the left in the selected image
(default: shift+left)
'right' move to the next point in the selected path, or to
the right in the selected image
(default: shift+right)
'up' move up in the selected image
(default: shift+up)
'down' move down in the selected image
(default: shift+down)
'toggle_enabled' toggle whether the cursor is active
(default: e)
'toggle_visible' toggle default cursor visibility and apply it to
all cursors (default: v)
================ ==================================================
Missing entries will be set to the defaults. In order to not
assign any binding to an action, set it to ``None``.
annotation_kwargs : dict, optional
Keyword argments passed to the `annotate
<matplotlib.axes.Axes.annotate>` call.
annotation_positions : List[dict], optional
List of positions tried by the annotation positioning algorithm.
highlight_kwargs : dict, optional
Keyword arguments used to create a highlighted artist.
"""
artists = list(artists)
# Be careful with GC.
self._artists = [weakref.ref(artist) for artist in artists]
for artist in artists:
type(self)._keep_alive.setdefault(artist, set()).add(self)
self._multiple = multiple
self._highlight = highlight
self._visible = True
self._enabled = True
self._selections = []
self._last_auto_position = None
self._callbacks = {"add": [], "remove": []}
connect_pairs = [("key_press_event", self._on_key_press)]
if hover:
if multiple:
raise ValueError("'hover' and 'multiple' are incompatible")
connect_pairs += [
("motion_notify_event", self._hover_handler),
("button_press_event", self._hover_handler)]
else:
connect_pairs += [
("button_press_event", self._nonhover_handler)]
self._disconnectors = [
partial(canvas.mpl_disconnect, canvas.mpl_connect(*pair))
for pair in connect_pairs
for canvas in {artist.figure.canvas for artist in artists}]
bindings = dict(ChainMap(bindings if bindings is not None else {},
_default_bindings))
unknown_bindings = set(bindings) - set(_default_bindings)
if unknown_bindings:
raise ValueError("Unknown binding(s): {}".format(
", ".join(sorted(unknown_bindings))))
duplicate_bindings = [
k for k, v in Counter(list(bindings.values())).items() if v > 1]
if duplicate_bindings:
raise ValueError("Duplicate binding(s): {}".format(
", ".join(sorted(map(str, duplicate_bindings)))))
self.bindings = bindings
self.annotation_kwargs = (
annotation_kwargs if annotation_kwargs is not None
else copy.deepcopy(_default_annotation_kwargs))
self.annotation_positions = (
annotation_positions if annotation_positions is not None
else copy.deepcopy(_default_annotation_positions))
self.highlight_kwargs = (
highlight_kwargs if highlight_kwargs is not None
else copy.deepcopy(_default_highlight_kwargs))
@property
def artists(self):
"""The tuple of selectable artists."""
# Work around matplotlib/matplotlib#6982: `cla()` does not clear
# `.axes`.
return tuple(filter(_is_alive, (ref() for ref in self._artists)))
@property
def enabled(self):
"""Whether clicks are registered for picking and unpicking events."""
return self._enabled
@enabled.setter
def enabled(self, value):
self._enabled = value
@property
def selections(self):
r"""The tuple of current `Selection`\s."""
for sel in self._selections:
if sel.annotation.axes is None:
raise RuntimeError("Annotation unexpectedly removed; "
"use 'cursor.remove_selection' instead")
return tuple(self._selections)
@property
def visible(self):
"""
Whether selections are visible by default.
Setting this property also updates the visibility status of current
selections.
"""
return self._visible
@visible.setter
def visible(self, value):
self._visible = value
for sel in self.selections:
sel.annotation.set_visible(value)
sel.annotation.figure.canvas.draw_idle()
def add_selection(self, pi):
"""
Create an annotation for a `Selection` and register it.
Returns a new `Selection`, that has been registered by the `Cursor`,
with the added annotation set in the :attr:`annotation` field and, if
applicable, the highlighting artist in the :attr:`extras` field.
Emits the ``"add"`` event with the new `Selection` as argument. When
the event is emitted, the position of the annotation is temporarily
set to ``(nan, nan)``; if this position is not explicitly set by a
callback, then a suitable position will be automatically computed.
Likewise, if the text alignment is not explicitly set but the position
is, then a suitable alignment will be automatically computed.
"""
# pi: "pick_info", i.e. an incomplete selection.
# Pre-fetch the figure and axes, as callbacks may actually unset them.
figure = pi.artist.figure
axes = pi.artist.axes
if axes.get_renderer_cache() is None:
figure.canvas.draw() # Needed by draw_artist below anyways.
renderer = pi.artist.axes.get_renderer_cache()
ann = pi.artist.axes.annotate(
_pick_info.get_ann_text(*pi), xy=pi.target,
xytext=(np.nan, np.nan),
ha=_MarkedStr("center"), va=_MarkedStr("center"),
visible=self.visible,
**self.annotation_kwargs)
ann.draggable(use_blit=not self._multiple)
extras = []
if self._highlight:
hl = self.add_highlight(*pi)
if hl:
extras.append(hl)
sel = pi._replace(annotation=ann, extras=extras)
self._selections.append(sel)
for cb in self._callbacks["add"]:
cb(sel)
# Check that `ann.axes` is still set, as callbacks may have removed the
# annotation.
if ann.axes and ann.xyann == (np.nan, np.nan):
fig_bbox = figure.get_window_extent()
ax_bbox = axes.get_window_extent()
overlaps = []
for idx, annotation_position in enumerate(
self.annotation_positions):
ann.set(**annotation_position)
# Work around matplotlib/matplotlib#7614: position update is
# missing.
ann.update_positions(renderer)
bbox = ann.get_window_extent(renderer)
overlaps.append(
(_get_rounded_intersection_area(fig_bbox, bbox),
_get_rounded_intersection_area(ax_bbox, bbox),
# Avoid needlessly jumping around by breaking ties using
# the last used position as default.
idx == self._last_auto_position))
auto_position = max(range(len(overlaps)), key=overlaps.__getitem__)
ann.set(**self.annotation_positions[auto_position])
self._last_auto_position = auto_position
else:
if isinstance(ann.get_ha(), _MarkedStr):
ann.set_ha({-1: "right", 0: "center", 1: "left"}[
np.sign(np.nan_to_num(ann.xyann[0]))])
if isinstance(ann.get_va(), _MarkedStr):
ann.set_va({-1: "top", 0: "center", 1: "bottom"}[
np.sign(np.nan_to_num(ann.xyann[1]))])
if (extras
or len(self.selections) > 1 and not self._multiple
or not figure.canvas.supports_blit):
# Either:
# - there may be more things to draw, or
# - annotation removal will make a full redraw necessary, or
# - blitting is not (yet) supported.
figure.canvas.draw_idle()
elif ann.axes:
# Fast path, only needed if the annotation has not been immediately
# removed.
figure.draw_artist(ann)
# Explicit argument needed on MacOSX backend.
figure.canvas.blit(figure.bbox)
# Removal comes after addition so that the fast blitting path works.
if not self._multiple:
for sel in self.selections[:-1]:
self.remove_selection(sel)
return sel
def add_highlight(self, artist, *args, **kwargs):
"""
Create, add, and return a highlighting artist.
This method is should be called with an "unpacked" `Selection`,
possibly with some fields set to None.
It is up to the caller to register the artist with the proper
`Selection` (by calling ``sel.extras.append`` on the result of this
method) in order to ensure cleanup upon deselection.
"""
hl = _pick_info.make_highlight(
artist, *args,
**ChainMap({"highlight_kwargs": self.highlight_kwargs}, kwargs))
if hl:
artist.axes.add_artist(hl)
return hl
def connect(self, event, func=None):
"""
Connect a callback to a `Cursor` event; return the callback.
Two events can be connected to:
- callbacks connected to the ``"add"`` event are called when a
`Selection` is added, with that selection as only argument;
- callbacks connected to the ``"remove"`` event are called when a
`Selection` is removed, with that selection as only argument.
This method can also be used as a decorator::
@cursor.connect("add")
def on_add(sel):
...
Examples of callbacks::
# Change the annotation text and alignment:
lambda sel: sel.annotation.set(
text=sel.artist.get_label(), # or use e.g. sel.target.index
ha="center", va="bottom")
# Make label non-draggable:
lambda sel: sel.draggable(False)
"""
if event not in self._callbacks:
raise ValueError("{!r} is not a valid cursor event".format(event))
if func is None:
return partial(self.connect, event)
self._callbacks[event].append(func)
return func
def remove(self):
"""
Remove a cursor.
Remove all `Selection`\\s, disconnect all callbacks, and allow the
cursor to be garbage collected.
"""
for disconnectors in self._disconnectors:
disconnectors()
for sel in self.selections:
self.remove_selection(sel)
for s in type(self)._keep_alive.values():
with suppress(KeyError):
s.remove(self)
def _nonhover_handler(self, event):
if event.name == "button_press_event":
if event.button == self.bindings["select"]:
self._on_select_button_press(event)
if event.button == self.bindings["deselect"]:
self._on_deselect_button_press(event)
def _hover_handler(self, event):
if event.name == "motion_notify_event" and event.button is None:
# Filter away events where the mouse is pressed, in particular to
# avoid conflicts between hover and draggable.
self._on_select_button_press(event)
elif (event.name == "button_press_event"
and event.button == self.bindings["deselect"]):
# Still allow removing the annotation by right clicking.
self._on_deselect_button_press(event)
def _filter_mouse_event(self, event):
# Accept the event iff we are enabled, and either
# - no other widget is active, and this is not the second click of a
# double click (to prevent double selection), or
# - another widget is active, and this is a double click (to bypass
# the widget lock).
return (self.enabled
and event.canvas.widgetlock.locked() == event.dblclick)
def _on_select_button_press(self, event):
if not self._filter_mouse_event(event):
return
# Work around lack of support for twinned axes.
per_axes_event = {ax: _reassigned_axes_event(event, ax)
for ax in {artist.axes for artist in self.artists}}
pis = []
for artist in self.artists:
if (artist.axes is None # Removed or figure-level artist.
or event.canvas is not artist.figure.canvas
or not artist.axes.contains(event)[0]): # Cropped by axes.
continue
pi = _pick_info.compute_pick(artist, per_axes_event[artist.axes])
if pi and not any((pi.artist, tuple(pi.target))
== (other.artist, tuple(other.target))
for other in self._selections):
pis.append(pi)
if not pis:
return
self.add_selection(min(pis, key=lambda pi: pi.dist))
def _on_deselect_button_press(self, event):
if not self._filter_mouse_event(event):
return
for sel in self.selections[::-1]: # LIFO.
ann = sel.annotation
if event.canvas is not ann.figure.canvas:
continue
contained, _ = ann.contains(event)
if contained:
self.remove_selection(sel)
break
def _on_key_press(self, event):
if event.key == self.bindings["toggle_enabled"]:
self.enabled = not self.enabled
elif event.key == self.bindings["toggle_visible"]:
self.visible = not self.visible
try:
sel = self.selections[-1]
except IndexError:
return
for key in ["left", "right", "up", "down"]:
if event.key == self.bindings[key]:
self.remove_selection(sel)
self.add_selection(_pick_info.move(*sel, key=key))
break
def remove_selection(self, sel):
"""Remove a `Selection`."""
self._selections.remove(sel)
# <artist>.figure will be unset so we save them first.
figures = {artist.figure for artist in [sel.annotation] + sel.extras}
# ValueError is raised if the artist has already been removed.
with suppress(ValueError):
sel.annotation.remove()
for artist in sel.extras:
with suppress(ValueError):
artist.remove()
for cb in self._callbacks["remove"]:
cb(sel)
for figure in figures:
figure.canvas.draw_idle()
|
anntzer/mplcursors
|
lib/mplcursors/_mplcursors.py
|
Cursor.remove
|
python
|
def remove(self):
for disconnectors in self._disconnectors:
disconnectors()
for sel in self.selections:
self.remove_selection(sel)
for s in type(self)._keep_alive.values():
with suppress(KeyError):
s.remove(self)
|
Remove a cursor.
Remove all `Selection`\\s, disconnect all callbacks, and allow the
cursor to be garbage collected.
|
train
|
https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_mplcursors.py#L440-L453
|
[
"def remove_selection(self, sel):\n \"\"\"Remove a `Selection`.\"\"\"\n self._selections.remove(sel)\n # <artist>.figure will be unset so we save them first.\n figures = {artist.figure for artist in [sel.annotation] + sel.extras}\n # ValueError is raised if the artist has already been removed.\n with suppress(ValueError):\n sel.annotation.remove()\n for artist in sel.extras:\n with suppress(ValueError):\n artist.remove()\n for cb in self._callbacks[\"remove\"]:\n cb(sel)\n for figure in figures:\n figure.canvas.draw_idle()\n"
] |
class Cursor:
"""
A cursor for selecting Matplotlib artists.
Attributes
----------
bindings : dict
See the *bindings* keyword argument to the constructor.
annotation_kwargs : dict
See the *annotation_kwargs* keyword argument to the constructor.
annotation_positions : dict
See the *annotation_positions* keyword argument to the constructor.
highlight_kwargs : dict
See the *highlight_kwargs* keyword argument to the constructor.
"""
_keep_alive = WeakKeyDictionary()
def __init__(self,
artists,
*,
multiple=False,
highlight=False,
hover=False,
bindings=None,
annotation_kwargs=None,
annotation_positions=None,
highlight_kwargs=None):
"""
Construct a cursor.
Parameters
----------
artists : List[Artist]
A list of artists that can be selected by this cursor.
multiple : bool, optional
Whether multiple artists can be "on" at the same time (defaults to
False).
highlight : bool, optional
Whether to also highlight the selected artist. If so,
"highlighter" artists will be placed as the first item in the
:attr:`extras` attribute of the `Selection`.
hover : bool, optional
Whether to select artists upon hovering instead of by clicking.
(Hovering over an artist while a button is pressed will not trigger
a selection; right clicking on an annotation will still remove it.)
bindings : dict, optional
A mapping of button and keybindings to actions. Valid entries are:
================ ==================================================
'select' mouse button to select an artist
(default: 1)
'deselect' mouse button to deselect an artist
(default: 3)
'left' move to the previous point in the selected path,
or to the left in the selected image
(default: shift+left)
'right' move to the next point in the selected path, or to
the right in the selected image
(default: shift+right)
'up' move up in the selected image
(default: shift+up)
'down' move down in the selected image
(default: shift+down)
'toggle_enabled' toggle whether the cursor is active
(default: e)
'toggle_visible' toggle default cursor visibility and apply it to
all cursors (default: v)
================ ==================================================
Missing entries will be set to the defaults. In order to not
assign any binding to an action, set it to ``None``.
annotation_kwargs : dict, optional
Keyword argments passed to the `annotate
<matplotlib.axes.Axes.annotate>` call.
annotation_positions : List[dict], optional
List of positions tried by the annotation positioning algorithm.
highlight_kwargs : dict, optional
Keyword arguments used to create a highlighted artist.
"""
artists = list(artists)
# Be careful with GC.
self._artists = [weakref.ref(artist) for artist in artists]
for artist in artists:
type(self)._keep_alive.setdefault(artist, set()).add(self)
self._multiple = multiple
self._highlight = highlight
self._visible = True
self._enabled = True
self._selections = []
self._last_auto_position = None
self._callbacks = {"add": [], "remove": []}
connect_pairs = [("key_press_event", self._on_key_press)]
if hover:
if multiple:
raise ValueError("'hover' and 'multiple' are incompatible")
connect_pairs += [
("motion_notify_event", self._hover_handler),
("button_press_event", self._hover_handler)]
else:
connect_pairs += [
("button_press_event", self._nonhover_handler)]
self._disconnectors = [
partial(canvas.mpl_disconnect, canvas.mpl_connect(*pair))
for pair in connect_pairs
for canvas in {artist.figure.canvas for artist in artists}]
bindings = dict(ChainMap(bindings if bindings is not None else {},
_default_bindings))
unknown_bindings = set(bindings) - set(_default_bindings)
if unknown_bindings:
raise ValueError("Unknown binding(s): {}".format(
", ".join(sorted(unknown_bindings))))
duplicate_bindings = [
k for k, v in Counter(list(bindings.values())).items() if v > 1]
if duplicate_bindings:
raise ValueError("Duplicate binding(s): {}".format(
", ".join(sorted(map(str, duplicate_bindings)))))
self.bindings = bindings
self.annotation_kwargs = (
annotation_kwargs if annotation_kwargs is not None
else copy.deepcopy(_default_annotation_kwargs))
self.annotation_positions = (
annotation_positions if annotation_positions is not None
else copy.deepcopy(_default_annotation_positions))
self.highlight_kwargs = (
highlight_kwargs if highlight_kwargs is not None
else copy.deepcopy(_default_highlight_kwargs))
@property
def artists(self):
"""The tuple of selectable artists."""
# Work around matplotlib/matplotlib#6982: `cla()` does not clear
# `.axes`.
return tuple(filter(_is_alive, (ref() for ref in self._artists)))
@property
def enabled(self):
"""Whether clicks are registered for picking and unpicking events."""
return self._enabled
@enabled.setter
def enabled(self, value):
self._enabled = value
@property
def selections(self):
r"""The tuple of current `Selection`\s."""
for sel in self._selections:
if sel.annotation.axes is None:
raise RuntimeError("Annotation unexpectedly removed; "
"use 'cursor.remove_selection' instead")
return tuple(self._selections)
@property
def visible(self):
"""
Whether selections are visible by default.
Setting this property also updates the visibility status of current
selections.
"""
return self._visible
@visible.setter
def visible(self, value):
self._visible = value
for sel in self.selections:
sel.annotation.set_visible(value)
sel.annotation.figure.canvas.draw_idle()
def add_selection(self, pi):
"""
Create an annotation for a `Selection` and register it.
Returns a new `Selection`, that has been registered by the `Cursor`,
with the added annotation set in the :attr:`annotation` field and, if
applicable, the highlighting artist in the :attr:`extras` field.
Emits the ``"add"`` event with the new `Selection` as argument. When
the event is emitted, the position of the annotation is temporarily
set to ``(nan, nan)``; if this position is not explicitly set by a
callback, then a suitable position will be automatically computed.
Likewise, if the text alignment is not explicitly set but the position
is, then a suitable alignment will be automatically computed.
"""
# pi: "pick_info", i.e. an incomplete selection.
# Pre-fetch the figure and axes, as callbacks may actually unset them.
figure = pi.artist.figure
axes = pi.artist.axes
if axes.get_renderer_cache() is None:
figure.canvas.draw() # Needed by draw_artist below anyways.
renderer = pi.artist.axes.get_renderer_cache()
ann = pi.artist.axes.annotate(
_pick_info.get_ann_text(*pi), xy=pi.target,
xytext=(np.nan, np.nan),
ha=_MarkedStr("center"), va=_MarkedStr("center"),
visible=self.visible,
**self.annotation_kwargs)
ann.draggable(use_blit=not self._multiple)
extras = []
if self._highlight:
hl = self.add_highlight(*pi)
if hl:
extras.append(hl)
sel = pi._replace(annotation=ann, extras=extras)
self._selections.append(sel)
for cb in self._callbacks["add"]:
cb(sel)
# Check that `ann.axes` is still set, as callbacks may have removed the
# annotation.
if ann.axes and ann.xyann == (np.nan, np.nan):
fig_bbox = figure.get_window_extent()
ax_bbox = axes.get_window_extent()
overlaps = []
for idx, annotation_position in enumerate(
self.annotation_positions):
ann.set(**annotation_position)
# Work around matplotlib/matplotlib#7614: position update is
# missing.
ann.update_positions(renderer)
bbox = ann.get_window_extent(renderer)
overlaps.append(
(_get_rounded_intersection_area(fig_bbox, bbox),
_get_rounded_intersection_area(ax_bbox, bbox),
# Avoid needlessly jumping around by breaking ties using
# the last used position as default.
idx == self._last_auto_position))
auto_position = max(range(len(overlaps)), key=overlaps.__getitem__)
ann.set(**self.annotation_positions[auto_position])
self._last_auto_position = auto_position
else:
if isinstance(ann.get_ha(), _MarkedStr):
ann.set_ha({-1: "right", 0: "center", 1: "left"}[
np.sign(np.nan_to_num(ann.xyann[0]))])
if isinstance(ann.get_va(), _MarkedStr):
ann.set_va({-1: "top", 0: "center", 1: "bottom"}[
np.sign(np.nan_to_num(ann.xyann[1]))])
if (extras
or len(self.selections) > 1 and not self._multiple
or not figure.canvas.supports_blit):
# Either:
# - there may be more things to draw, or
# - annotation removal will make a full redraw necessary, or
# - blitting is not (yet) supported.
figure.canvas.draw_idle()
elif ann.axes:
# Fast path, only needed if the annotation has not been immediately
# removed.
figure.draw_artist(ann)
# Explicit argument needed on MacOSX backend.
figure.canvas.blit(figure.bbox)
# Removal comes after addition so that the fast blitting path works.
if not self._multiple:
for sel in self.selections[:-1]:
self.remove_selection(sel)
return sel
def add_highlight(self, artist, *args, **kwargs):
"""
Create, add, and return a highlighting artist.
This method is should be called with an "unpacked" `Selection`,
possibly with some fields set to None.
It is up to the caller to register the artist with the proper
`Selection` (by calling ``sel.extras.append`` on the result of this
method) in order to ensure cleanup upon deselection.
"""
hl = _pick_info.make_highlight(
artist, *args,
**ChainMap({"highlight_kwargs": self.highlight_kwargs}, kwargs))
if hl:
artist.axes.add_artist(hl)
return hl
def connect(self, event, func=None):
"""
Connect a callback to a `Cursor` event; return the callback.
Two events can be connected to:
- callbacks connected to the ``"add"`` event are called when a
`Selection` is added, with that selection as only argument;
- callbacks connected to the ``"remove"`` event are called when a
`Selection` is removed, with that selection as only argument.
This method can also be used as a decorator::
@cursor.connect("add")
def on_add(sel):
...
Examples of callbacks::
# Change the annotation text and alignment:
lambda sel: sel.annotation.set(
text=sel.artist.get_label(), # or use e.g. sel.target.index
ha="center", va="bottom")
# Make label non-draggable:
lambda sel: sel.draggable(False)
"""
if event not in self._callbacks:
raise ValueError("{!r} is not a valid cursor event".format(event))
if func is None:
return partial(self.connect, event)
self._callbacks[event].append(func)
return func
def disconnect(self, event, cb):
"""
Disconnect a previously connected callback.
If a callback is connected multiple times, only one connection is
removed.
"""
try:
self._callbacks[event].remove(cb)
except KeyError:
raise ValueError("{!r} is not a valid cursor event".format(event))
except ValueError:
raise ValueError("Callback {} is not registered".format(event))
def _nonhover_handler(self, event):
if event.name == "button_press_event":
if event.button == self.bindings["select"]:
self._on_select_button_press(event)
if event.button == self.bindings["deselect"]:
self._on_deselect_button_press(event)
def _hover_handler(self, event):
if event.name == "motion_notify_event" and event.button is None:
# Filter away events where the mouse is pressed, in particular to
# avoid conflicts between hover and draggable.
self._on_select_button_press(event)
elif (event.name == "button_press_event"
and event.button == self.bindings["deselect"]):
# Still allow removing the annotation by right clicking.
self._on_deselect_button_press(event)
def _filter_mouse_event(self, event):
# Accept the event iff we are enabled, and either
# - no other widget is active, and this is not the second click of a
# double click (to prevent double selection), or
# - another widget is active, and this is a double click (to bypass
# the widget lock).
return (self.enabled
and event.canvas.widgetlock.locked() == event.dblclick)
def _on_select_button_press(self, event):
if not self._filter_mouse_event(event):
return
# Work around lack of support for twinned axes.
per_axes_event = {ax: _reassigned_axes_event(event, ax)
for ax in {artist.axes for artist in self.artists}}
pis = []
for artist in self.artists:
if (artist.axes is None # Removed or figure-level artist.
or event.canvas is not artist.figure.canvas
or not artist.axes.contains(event)[0]): # Cropped by axes.
continue
pi = _pick_info.compute_pick(artist, per_axes_event[artist.axes])
if pi and not any((pi.artist, tuple(pi.target))
== (other.artist, tuple(other.target))
for other in self._selections):
pis.append(pi)
if not pis:
return
self.add_selection(min(pis, key=lambda pi: pi.dist))
def _on_deselect_button_press(self, event):
if not self._filter_mouse_event(event):
return
for sel in self.selections[::-1]: # LIFO.
ann = sel.annotation
if event.canvas is not ann.figure.canvas:
continue
contained, _ = ann.contains(event)
if contained:
self.remove_selection(sel)
break
def _on_key_press(self, event):
if event.key == self.bindings["toggle_enabled"]:
self.enabled = not self.enabled
elif event.key == self.bindings["toggle_visible"]:
self.visible = not self.visible
try:
sel = self.selections[-1]
except IndexError:
return
for key in ["left", "right", "up", "down"]:
if event.key == self.bindings[key]:
self.remove_selection(sel)
self.add_selection(_pick_info.move(*sel, key=key))
break
def remove_selection(self, sel):
"""Remove a `Selection`."""
self._selections.remove(sel)
# <artist>.figure will be unset so we save them first.
figures = {artist.figure for artist in [sel.annotation] + sel.extras}
# ValueError is raised if the artist has already been removed.
with suppress(ValueError):
sel.annotation.remove()
for artist in sel.extras:
with suppress(ValueError):
artist.remove()
for cb in self._callbacks["remove"]:
cb(sel)
for figure in figures:
figure.canvas.draw_idle()
|
anntzer/mplcursors
|
lib/mplcursors/_mplcursors.py
|
Cursor.remove_selection
|
python
|
def remove_selection(self, sel):
self._selections.remove(sel)
# <artist>.figure will be unset so we save them first.
figures = {artist.figure for artist in [sel.annotation] + sel.extras}
# ValueError is raised if the artist has already been removed.
with suppress(ValueError):
sel.annotation.remove()
for artist in sel.extras:
with suppress(ValueError):
artist.remove()
for cb in self._callbacks["remove"]:
cb(sel)
for figure in figures:
figure.canvas.draw_idle()
|
Remove a `Selection`.
|
train
|
https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_mplcursors.py#L529-L543
| null |
class Cursor:
"""
A cursor for selecting Matplotlib artists.
Attributes
----------
bindings : dict
See the *bindings* keyword argument to the constructor.
annotation_kwargs : dict
See the *annotation_kwargs* keyword argument to the constructor.
annotation_positions : dict
See the *annotation_positions* keyword argument to the constructor.
highlight_kwargs : dict
See the *highlight_kwargs* keyword argument to the constructor.
"""
_keep_alive = WeakKeyDictionary()
def __init__(self,
artists,
*,
multiple=False,
highlight=False,
hover=False,
bindings=None,
annotation_kwargs=None,
annotation_positions=None,
highlight_kwargs=None):
"""
Construct a cursor.
Parameters
----------
artists : List[Artist]
A list of artists that can be selected by this cursor.
multiple : bool, optional
Whether multiple artists can be "on" at the same time (defaults to
False).
highlight : bool, optional
Whether to also highlight the selected artist. If so,
"highlighter" artists will be placed as the first item in the
:attr:`extras` attribute of the `Selection`.
hover : bool, optional
Whether to select artists upon hovering instead of by clicking.
(Hovering over an artist while a button is pressed will not trigger
a selection; right clicking on an annotation will still remove it.)
bindings : dict, optional
A mapping of button and keybindings to actions. Valid entries are:
================ ==================================================
'select' mouse button to select an artist
(default: 1)
'deselect' mouse button to deselect an artist
(default: 3)
'left' move to the previous point in the selected path,
or to the left in the selected image
(default: shift+left)
'right' move to the next point in the selected path, or to
the right in the selected image
(default: shift+right)
'up' move up in the selected image
(default: shift+up)
'down' move down in the selected image
(default: shift+down)
'toggle_enabled' toggle whether the cursor is active
(default: e)
'toggle_visible' toggle default cursor visibility and apply it to
all cursors (default: v)
================ ==================================================
Missing entries will be set to the defaults. In order to not
assign any binding to an action, set it to ``None``.
annotation_kwargs : dict, optional
Keyword argments passed to the `annotate
<matplotlib.axes.Axes.annotate>` call.
annotation_positions : List[dict], optional
List of positions tried by the annotation positioning algorithm.
highlight_kwargs : dict, optional
Keyword arguments used to create a highlighted artist.
"""
artists = list(artists)
# Be careful with GC.
self._artists = [weakref.ref(artist) for artist in artists]
for artist in artists:
type(self)._keep_alive.setdefault(artist, set()).add(self)
self._multiple = multiple
self._highlight = highlight
self._visible = True
self._enabled = True
self._selections = []
self._last_auto_position = None
self._callbacks = {"add": [], "remove": []}
connect_pairs = [("key_press_event", self._on_key_press)]
if hover:
if multiple:
raise ValueError("'hover' and 'multiple' are incompatible")
connect_pairs += [
("motion_notify_event", self._hover_handler),
("button_press_event", self._hover_handler)]
else:
connect_pairs += [
("button_press_event", self._nonhover_handler)]
self._disconnectors = [
partial(canvas.mpl_disconnect, canvas.mpl_connect(*pair))
for pair in connect_pairs
for canvas in {artist.figure.canvas for artist in artists}]
bindings = dict(ChainMap(bindings if bindings is not None else {},
_default_bindings))
unknown_bindings = set(bindings) - set(_default_bindings)
if unknown_bindings:
raise ValueError("Unknown binding(s): {}".format(
", ".join(sorted(unknown_bindings))))
duplicate_bindings = [
k for k, v in Counter(list(bindings.values())).items() if v > 1]
if duplicate_bindings:
raise ValueError("Duplicate binding(s): {}".format(
", ".join(sorted(map(str, duplicate_bindings)))))
self.bindings = bindings
self.annotation_kwargs = (
annotation_kwargs if annotation_kwargs is not None
else copy.deepcopy(_default_annotation_kwargs))
self.annotation_positions = (
annotation_positions if annotation_positions is not None
else copy.deepcopy(_default_annotation_positions))
self.highlight_kwargs = (
highlight_kwargs if highlight_kwargs is not None
else copy.deepcopy(_default_highlight_kwargs))
@property
def artists(self):
"""The tuple of selectable artists."""
# Work around matplotlib/matplotlib#6982: `cla()` does not clear
# `.axes`.
return tuple(filter(_is_alive, (ref() for ref in self._artists)))
@property
def enabled(self):
"""Whether clicks are registered for picking and unpicking events."""
return self._enabled
@enabled.setter
def enabled(self, value):
self._enabled = value
@property
def selections(self):
r"""The tuple of current `Selection`\s."""
for sel in self._selections:
if sel.annotation.axes is None:
raise RuntimeError("Annotation unexpectedly removed; "
"use 'cursor.remove_selection' instead")
return tuple(self._selections)
@property
def visible(self):
"""
Whether selections are visible by default.
Setting this property also updates the visibility status of current
selections.
"""
return self._visible
@visible.setter
def visible(self, value):
self._visible = value
for sel in self.selections:
sel.annotation.set_visible(value)
sel.annotation.figure.canvas.draw_idle()
def add_selection(self, pi):
"""
Create an annotation for a `Selection` and register it.
Returns a new `Selection`, that has been registered by the `Cursor`,
with the added annotation set in the :attr:`annotation` field and, if
applicable, the highlighting artist in the :attr:`extras` field.
Emits the ``"add"`` event with the new `Selection` as argument. When
the event is emitted, the position of the annotation is temporarily
set to ``(nan, nan)``; if this position is not explicitly set by a
callback, then a suitable position will be automatically computed.
Likewise, if the text alignment is not explicitly set but the position
is, then a suitable alignment will be automatically computed.
"""
# pi: "pick_info", i.e. an incomplete selection.
# Pre-fetch the figure and axes, as callbacks may actually unset them.
figure = pi.artist.figure
axes = pi.artist.axes
if axes.get_renderer_cache() is None:
figure.canvas.draw() # Needed by draw_artist below anyways.
renderer = pi.artist.axes.get_renderer_cache()
ann = pi.artist.axes.annotate(
_pick_info.get_ann_text(*pi), xy=pi.target,
xytext=(np.nan, np.nan),
ha=_MarkedStr("center"), va=_MarkedStr("center"),
visible=self.visible,
**self.annotation_kwargs)
ann.draggable(use_blit=not self._multiple)
extras = []
if self._highlight:
hl = self.add_highlight(*pi)
if hl:
extras.append(hl)
sel = pi._replace(annotation=ann, extras=extras)
self._selections.append(sel)
for cb in self._callbacks["add"]:
cb(sel)
# Check that `ann.axes` is still set, as callbacks may have removed the
# annotation.
if ann.axes and ann.xyann == (np.nan, np.nan):
fig_bbox = figure.get_window_extent()
ax_bbox = axes.get_window_extent()
overlaps = []
for idx, annotation_position in enumerate(
self.annotation_positions):
ann.set(**annotation_position)
# Work around matplotlib/matplotlib#7614: position update is
# missing.
ann.update_positions(renderer)
bbox = ann.get_window_extent(renderer)
overlaps.append(
(_get_rounded_intersection_area(fig_bbox, bbox),
_get_rounded_intersection_area(ax_bbox, bbox),
# Avoid needlessly jumping around by breaking ties using
# the last used position as default.
idx == self._last_auto_position))
auto_position = max(range(len(overlaps)), key=overlaps.__getitem__)
ann.set(**self.annotation_positions[auto_position])
self._last_auto_position = auto_position
else:
if isinstance(ann.get_ha(), _MarkedStr):
ann.set_ha({-1: "right", 0: "center", 1: "left"}[
np.sign(np.nan_to_num(ann.xyann[0]))])
if isinstance(ann.get_va(), _MarkedStr):
ann.set_va({-1: "top", 0: "center", 1: "bottom"}[
np.sign(np.nan_to_num(ann.xyann[1]))])
if (extras
or len(self.selections) > 1 and not self._multiple
or not figure.canvas.supports_blit):
# Either:
# - there may be more things to draw, or
# - annotation removal will make a full redraw necessary, or
# - blitting is not (yet) supported.
figure.canvas.draw_idle()
elif ann.axes:
# Fast path, only needed if the annotation has not been immediately
# removed.
figure.draw_artist(ann)
# Explicit argument needed on MacOSX backend.
figure.canvas.blit(figure.bbox)
# Removal comes after addition so that the fast blitting path works.
if not self._multiple:
for sel in self.selections[:-1]:
self.remove_selection(sel)
return sel
def add_highlight(self, artist, *args, **kwargs):
"""
Create, add, and return a highlighting artist.
This method is should be called with an "unpacked" `Selection`,
possibly with some fields set to None.
It is up to the caller to register the artist with the proper
`Selection` (by calling ``sel.extras.append`` on the result of this
method) in order to ensure cleanup upon deselection.
"""
hl = _pick_info.make_highlight(
artist, *args,
**ChainMap({"highlight_kwargs": self.highlight_kwargs}, kwargs))
if hl:
artist.axes.add_artist(hl)
return hl
def connect(self, event, func=None):
"""
Connect a callback to a `Cursor` event; return the callback.
Two events can be connected to:
- callbacks connected to the ``"add"`` event are called when a
`Selection` is added, with that selection as only argument;
- callbacks connected to the ``"remove"`` event are called when a
`Selection` is removed, with that selection as only argument.
This method can also be used as a decorator::
@cursor.connect("add")
def on_add(sel):
...
Examples of callbacks::
# Change the annotation text and alignment:
lambda sel: sel.annotation.set(
text=sel.artist.get_label(), # or use e.g. sel.target.index
ha="center", va="bottom")
# Make label non-draggable:
lambda sel: sel.draggable(False)
"""
if event not in self._callbacks:
raise ValueError("{!r} is not a valid cursor event".format(event))
if func is None:
return partial(self.connect, event)
self._callbacks[event].append(func)
return func
def disconnect(self, event, cb):
"""
Disconnect a previously connected callback.
If a callback is connected multiple times, only one connection is
removed.
"""
try:
self._callbacks[event].remove(cb)
except KeyError:
raise ValueError("{!r} is not a valid cursor event".format(event))
except ValueError:
raise ValueError("Callback {} is not registered".format(event))
def remove(self):
"""
Remove a cursor.
Remove all `Selection`\\s, disconnect all callbacks, and allow the
cursor to be garbage collected.
"""
for disconnectors in self._disconnectors:
disconnectors()
for sel in self.selections:
self.remove_selection(sel)
for s in type(self)._keep_alive.values():
with suppress(KeyError):
s.remove(self)
def _nonhover_handler(self, event):
if event.name == "button_press_event":
if event.button == self.bindings["select"]:
self._on_select_button_press(event)
if event.button == self.bindings["deselect"]:
self._on_deselect_button_press(event)
def _hover_handler(self, event):
if event.name == "motion_notify_event" and event.button is None:
# Filter away events where the mouse is pressed, in particular to
# avoid conflicts between hover and draggable.
self._on_select_button_press(event)
elif (event.name == "button_press_event"
and event.button == self.bindings["deselect"]):
# Still allow removing the annotation by right clicking.
self._on_deselect_button_press(event)
def _filter_mouse_event(self, event):
# Accept the event iff we are enabled, and either
# - no other widget is active, and this is not the second click of a
# double click (to prevent double selection), or
# - another widget is active, and this is a double click (to bypass
# the widget lock).
return (self.enabled
and event.canvas.widgetlock.locked() == event.dblclick)
def _on_select_button_press(self, event):
if not self._filter_mouse_event(event):
return
# Work around lack of support for twinned axes.
per_axes_event = {ax: _reassigned_axes_event(event, ax)
for ax in {artist.axes for artist in self.artists}}
pis = []
for artist in self.artists:
if (artist.axes is None # Removed or figure-level artist.
or event.canvas is not artist.figure.canvas
or not artist.axes.contains(event)[0]): # Cropped by axes.
continue
pi = _pick_info.compute_pick(artist, per_axes_event[artist.axes])
if pi and not any((pi.artist, tuple(pi.target))
== (other.artist, tuple(other.target))
for other in self._selections):
pis.append(pi)
if not pis:
return
self.add_selection(min(pis, key=lambda pi: pi.dist))
def _on_deselect_button_press(self, event):
if not self._filter_mouse_event(event):
return
for sel in self.selections[::-1]: # LIFO.
ann = sel.annotation
if event.canvas is not ann.figure.canvas:
continue
contained, _ = ann.contains(event)
if contained:
self.remove_selection(sel)
break
def _on_key_press(self, event):
if event.key == self.bindings["toggle_enabled"]:
self.enabled = not self.enabled
elif event.key == self.bindings["toggle_visible"]:
self.visible = not self.visible
try:
sel = self.selections[-1]
except IndexError:
return
for key in ["left", "right", "up", "down"]:
if event.key == self.bindings[key]:
self.remove_selection(sel)
self.add_selection(_pick_info.move(*sel, key=key))
break
|
avinassh/haxor
|
hackernews/__init__.py
|
HackerNews._get_sync
|
python
|
def _get_sync(self, url):
    """Fetch *url* synchronously and return its decoded JSON body.

    Args:
        url (str): URL to fetch.

    Returns:
        The JSON-decoded response payload.

    Raises:
        HTTPError: if the server responds with a non-200 status.
    """
    response = self.session.get(url)
    # Anything other than HTTP 200 is treated as a failure.
    if response.status_code != requests.codes.ok:
        raise HTTPError
    return response.json()
|
Internal method used for GET requests
Args:
url (str): URL to fetch
Returns:
Individual URL request's response
Raises:
HTTPError: If HTTP request failed.
|
train
|
https://github.com/avinassh/haxor/blob/71dbecf87531f7a24bb39c736d53127427aaca84/hackernews/__init__.py#L74-L90
| null |
class HackerNews(object):
def __init__(self, version='v0'):
"""
Args:
version (string): specifies Hacker News API version.
Default is `v0`.
Raises:
InvalidAPIVersion: If Hacker News version is not supported.
"""
try:
self.base_url = supported_api_versions[version]
except KeyError:
raise InvalidAPIVersion
self.item_url = urljoin(self.base_url, 'item/')
self.user_url = urljoin(self.base_url, 'user/')
self.session = requests.Session()
async def _get_async(self, url, session):
"""Asynchronous internal method used for GET requests
Args:
url (str): URL to fetch
session (obj): aiohttp client session for async loop
Returns:
            data (obj): Individual URL request's response coroutine
"""
data = None
async with session.get(url) as resp:
if resp.status == 200:
data = await resp.json()
return data
async def _async_loop(self, urls):
"""Asynchronous internal method used to request multiple URLs
Args:
urls (list): URLs to fetch
Returns:
responses (obj): All URL requests' response coroutines
"""
results = []
async with aiohttp.ClientSession(
connector=aiohttp.TCPConnector(ssl=False)
) as session:
for url in urls:
result = asyncio.ensure_future(self._get_async(url, session))
results.append(result)
responses = await asyncio.gather(*results)
return responses
def _run_async(self, urls):
"""Asynchronous event loop execution
Args:
urls (list): URLs to fetch
Returns:
results (obj): All URL requests' responses
"""
loop = asyncio.get_event_loop()
results = loop.run_until_complete(self._async_loop(urls))
return results
def _get_stories(self, page, limit):
"""
Hacker News has different categories (i.e. stories) like
'topstories', 'newstories', 'askstories', 'showstories', 'jobstories'.
This method, first fetches the relevant story ids of that category
The URL is: https://hacker-news.firebaseio.com/v0/<story_name>.json
e.g. https://hacker-news.firebaseio.com/v0/topstories.json
Then, asynchronously it fetches each story and returns the Item objects
The URL for individual story is:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
"""
url = urljoin(self.base_url, F"{page}.json")
story_ids = self._get_sync(url)[:limit]
return self.get_items_by_ids(item_ids=story_ids)
def get_item(self, item_id, expand=False):
"""Returns Hacker News `Item` object.
Fetches the data from url:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
Args:
item_id (int or string): Unique item id of Hacker News story,
comment etc.
            expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`Item` object representing Hacker News item.
Raises:
InvalidItemID: If corresponding Hacker News story does not exist.
"""
url = urljoin(self.item_url, F"{item_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidItemID
item = Item(response)
if expand:
item.by = self.get_user(item.by)
item.kids = self.get_items_by_ids(item.kids) if item.kids else None
item.parent = self.get_item(item.parent) if item.parent else None
item.poll = self.get_item(item.poll) if item.poll else None
item.parts = (
self.get_items_by_ids(item.parts) if item.parts else None
)
return item
def get_items_by_ids(self, item_ids, item_type=None):
"""Given a list of item ids, return all the Item objects
Args:
item_ids (obj): List of item IDs to query
item_type (str): (optional) Item type to filter results with
Returns:
List of `Item` objects for given item IDs and given item type
"""
urls = [urljoin(self.item_url, F"{i}.json") for i in item_ids]
result = self._run_async(urls=urls)
items = [Item(r) for r in result if r]
if item_type:
return [item for item in items if item.item_type == item_type]
else:
return items
def get_user(self, user_id, expand=False):
"""Returns Hacker News `User` object.
Fetches data from the url:
https://hacker-news.firebaseio.com/v0/user/<user_id>.json
e.g. https://hacker-news.firebaseio.com/v0/user/pg.json
Args:
user_id (string): unique user id of a Hacker News user.
expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`User` object representing a user on Hacker News.
Raises:
InvalidUserID: If no such user exists on Hacker News.
"""
url = urljoin(self.user_url, F"{user_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidUserID
user = User(response)
if expand and user.submitted:
items = self.get_items_by_ids(user.submitted)
user_opt = {
'stories': 'story',
'comments': 'comment',
'jobs': 'job',
'polls': 'poll',
'pollopts': 'pollopt'
}
for key, value in user_opt.items():
setattr(
user,
key,
[i for i in items if i.item_type == value]
)
return user
def get_users_by_ids(self, user_ids):
"""
Given a list of user ids, return all the User objects
"""
urls = [urljoin(self.user_url, F"{i}.json") for i in user_ids]
result = self._run_async(urls=urls)
return [User(r) for r in result if r]
def top_stories(self, raw=False, limit=None):
"""Returns list of item ids of current top stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to represent all
objects in raw json.
Returns:
`list` object containing ids of top stories.
"""
top_stories = self._get_stories('topstories', limit)
if raw:
top_stories = [story.raw for story in top_stories]
return top_stories
def new_stories(self, raw=False, limit=None):
"""Returns list of item ids of current new stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of new stories.
"""
new_stories = self._get_stories('newstories', limit)
if raw:
new_stories = [story.raw for story in new_stories]
return new_stories
def ask_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Ask HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Ask HN stories.
"""
ask_stories = self._get_stories('askstories', limit)
if raw:
ask_stories = [story.raw for story in ask_stories]
return ask_stories
def show_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Show HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Show HN stories.
"""
show_stories = self._get_stories('showstories', limit)
if raw:
show_stories = [story.raw for story in show_stories]
return show_stories
def job_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Job stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Job stories.
"""
job_stories = self._get_stories('jobstories', limit)
if raw:
job_stories = [story.raw for story in job_stories]
return job_stories
def updates(self):
"""Returns list of item ids and user ids that have been
changed/updated recently.
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/updates.json
Returns:
`dict` with two keys whose values are `list` objects
"""
url = urljoin(self.base_url, 'updates.json')
response = self._get_sync(url)
return {
'items': self.get_items_by_ids(item_ids=response['items']),
'profiles': self.get_users_by_ids(user_ids=response['profiles'])
}
def get_max_item(self, expand=False):
"""The current largest item id
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/maxitem.json
Args:
expand (bool): Flag to indicate whether to transform all
IDs into objects.
Returns:
`int` if successful.
"""
url = urljoin(self.base_url, 'maxitem.json')
response = self._get_sync(url)
if expand:
return self.get_item(response)
else:
return response
def get_all(self):
"""Returns ENTIRE Hacker News!
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
return self.get_last(num=max_item)
def get_last(self, num=10):
"""Returns last `num` of HN stories
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
urls = [urljoin(self.item_url, F"{i}.json") for i in range(
max_item - num + 1, max_item + 1)]
result = self._run_async(urls=urls)
return [Item(r) for r in result if r]
|
avinassh/haxor
|
hackernews/__init__.py
|
HackerNews._get_async
|
python
|
async def _get_async(self, url, session):
data = None
async with session.get(url) as resp:
if resp.status == 200:
data = await resp.json()
return data
|
Asynchronous internal method used for GET requests
Args:
url (str): URL to fetch
session (obj): aiohttp client session for async loop
Returns:
        data (obj): Individual URL request's response coroutine
|
train
|
https://github.com/avinassh/haxor/blob/71dbecf87531f7a24bb39c736d53127427aaca84/hackernews/__init__.py#L92-L107
| null |
class HackerNews(object):
def __init__(self, version='v0'):
"""
Args:
version (string): specifies Hacker News API version.
Default is `v0`.
Raises:
InvalidAPIVersion: If Hacker News version is not supported.
"""
try:
self.base_url = supported_api_versions[version]
except KeyError:
raise InvalidAPIVersion
self.item_url = urljoin(self.base_url, 'item/')
self.user_url = urljoin(self.base_url, 'user/')
self.session = requests.Session()
def _get_sync(self, url):
"""Internal method used for GET requests
Args:
url (str): URL to fetch
Returns:
Individual URL request's response
Raises:
HTTPError: If HTTP request failed.
"""
response = self.session.get(url)
if response.status_code == requests.codes.ok:
return response.json()
else:
raise HTTPError
async def _async_loop(self, urls):
"""Asynchronous internal method used to request multiple URLs
Args:
urls (list): URLs to fetch
Returns:
responses (obj): All URL requests' response coroutines
"""
results = []
async with aiohttp.ClientSession(
connector=aiohttp.TCPConnector(ssl=False)
) as session:
for url in urls:
result = asyncio.ensure_future(self._get_async(url, session))
results.append(result)
responses = await asyncio.gather(*results)
return responses
def _run_async(self, urls):
"""Asynchronous event loop execution
Args:
urls (list): URLs to fetch
Returns:
results (obj): All URL requests' responses
"""
loop = asyncio.get_event_loop()
results = loop.run_until_complete(self._async_loop(urls))
return results
def _get_stories(self, page, limit):
"""
Hacker News has different categories (i.e. stories) like
'topstories', 'newstories', 'askstories', 'showstories', 'jobstories'.
This method, first fetches the relevant story ids of that category
The URL is: https://hacker-news.firebaseio.com/v0/<story_name>.json
e.g. https://hacker-news.firebaseio.com/v0/topstories.json
Then, asynchronously it fetches each story and returns the Item objects
The URL for individual story is:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
"""
url = urljoin(self.base_url, F"{page}.json")
story_ids = self._get_sync(url)[:limit]
return self.get_items_by_ids(item_ids=story_ids)
def get_item(self, item_id, expand=False):
"""Returns Hacker News `Item` object.
Fetches the data from url:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
Args:
item_id (int or string): Unique item id of Hacker News story,
comment etc.
            expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`Item` object representing Hacker News item.
Raises:
InvalidItemID: If corresponding Hacker News story does not exist.
"""
url = urljoin(self.item_url, F"{item_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidItemID
item = Item(response)
if expand:
item.by = self.get_user(item.by)
item.kids = self.get_items_by_ids(item.kids) if item.kids else None
item.parent = self.get_item(item.parent) if item.parent else None
item.poll = self.get_item(item.poll) if item.poll else None
item.parts = (
self.get_items_by_ids(item.parts) if item.parts else None
)
return item
def get_items_by_ids(self, item_ids, item_type=None):
"""Given a list of item ids, return all the Item objects
Args:
item_ids (obj): List of item IDs to query
item_type (str): (optional) Item type to filter results with
Returns:
List of `Item` objects for given item IDs and given item type
"""
urls = [urljoin(self.item_url, F"{i}.json") for i in item_ids]
result = self._run_async(urls=urls)
items = [Item(r) for r in result if r]
if item_type:
return [item for item in items if item.item_type == item_type]
else:
return items
def get_user(self, user_id, expand=False):
"""Returns Hacker News `User` object.
Fetches data from the url:
https://hacker-news.firebaseio.com/v0/user/<user_id>.json
e.g. https://hacker-news.firebaseio.com/v0/user/pg.json
Args:
user_id (string): unique user id of a Hacker News user.
expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`User` object representing a user on Hacker News.
Raises:
InvalidUserID: If no such user exists on Hacker News.
"""
url = urljoin(self.user_url, F"{user_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidUserID
user = User(response)
if expand and user.submitted:
items = self.get_items_by_ids(user.submitted)
user_opt = {
'stories': 'story',
'comments': 'comment',
'jobs': 'job',
'polls': 'poll',
'pollopts': 'pollopt'
}
for key, value in user_opt.items():
setattr(
user,
key,
[i for i in items if i.item_type == value]
)
return user
def get_users_by_ids(self, user_ids):
"""
Given a list of user ids, return all the User objects
"""
urls = [urljoin(self.user_url, F"{i}.json") for i in user_ids]
result = self._run_async(urls=urls)
return [User(r) for r in result if r]
def top_stories(self, raw=False, limit=None):
"""Returns list of item ids of current top stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to represent all
objects in raw json.
Returns:
`list` object containing ids of top stories.
"""
top_stories = self._get_stories('topstories', limit)
if raw:
top_stories = [story.raw for story in top_stories]
return top_stories
def new_stories(self, raw=False, limit=None):
"""Returns list of item ids of current new stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of new stories.
"""
new_stories = self._get_stories('newstories', limit)
if raw:
new_stories = [story.raw for story in new_stories]
return new_stories
def ask_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Ask HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Ask HN stories.
"""
ask_stories = self._get_stories('askstories', limit)
if raw:
ask_stories = [story.raw for story in ask_stories]
return ask_stories
def show_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Show HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Show HN stories.
"""
show_stories = self._get_stories('showstories', limit)
if raw:
show_stories = [story.raw for story in show_stories]
return show_stories
def job_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Job stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Job stories.
"""
job_stories = self._get_stories('jobstories', limit)
if raw:
job_stories = [story.raw for story in job_stories]
return job_stories
def updates(self):
"""Returns list of item ids and user ids that have been
changed/updated recently.
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/updates.json
Returns:
`dict` with two keys whose values are `list` objects
"""
url = urljoin(self.base_url, 'updates.json')
response = self._get_sync(url)
return {
'items': self.get_items_by_ids(item_ids=response['items']),
'profiles': self.get_users_by_ids(user_ids=response['profiles'])
}
def get_max_item(self, expand=False):
"""The current largest item id
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/maxitem.json
Args:
expand (bool): Flag to indicate whether to transform all
IDs into objects.
Returns:
`int` if successful.
"""
url = urljoin(self.base_url, 'maxitem.json')
response = self._get_sync(url)
if expand:
return self.get_item(response)
else:
return response
def get_all(self):
"""Returns ENTIRE Hacker News!
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
return self.get_last(num=max_item)
def get_last(self, num=10):
"""Returns last `num` of HN stories
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
urls = [urljoin(self.item_url, F"{i}.json") for i in range(
max_item - num + 1, max_item + 1)]
result = self._run_async(urls=urls)
return [Item(r) for r in result if r]
|
avinassh/haxor
|
hackernews/__init__.py
|
HackerNews._async_loop
|
python
|
async def _async_loop(self, urls):
    """Fetch all *urls* concurrently over a single client session.

    Args:
        urls (list): URLs to fetch.

    Returns:
        list: one response payload per URL, in input order (as produced
        by asyncio.gather over the per-URL fetch tasks).
    """
    # SSL verification is disabled, matching the upstream API usage.
    connector = aiohttp.TCPConnector(ssl=False)
    async with aiohttp.ClientSession(connector=connector) as session:
        tasks = [asyncio.ensure_future(self._get_async(url, session))
                 for url in urls]
        return await asyncio.gather(*tasks)
|
Asynchronous internal method used to request multiple URLs
Args:
urls (list): URLs to fetch
Returns:
responses (obj): All URL requests' response coroutines
|
train
|
https://github.com/avinassh/haxor/blob/71dbecf87531f7a24bb39c736d53127427aaca84/hackernews/__init__.py#L109-L127
|
[
"async def _get_async(self, url, session):\n \"\"\"Asynchronous internal method used for GET requests\n\n Args:\n url (str): URL to fetch\n session (obj): aiohttp client session for async loop\n\n Returns:\n data (obj): Individual URL request's response corountine\n\n \"\"\"\n data = None\n async with session.get(url) as resp:\n if resp.status == 200:\n data = await resp.json()\n return data\n"
] |
class HackerNews(object):
def __init__(self, version='v0'):
"""
Args:
version (string): specifies Hacker News API version.
Default is `v0`.
Raises:
InvalidAPIVersion: If Hacker News version is not supported.
"""
try:
self.base_url = supported_api_versions[version]
except KeyError:
raise InvalidAPIVersion
self.item_url = urljoin(self.base_url, 'item/')
self.user_url = urljoin(self.base_url, 'user/')
self.session = requests.Session()
def _get_sync(self, url):
"""Internal method used for GET requests
Args:
url (str): URL to fetch
Returns:
Individual URL request's response
Raises:
HTTPError: If HTTP request failed.
"""
response = self.session.get(url)
if response.status_code == requests.codes.ok:
return response.json()
else:
raise HTTPError
async def _get_async(self, url, session):
"""Asynchronous internal method used for GET requests
Args:
url (str): URL to fetch
session (obj): aiohttp client session for async loop
Returns:
            data (obj): Individual URL request's response coroutine
"""
data = None
async with session.get(url) as resp:
if resp.status == 200:
data = await resp.json()
return data
def _run_async(self, urls):
"""Asynchronous event loop execution
Args:
urls (list): URLs to fetch
Returns:
results (obj): All URL requests' responses
"""
loop = asyncio.get_event_loop()
results = loop.run_until_complete(self._async_loop(urls))
return results
def _get_stories(self, page, limit):
"""
Hacker News has different categories (i.e. stories) like
'topstories', 'newstories', 'askstories', 'showstories', 'jobstories'.
This method, first fetches the relevant story ids of that category
The URL is: https://hacker-news.firebaseio.com/v0/<story_name>.json
e.g. https://hacker-news.firebaseio.com/v0/topstories.json
Then, asynchronously it fetches each story and returns the Item objects
The URL for individual story is:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
"""
url = urljoin(self.base_url, F"{page}.json")
story_ids = self._get_sync(url)[:limit]
return self.get_items_by_ids(item_ids=story_ids)
def get_item(self, item_id, expand=False):
"""Returns Hacker News `Item` object.
Fetches the data from url:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
Args:
item_id (int or string): Unique item id of Hacker News story,
comment etc.
            expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`Item` object representing Hacker News item.
Raises:
InvalidItemID: If corresponding Hacker News story does not exist.
"""
url = urljoin(self.item_url, F"{item_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidItemID
item = Item(response)
if expand:
item.by = self.get_user(item.by)
item.kids = self.get_items_by_ids(item.kids) if item.kids else None
item.parent = self.get_item(item.parent) if item.parent else None
item.poll = self.get_item(item.poll) if item.poll else None
item.parts = (
self.get_items_by_ids(item.parts) if item.parts else None
)
return item
def get_items_by_ids(self, item_ids, item_type=None):
"""Given a list of item ids, return all the Item objects
Args:
item_ids (obj): List of item IDs to query
item_type (str): (optional) Item type to filter results with
Returns:
List of `Item` objects for given item IDs and given item type
"""
urls = [urljoin(self.item_url, F"{i}.json") for i in item_ids]
result = self._run_async(urls=urls)
items = [Item(r) for r in result if r]
if item_type:
return [item for item in items if item.item_type == item_type]
else:
return items
def get_user(self, user_id, expand=False):
"""Returns Hacker News `User` object.
Fetches data from the url:
https://hacker-news.firebaseio.com/v0/user/<user_id>.json
e.g. https://hacker-news.firebaseio.com/v0/user/pg.json
Args:
user_id (string): unique user id of a Hacker News user.
expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`User` object representing a user on Hacker News.
Raises:
InvalidUserID: If no such user exists on Hacker News.
"""
url = urljoin(self.user_url, F"{user_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidUserID
user = User(response)
if expand and user.submitted:
items = self.get_items_by_ids(user.submitted)
user_opt = {
'stories': 'story',
'comments': 'comment',
'jobs': 'job',
'polls': 'poll',
'pollopts': 'pollopt'
}
for key, value in user_opt.items():
setattr(
user,
key,
[i for i in items if i.item_type == value]
)
return user
def get_users_by_ids(self, user_ids):
"""
Given a list of user ids, return all the User objects
"""
urls = [urljoin(self.user_url, F"{i}.json") for i in user_ids]
result = self._run_async(urls=urls)
return [User(r) for r in result if r]
def top_stories(self, raw=False, limit=None):
"""Returns list of item ids of current top stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to represent all
objects in raw json.
Returns:
`list` object containing ids of top stories.
"""
top_stories = self._get_stories('topstories', limit)
if raw:
top_stories = [story.raw for story in top_stories]
return top_stories
def new_stories(self, raw=False, limit=None):
"""Returns list of item ids of current new stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of new stories.
"""
new_stories = self._get_stories('newstories', limit)
if raw:
new_stories = [story.raw for story in new_stories]
return new_stories
def ask_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Ask HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Ask HN stories.
"""
ask_stories = self._get_stories('askstories', limit)
if raw:
ask_stories = [story.raw for story in ask_stories]
return ask_stories
def show_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Show HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Show HN stories.
"""
show_stories = self._get_stories('showstories', limit)
if raw:
show_stories = [story.raw for story in show_stories]
return show_stories
def job_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Job stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Job stories.
"""
job_stories = self._get_stories('jobstories', limit)
if raw:
job_stories = [story.raw for story in job_stories]
return job_stories
def updates(self):
"""Returns list of item ids and user ids that have been
changed/updated recently.
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/updates.json
Returns:
`dict` with two keys whose values are `list` objects
"""
url = urljoin(self.base_url, 'updates.json')
response = self._get_sync(url)
return {
'items': self.get_items_by_ids(item_ids=response['items']),
'profiles': self.get_users_by_ids(user_ids=response['profiles'])
}
def get_max_item(self, expand=False):
"""The current largest item id
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/maxitem.json
Args:
expand (bool): Flag to indicate whether to transform all
IDs into objects.
Returns:
`int` if successful.
"""
url = urljoin(self.base_url, 'maxitem.json')
response = self._get_sync(url)
if expand:
return self.get_item(response)
else:
return response
def get_all(self):
"""Returns ENTIRE Hacker News!
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
return self.get_last(num=max_item)
def get_last(self, num=10):
"""Returns last `num` of HN stories
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
urls = [urljoin(self.item_url, F"{i}.json") for i in range(
max_item - num + 1, max_item + 1)]
result = self._run_async(urls=urls)
return [Item(r) for r in result if r]
|
avinassh/haxor
|
hackernews/__init__.py
|
HackerNews._run_async
|
python
|
def _run_async(self, urls):
    """Drive the asynchronous fetch loop to completion from sync code.

    Args:
        urls (list): URLs to fetch.

    Returns:
        list: one response payload per URL, in input order.
    """
    # NOTE(review): asyncio.get_event_loop() is deprecated for this use
    # since Python 3.10 (asyncio.run() is the modern equivalent) — confirm
    # before changing, as callers may rely on the current loop being reused.
    loop = asyncio.get_event_loop()
    results = loop.run_until_complete(self._async_loop(urls))
    return results
|
Asynchronous event loop execution
Args:
urls (list): URLs to fetch
Returns:
results (obj): All URL requests' responses
|
train
|
https://github.com/avinassh/haxor/blob/71dbecf87531f7a24bb39c736d53127427aaca84/hackernews/__init__.py#L129-L141
|
[
"async def _async_loop(self, urls):\n \"\"\"Asynchronous internal method used to request multiple URLs\n\n Args:\n urls (list): URLs to fetch\n\n Returns:\n responses (obj): All URL requests' response coroutines\n\n \"\"\"\n results = []\n async with aiohttp.ClientSession(\n connector=aiohttp.TCPConnector(ssl=False)\n ) as session:\n for url in urls:\n result = asyncio.ensure_future(self._get_async(url, session))\n results.append(result)\n responses = await asyncio.gather(*results)\n return responses\n"
] |
class HackerNews(object):
def __init__(self, version='v0'):
"""
Args:
version (string): specifies Hacker News API version.
Default is `v0`.
Raises:
InvalidAPIVersion: If Hacker News version is not supported.
"""
try:
self.base_url = supported_api_versions[version]
except KeyError:
raise InvalidAPIVersion
self.item_url = urljoin(self.base_url, 'item/')
self.user_url = urljoin(self.base_url, 'user/')
self.session = requests.Session()
def _get_sync(self, url):
"""Internal method used for GET requests
Args:
url (str): URL to fetch
Returns:
Individual URL request's response
Raises:
HTTPError: If HTTP request failed.
"""
response = self.session.get(url)
if response.status_code == requests.codes.ok:
return response.json()
else:
raise HTTPError
async def _get_async(self, url, session):
"""Asynchronous internal method used for GET requests
Args:
url (str): URL to fetch
session (obj): aiohttp client session for async loop
Returns:
            data (obj): Individual URL request's response coroutine
"""
data = None
async with session.get(url) as resp:
if resp.status == 200:
data = await resp.json()
return data
async def _async_loop(self, urls):
"""Asynchronous internal method used to request multiple URLs
Args:
urls (list): URLs to fetch
Returns:
responses (obj): All URL requests' response coroutines
"""
results = []
async with aiohttp.ClientSession(
connector=aiohttp.TCPConnector(ssl=False)
) as session:
for url in urls:
result = asyncio.ensure_future(self._get_async(url, session))
results.append(result)
responses = await asyncio.gather(*results)
return responses
def _get_stories(self, page, limit):
"""
Hacker News has different categories (i.e. stories) like
'topstories', 'newstories', 'askstories', 'showstories', 'jobstories'.
This method, first fetches the relevant story ids of that category
The URL is: https://hacker-news.firebaseio.com/v0/<story_name>.json
e.g. https://hacker-news.firebaseio.com/v0/topstories.json
Then, asynchronously it fetches each story and returns the Item objects
The URL for individual story is:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
"""
url = urljoin(self.base_url, F"{page}.json")
story_ids = self._get_sync(url)[:limit]
return self.get_items_by_ids(item_ids=story_ids)
def get_item(self, item_id, expand=False):
"""Returns Hacker News `Item` object.
Fetches the data from url:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
Args:
item_id (int or string): Unique item id of Hacker News story,
comment etc.
            expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`Item` object representing Hacker News item.
Raises:
InvalidItemID: If corresponding Hacker News story does not exist.
"""
url = urljoin(self.item_url, F"{item_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidItemID
item = Item(response)
if expand:
item.by = self.get_user(item.by)
item.kids = self.get_items_by_ids(item.kids) if item.kids else None
item.parent = self.get_item(item.parent) if item.parent else None
item.poll = self.get_item(item.poll) if item.poll else None
item.parts = (
self.get_items_by_ids(item.parts) if item.parts else None
)
return item
def get_items_by_ids(self, item_ids, item_type=None):
"""Given a list of item ids, return all the Item objects
Args:
item_ids (obj): List of item IDs to query
item_type (str): (optional) Item type to filter results with
Returns:
List of `Item` objects for given item IDs and given item type
"""
urls = [urljoin(self.item_url, F"{i}.json") for i in item_ids]
result = self._run_async(urls=urls)
items = [Item(r) for r in result if r]
if item_type:
return [item for item in items if item.item_type == item_type]
else:
return items
def get_user(self, user_id, expand=False):
"""Returns Hacker News `User` object.
Fetches data from the url:
https://hacker-news.firebaseio.com/v0/user/<user_id>.json
e.g. https://hacker-news.firebaseio.com/v0/user/pg.json
Args:
user_id (string): unique user id of a Hacker News user.
expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`User` object representing a user on Hacker News.
Raises:
InvalidUserID: If no such user exists on Hacker News.
"""
url = urljoin(self.user_url, F"{user_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidUserID
user = User(response)
if expand and user.submitted:
items = self.get_items_by_ids(user.submitted)
user_opt = {
'stories': 'story',
'comments': 'comment',
'jobs': 'job',
'polls': 'poll',
'pollopts': 'pollopt'
}
for key, value in user_opt.items():
setattr(
user,
key,
[i for i in items if i.item_type == value]
)
return user
def get_users_by_ids(self, user_ids):
"""
Given a list of user ids, return all the User objects
"""
urls = [urljoin(self.user_url, F"{i}.json") for i in user_ids]
result = self._run_async(urls=urls)
return [User(r) for r in result if r]
def top_stories(self, raw=False, limit=None):
"""Returns list of item ids of current top stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to represent all
objects in raw json.
Returns:
`list` object containing ids of top stories.
"""
top_stories = self._get_stories('topstories', limit)
if raw:
top_stories = [story.raw for story in top_stories]
return top_stories
def new_stories(self, raw=False, limit=None):
"""Returns list of item ids of current new stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of new stories.
"""
new_stories = self._get_stories('newstories', limit)
if raw:
new_stories = [story.raw for story in new_stories]
return new_stories
def ask_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Ask HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Ask HN stories.
"""
ask_stories = self._get_stories('askstories', limit)
if raw:
ask_stories = [story.raw for story in ask_stories]
return ask_stories
def show_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Show HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Show HN stories.
"""
show_stories = self._get_stories('showstories', limit)
if raw:
show_stories = [story.raw for story in show_stories]
return show_stories
def job_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Job stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Job stories.
"""
job_stories = self._get_stories('jobstories', limit)
if raw:
job_stories = [story.raw for story in job_stories]
return job_stories
def updates(self):
"""Returns list of item ids and user ids that have been
changed/updated recently.
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/updates.json
Returns:
`dict` with two keys whose values are `list` objects
"""
url = urljoin(self.base_url, 'updates.json')
response = self._get_sync(url)
return {
'items': self.get_items_by_ids(item_ids=response['items']),
'profiles': self.get_users_by_ids(user_ids=response['profiles'])
}
def get_max_item(self, expand=False):
"""The current largest item id
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/maxitem.json
Args:
expand (bool): Flag to indicate whether to transform all
IDs into objects.
Returns:
`int` if successful.
"""
url = urljoin(self.base_url, 'maxitem.json')
response = self._get_sync(url)
if expand:
return self.get_item(response)
else:
return response
def get_all(self):
"""Returns ENTIRE Hacker News!
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
return self.get_last(num=max_item)
def get_last(self, num=10):
"""Returns last `num` of HN stories
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
urls = [urljoin(self.item_url, F"{i}.json") for i in range(
max_item - num + 1, max_item + 1)]
result = self._run_async(urls=urls)
return [Item(r) for r in result if r]
|
avinassh/haxor
|
hackernews/__init__.py
|
HackerNews._get_stories
|
python
|
def _get_stories(self, page, limit):
url = urljoin(self.base_url, F"{page}.json")
story_ids = self._get_sync(url)[:limit]
return self.get_items_by_ids(item_ids=story_ids)
|
Hacker News has different categories (i.e. stories) like
'topstories', 'newstories', 'askstories', 'showstories', 'jobstories'.
This method, first fetches the relevant story ids of that category
The URL is: https://hacker-news.firebaseio.com/v0/<story_name>.json
e.g. https://hacker-news.firebaseio.com/v0/topstories.json
Then, asynchronously it fetches each story and returns the Item objects
The URL for individual story is:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
|
train
|
https://github.com/avinassh/haxor/blob/71dbecf87531f7a24bb39c736d53127427aaca84/hackernews/__init__.py#L143-L163
|
[
"def _get_sync(self, url):\n \"\"\"Internal method used for GET requests\n\n Args:\n url (str): URL to fetch\n\n Returns:\n Individual URL request's response\n\n Raises:\n HTTPError: If HTTP request failed.\n \"\"\"\n response = self.session.get(url)\n if response.status_code == requests.codes.ok:\n return response.json()\n else:\n raise HTTPError\n",
"def get_items_by_ids(self, item_ids, item_type=None):\n \"\"\"Given a list of item ids, return all the Item objects\n\n Args:\n item_ids (obj): List of item IDs to query\n item_type (str): (optional) Item type to filter results with\n\n Returns:\n List of `Item` objects for given item IDs and given item type\n\n \"\"\"\n urls = [urljoin(self.item_url, F\"{i}.json\") for i in item_ids]\n result = self._run_async(urls=urls)\n items = [Item(r) for r in result if r]\n if item_type:\n return [item for item in items if item.item_type == item_type]\n else:\n return items\n"
] |
class HackerNews(object):
def __init__(self, version='v0'):
"""
Args:
version (string): specifies Hacker News API version.
Default is `v0`.
Raises:
InvalidAPIVersion: If Hacker News version is not supported.
"""
try:
self.base_url = supported_api_versions[version]
except KeyError:
raise InvalidAPIVersion
self.item_url = urljoin(self.base_url, 'item/')
self.user_url = urljoin(self.base_url, 'user/')
self.session = requests.Session()
def _get_sync(self, url):
"""Internal method used for GET requests
Args:
url (str): URL to fetch
Returns:
Individual URL request's response
Raises:
HTTPError: If HTTP request failed.
"""
response = self.session.get(url)
if response.status_code == requests.codes.ok:
return response.json()
else:
raise HTTPError
async def _get_async(self, url, session):
"""Asynchronous internal method used for GET requests
Args:
url (str): URL to fetch
session (obj): aiohttp client session for async loop
Returns:
data (obj): Individual URL request's response corountine
"""
data = None
async with session.get(url) as resp:
if resp.status == 200:
data = await resp.json()
return data
async def _async_loop(self, urls):
"""Asynchronous internal method used to request multiple URLs
Args:
urls (list): URLs to fetch
Returns:
responses (obj): All URL requests' response coroutines
"""
results = []
async with aiohttp.ClientSession(
connector=aiohttp.TCPConnector(ssl=False)
) as session:
for url in urls:
result = asyncio.ensure_future(self._get_async(url, session))
results.append(result)
responses = await asyncio.gather(*results)
return responses
def _run_async(self, urls):
"""Asynchronous event loop execution
Args:
urls (list): URLs to fetch
Returns:
results (obj): All URL requests' responses
"""
loop = asyncio.get_event_loop()
results = loop.run_until_complete(self._async_loop(urls))
return results
def get_item(self, item_id, expand=False):
"""Returns Hacker News `Item` object.
Fetches the data from url:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
Args:
item_id (int or string): Unique item id of Hacker News story,
comment etc.
expand (bool): expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`Item` object representing Hacker News item.
Raises:
InvalidItemID: If corresponding Hacker News story does not exist.
"""
url = urljoin(self.item_url, F"{item_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidItemID
item = Item(response)
if expand:
item.by = self.get_user(item.by)
item.kids = self.get_items_by_ids(item.kids) if item.kids else None
item.parent = self.get_item(item.parent) if item.parent else None
item.poll = self.get_item(item.poll) if item.poll else None
item.parts = (
self.get_items_by_ids(item.parts) if item.parts else None
)
return item
def get_items_by_ids(self, item_ids, item_type=None):
"""Given a list of item ids, return all the Item objects
Args:
item_ids (obj): List of item IDs to query
item_type (str): (optional) Item type to filter results with
Returns:
List of `Item` objects for given item IDs and given item type
"""
urls = [urljoin(self.item_url, F"{i}.json") for i in item_ids]
result = self._run_async(urls=urls)
items = [Item(r) for r in result if r]
if item_type:
return [item for item in items if item.item_type == item_type]
else:
return items
def get_user(self, user_id, expand=False):
"""Returns Hacker News `User` object.
Fetches data from the url:
https://hacker-news.firebaseio.com/v0/user/<user_id>.json
e.g. https://hacker-news.firebaseio.com/v0/user/pg.json
Args:
user_id (string): unique user id of a Hacker News user.
expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`User` object representing a user on Hacker News.
Raises:
InvalidUserID: If no such user exists on Hacker News.
"""
url = urljoin(self.user_url, F"{user_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidUserID
user = User(response)
if expand and user.submitted:
items = self.get_items_by_ids(user.submitted)
user_opt = {
'stories': 'story',
'comments': 'comment',
'jobs': 'job',
'polls': 'poll',
'pollopts': 'pollopt'
}
for key, value in user_opt.items():
setattr(
user,
key,
[i for i in items if i.item_type == value]
)
return user
def get_users_by_ids(self, user_ids):
"""
Given a list of user ids, return all the User objects
"""
urls = [urljoin(self.user_url, F"{i}.json") for i in user_ids]
result = self._run_async(urls=urls)
return [User(r) for r in result if r]
def top_stories(self, raw=False, limit=None):
"""Returns list of item ids of current top stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to represent all
objects in raw json.
Returns:
`list` object containing ids of top stories.
"""
top_stories = self._get_stories('topstories', limit)
if raw:
top_stories = [story.raw for story in top_stories]
return top_stories
def new_stories(self, raw=False, limit=None):
"""Returns list of item ids of current new stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of new stories.
"""
new_stories = self._get_stories('newstories', limit)
if raw:
new_stories = [story.raw for story in new_stories]
return new_stories
def ask_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Ask HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Ask HN stories.
"""
ask_stories = self._get_stories('askstories', limit)
if raw:
ask_stories = [story.raw for story in ask_stories]
return ask_stories
def show_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Show HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Show HN stories.
"""
show_stories = self._get_stories('showstories', limit)
if raw:
show_stories = [story.raw for story in show_stories]
return show_stories
def job_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Job stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Job stories.
"""
job_stories = self._get_stories('jobstories', limit)
if raw:
job_stories = [story.raw for story in job_stories]
return job_stories
def updates(self):
"""Returns list of item ids and user ids that have been
changed/updated recently.
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/updates.json
Returns:
`dict` with two keys whose values are `list` objects
"""
url = urljoin(self.base_url, 'updates.json')
response = self._get_sync(url)
return {
'items': self.get_items_by_ids(item_ids=response['items']),
'profiles': self.get_users_by_ids(user_ids=response['profiles'])
}
def get_max_item(self, expand=False):
"""The current largest item id
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/maxitem.json
Args:
expand (bool): Flag to indicate whether to transform all
IDs into objects.
Returns:
`int` if successful.
"""
url = urljoin(self.base_url, 'maxitem.json')
response = self._get_sync(url)
if expand:
return self.get_item(response)
else:
return response
def get_all(self):
"""Returns ENTIRE Hacker News!
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
return self.get_last(num=max_item)
def get_last(self, num=10):
"""Returns last `num` of HN stories
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
urls = [urljoin(self.item_url, F"{i}.json") for i in range(
max_item - num + 1, max_item + 1)]
result = self._run_async(urls=urls)
return [Item(r) for r in result if r]
|
avinassh/haxor
|
hackernews/__init__.py
|
HackerNews.get_item
|
python
|
def get_item(self, item_id, expand=False):
url = urljoin(self.item_url, F"{item_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidItemID
item = Item(response)
if expand:
item.by = self.get_user(item.by)
item.kids = self.get_items_by_ids(item.kids) if item.kids else None
item.parent = self.get_item(item.parent) if item.parent else None
item.poll = self.get_item(item.poll) if item.poll else None
item.parts = (
self.get_items_by_ids(item.parts) if item.parts else None
)
return item
|
Returns Hacker News `Item` object.
Fetches the data from url:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
Args:
item_id (int or string): Unique item id of Hacker News story,
comment etc.
expand (bool): expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`Item` object representing Hacker News item.
Raises:
InvalidItemID: If corresponding Hacker News story does not exist.
|
train
|
https://github.com/avinassh/haxor/blob/71dbecf87531f7a24bb39c736d53127427aaca84/hackernews/__init__.py#L165-L202
|
[
"def _get_sync(self, url):\n \"\"\"Internal method used for GET requests\n\n Args:\n url (str): URL to fetch\n\n Returns:\n Individual URL request's response\n\n Raises:\n HTTPError: If HTTP request failed.\n \"\"\"\n response = self.session.get(url)\n if response.status_code == requests.codes.ok:\n return response.json()\n else:\n raise HTTPError\n",
"def get_item(self, item_id, expand=False):\n \"\"\"Returns Hacker News `Item` object.\n\n Fetches the data from url:\n https://hacker-news.firebaseio.com/v0/item/<item_id>.json\n\n e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json\n\n Args:\n item_id (int or string): Unique item id of Hacker News story,\n comment etc.\n expand (bool): expand (bool): Flag to indicate whether to\n transform all IDs into objects.\n\n Returns:\n `Item` object representing Hacker News item.\n\n Raises:\n InvalidItemID: If corresponding Hacker News story does not exist.\n\n \"\"\"\n url = urljoin(self.item_url, F\"{item_id}.json\")\n response = self._get_sync(url)\n\n if not response:\n raise InvalidItemID\n\n item = Item(response)\n if expand:\n item.by = self.get_user(item.by)\n item.kids = self.get_items_by_ids(item.kids) if item.kids else None\n item.parent = self.get_item(item.parent) if item.parent else None\n item.poll = self.get_item(item.poll) if item.poll else None\n item.parts = (\n self.get_items_by_ids(item.parts) if item.parts else None\n )\n\n return item\n",
"def get_items_by_ids(self, item_ids, item_type=None):\n \"\"\"Given a list of item ids, return all the Item objects\n\n Args:\n item_ids (obj): List of item IDs to query\n item_type (str): (optional) Item type to filter results with\n\n Returns:\n List of `Item` objects for given item IDs and given item type\n\n \"\"\"\n urls = [urljoin(self.item_url, F\"{i}.json\") for i in item_ids]\n result = self._run_async(urls=urls)\n items = [Item(r) for r in result if r]\n if item_type:\n return [item for item in items if item.item_type == item_type]\n else:\n return items\n",
"def get_user(self, user_id, expand=False):\n \"\"\"Returns Hacker News `User` object.\n\n Fetches data from the url:\n https://hacker-news.firebaseio.com/v0/user/<user_id>.json\n\n e.g. https://hacker-news.firebaseio.com/v0/user/pg.json\n\n Args:\n user_id (string): unique user id of a Hacker News user.\n expand (bool): Flag to indicate whether to\n transform all IDs into objects.\n\n Returns:\n `User` object representing a user on Hacker News.\n\n Raises:\n InvalidUserID: If no such user exists on Hacker News.\n\n \"\"\"\n url = urljoin(self.user_url, F\"{user_id}.json\")\n response = self._get_sync(url)\n\n if not response:\n raise InvalidUserID\n\n user = User(response)\n if expand and user.submitted:\n items = self.get_items_by_ids(user.submitted)\n user_opt = {\n 'stories': 'story',\n 'comments': 'comment',\n 'jobs': 'job',\n 'polls': 'poll',\n 'pollopts': 'pollopt'\n }\n for key, value in user_opt.items():\n setattr(\n user,\n key,\n [i for i in items if i.item_type == value]\n )\n\n return user\n"
] |
class HackerNews(object):
def __init__(self, version='v0'):
"""
Args:
version (string): specifies Hacker News API version.
Default is `v0`.
Raises:
InvalidAPIVersion: If Hacker News version is not supported.
"""
try:
self.base_url = supported_api_versions[version]
except KeyError:
raise InvalidAPIVersion
self.item_url = urljoin(self.base_url, 'item/')
self.user_url = urljoin(self.base_url, 'user/')
self.session = requests.Session()
def _get_sync(self, url):
"""Internal method used for GET requests
Args:
url (str): URL to fetch
Returns:
Individual URL request's response
Raises:
HTTPError: If HTTP request failed.
"""
response = self.session.get(url)
if response.status_code == requests.codes.ok:
return response.json()
else:
raise HTTPError
async def _get_async(self, url, session):
"""Asynchronous internal method used for GET requests
Args:
url (str): URL to fetch
session (obj): aiohttp client session for async loop
Returns:
data (obj): Individual URL request's response corountine
"""
data = None
async with session.get(url) as resp:
if resp.status == 200:
data = await resp.json()
return data
async def _async_loop(self, urls):
"""Asynchronous internal method used to request multiple URLs
Args:
urls (list): URLs to fetch
Returns:
responses (obj): All URL requests' response coroutines
"""
results = []
async with aiohttp.ClientSession(
connector=aiohttp.TCPConnector(ssl=False)
) as session:
for url in urls:
result = asyncio.ensure_future(self._get_async(url, session))
results.append(result)
responses = await asyncio.gather(*results)
return responses
def _run_async(self, urls):
"""Asynchronous event loop execution
Args:
urls (list): URLs to fetch
Returns:
results (obj): All URL requests' responses
"""
loop = asyncio.get_event_loop()
results = loop.run_until_complete(self._async_loop(urls))
return results
def _get_stories(self, page, limit):
"""
Hacker News has different categories (i.e. stories) like
'topstories', 'newstories', 'askstories', 'showstories', 'jobstories'.
This method, first fetches the relevant story ids of that category
The URL is: https://hacker-news.firebaseio.com/v0/<story_name>.json
e.g. https://hacker-news.firebaseio.com/v0/topstories.json
Then, asynchronously it fetches each story and returns the Item objects
The URL for individual story is:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
"""
url = urljoin(self.base_url, F"{page}.json")
story_ids = self._get_sync(url)[:limit]
return self.get_items_by_ids(item_ids=story_ids)
def get_items_by_ids(self, item_ids, item_type=None):
"""Given a list of item ids, return all the Item objects
Args:
item_ids (obj): List of item IDs to query
item_type (str): (optional) Item type to filter results with
Returns:
List of `Item` objects for given item IDs and given item type
"""
urls = [urljoin(self.item_url, F"{i}.json") for i in item_ids]
result = self._run_async(urls=urls)
items = [Item(r) for r in result if r]
if item_type:
return [item for item in items if item.item_type == item_type]
else:
return items
def get_user(self, user_id, expand=False):
"""Returns Hacker News `User` object.
Fetches data from the url:
https://hacker-news.firebaseio.com/v0/user/<user_id>.json
e.g. https://hacker-news.firebaseio.com/v0/user/pg.json
Args:
user_id (string): unique user id of a Hacker News user.
expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`User` object representing a user on Hacker News.
Raises:
InvalidUserID: If no such user exists on Hacker News.
"""
url = urljoin(self.user_url, F"{user_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidUserID
user = User(response)
if expand and user.submitted:
items = self.get_items_by_ids(user.submitted)
user_opt = {
'stories': 'story',
'comments': 'comment',
'jobs': 'job',
'polls': 'poll',
'pollopts': 'pollopt'
}
for key, value in user_opt.items():
setattr(
user,
key,
[i for i in items if i.item_type == value]
)
return user
def get_users_by_ids(self, user_ids):
"""
Given a list of user ids, return all the User objects
"""
urls = [urljoin(self.user_url, F"{i}.json") for i in user_ids]
result = self._run_async(urls=urls)
return [User(r) for r in result if r]
def top_stories(self, raw=False, limit=None):
"""Returns list of item ids of current top stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to represent all
objects in raw json.
Returns:
`list` object containing ids of top stories.
"""
top_stories = self._get_stories('topstories', limit)
if raw:
top_stories = [story.raw for story in top_stories]
return top_stories
def new_stories(self, raw=False, limit=None):
"""Returns list of item ids of current new stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of new stories.
"""
new_stories = self._get_stories('newstories', limit)
if raw:
new_stories = [story.raw for story in new_stories]
return new_stories
def ask_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Ask HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Ask HN stories.
"""
ask_stories = self._get_stories('askstories', limit)
if raw:
ask_stories = [story.raw for story in ask_stories]
return ask_stories
def show_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Show HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Show HN stories.
"""
show_stories = self._get_stories('showstories', limit)
if raw:
show_stories = [story.raw for story in show_stories]
return show_stories
def job_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Job stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Job stories.
"""
job_stories = self._get_stories('jobstories', limit)
if raw:
job_stories = [story.raw for story in job_stories]
return job_stories
def updates(self):
"""Returns list of item ids and user ids that have been
changed/updated recently.
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/updates.json
Returns:
`dict` with two keys whose values are `list` objects
"""
url = urljoin(self.base_url, 'updates.json')
response = self._get_sync(url)
return {
'items': self.get_items_by_ids(item_ids=response['items']),
'profiles': self.get_users_by_ids(user_ids=response['profiles'])
}
def get_max_item(self, expand=False):
"""The current largest item id
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/maxitem.json
Args:
expand (bool): Flag to indicate whether to transform all
IDs into objects.
Returns:
`int` if successful.
"""
url = urljoin(self.base_url, 'maxitem.json')
response = self._get_sync(url)
if expand:
return self.get_item(response)
else:
return response
def get_all(self):
"""Returns ENTIRE Hacker News!
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
return self.get_last(num=max_item)
def get_last(self, num=10):
"""Returns last `num` of HN stories
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
urls = [urljoin(self.item_url, F"{i}.json") for i in range(
max_item - num + 1, max_item + 1)]
result = self._run_async(urls=urls)
return [Item(r) for r in result if r]
|
avinassh/haxor
|
hackernews/__init__.py
|
HackerNews.get_items_by_ids
|
python
|
def get_items_by_ids(self, item_ids, item_type=None):
urls = [urljoin(self.item_url, F"{i}.json") for i in item_ids]
result = self._run_async(urls=urls)
items = [Item(r) for r in result if r]
if item_type:
return [item for item in items if item.item_type == item_type]
else:
return items
|
Given a list of item ids, return all the Item objects
Args:
item_ids (obj): List of item IDs to query
item_type (str): (optional) Item type to filter results with
Returns:
List of `Item` objects for given item IDs and given item type
|
train
|
https://github.com/avinassh/haxor/blob/71dbecf87531f7a24bb39c736d53127427aaca84/hackernews/__init__.py#L204-L221
|
[
"def _run_async(self, urls):\n \"\"\"Asynchronous event loop execution\n\n Args:\n urls (list): URLs to fetch\n\n Returns:\n results (obj): All URL requests' responses\n\n \"\"\"\n loop = asyncio.get_event_loop()\n results = loop.run_until_complete(self._async_loop(urls))\n return results\n"
] |
class HackerNews(object):
def __init__(self, version='v0'):
"""
Args:
version (string): specifies Hacker News API version.
Default is `v0`.
Raises:
InvalidAPIVersion: If Hacker News version is not supported.
"""
try:
self.base_url = supported_api_versions[version]
except KeyError:
raise InvalidAPIVersion
self.item_url = urljoin(self.base_url, 'item/')
self.user_url = urljoin(self.base_url, 'user/')
self.session = requests.Session()
def _get_sync(self, url):
"""Internal method used for GET requests
Args:
url (str): URL to fetch
Returns:
Individual URL request's response
Raises:
HTTPError: If HTTP request failed.
"""
response = self.session.get(url)
if response.status_code == requests.codes.ok:
return response.json()
else:
raise HTTPError
async def _get_async(self, url, session):
"""Asynchronous internal method used for GET requests
Args:
url (str): URL to fetch
session (obj): aiohttp client session for async loop
Returns:
data (obj): Individual URL request's response corountine
"""
data = None
async with session.get(url) as resp:
if resp.status == 200:
data = await resp.json()
return data
async def _async_loop(self, urls):
"""Asynchronous internal method used to request multiple URLs
Args:
urls (list): URLs to fetch
Returns:
responses (obj): All URL requests' response coroutines
"""
results = []
async with aiohttp.ClientSession(
connector=aiohttp.TCPConnector(ssl=False)
) as session:
for url in urls:
result = asyncio.ensure_future(self._get_async(url, session))
results.append(result)
responses = await asyncio.gather(*results)
return responses
def _run_async(self, urls):
"""Asynchronous event loop execution
Args:
urls (list): URLs to fetch
Returns:
results (obj): All URL requests' responses
"""
loop = asyncio.get_event_loop()
results = loop.run_until_complete(self._async_loop(urls))
return results
def _get_stories(self, page, limit):
"""
Hacker News has different categories (i.e. stories) like
'topstories', 'newstories', 'askstories', 'showstories', 'jobstories'.
This method, first fetches the relevant story ids of that category
The URL is: https://hacker-news.firebaseio.com/v0/<story_name>.json
e.g. https://hacker-news.firebaseio.com/v0/topstories.json
Then, asynchronously it fetches each story and returns the Item objects
The URL for individual story is:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
"""
url = urljoin(self.base_url, F"{page}.json")
story_ids = self._get_sync(url)[:limit]
return self.get_items_by_ids(item_ids=story_ids)
def get_item(self, item_id, expand=False):
"""Returns Hacker News `Item` object.
Fetches the data from url:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
Args:
item_id (int or string): Unique item id of Hacker News story,
comment etc.
expand (bool): expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`Item` object representing Hacker News item.
Raises:
InvalidItemID: If corresponding Hacker News story does not exist.
"""
url = urljoin(self.item_url, F"{item_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidItemID
item = Item(response)
if expand:
item.by = self.get_user(item.by)
item.kids = self.get_items_by_ids(item.kids) if item.kids else None
item.parent = self.get_item(item.parent) if item.parent else None
item.poll = self.get_item(item.poll) if item.poll else None
item.parts = (
self.get_items_by_ids(item.parts) if item.parts else None
)
return item
def get_user(self, user_id, expand=False):
"""Returns Hacker News `User` object.
Fetches data from the url:
https://hacker-news.firebaseio.com/v0/user/<user_id>.json
e.g. https://hacker-news.firebaseio.com/v0/user/pg.json
Args:
user_id (string): unique user id of a Hacker News user.
expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`User` object representing a user on Hacker News.
Raises:
InvalidUserID: If no such user exists on Hacker News.
"""
url = urljoin(self.user_url, F"{user_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidUserID
user = User(response)
if expand and user.submitted:
items = self.get_items_by_ids(user.submitted)
user_opt = {
'stories': 'story',
'comments': 'comment',
'jobs': 'job',
'polls': 'poll',
'pollopts': 'pollopt'
}
for key, value in user_opt.items():
setattr(
user,
key,
[i for i in items if i.item_type == value]
)
return user
def get_users_by_ids(self, user_ids):
"""
Given a list of user ids, return all the User objects
"""
urls = [urljoin(self.user_url, F"{i}.json") for i in user_ids]
result = self._run_async(urls=urls)
return [User(r) for r in result if r]
def top_stories(self, raw=False, limit=None):
"""Returns list of item ids of current top stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to represent all
objects in raw json.
Returns:
`list` object containing ids of top stories.
"""
top_stories = self._get_stories('topstories', limit)
if raw:
top_stories = [story.raw for story in top_stories]
return top_stories
def new_stories(self, raw=False, limit=None):
"""Returns list of item ids of current new stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of new stories.
"""
new_stories = self._get_stories('newstories', limit)
if raw:
new_stories = [story.raw for story in new_stories]
return new_stories
def ask_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Ask HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Ask HN stories.
"""
ask_stories = self._get_stories('askstories', limit)
if raw:
ask_stories = [story.raw for story in ask_stories]
return ask_stories
def show_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Show HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Show HN stories.
"""
show_stories = self._get_stories('showstories', limit)
if raw:
show_stories = [story.raw for story in show_stories]
return show_stories
def job_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Job stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Job stories.
"""
job_stories = self._get_stories('jobstories', limit)
if raw:
job_stories = [story.raw for story in job_stories]
return job_stories
def updates(self):
"""Returns list of item ids and user ids that have been
changed/updated recently.
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/updates.json
Returns:
`dict` with two keys whose values are `list` objects
"""
url = urljoin(self.base_url, 'updates.json')
response = self._get_sync(url)
return {
'items': self.get_items_by_ids(item_ids=response['items']),
'profiles': self.get_users_by_ids(user_ids=response['profiles'])
}
def get_max_item(self, expand=False):
"""The current largest item id
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/maxitem.json
Args:
expand (bool): Flag to indicate whether to transform all
IDs into objects.
Returns:
`int` if successful.
"""
url = urljoin(self.base_url, 'maxitem.json')
response = self._get_sync(url)
if expand:
return self.get_item(response)
else:
return response
def get_all(self):
"""Returns ENTIRE Hacker News!
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
return self.get_last(num=max_item)
def get_last(self, num=10):
"""Returns last `num` of HN stories
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
urls = [urljoin(self.item_url, F"{i}.json") for i in range(
max_item - num + 1, max_item + 1)]
result = self._run_async(urls=urls)
return [Item(r) for r in result if r]
|
avinassh/haxor
|
hackernews/__init__.py
|
HackerNews.get_user
|
python
|
def get_user(self, user_id, expand=False):
url = urljoin(self.user_url, F"{user_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidUserID
user = User(response)
if expand and user.submitted:
items = self.get_items_by_ids(user.submitted)
user_opt = {
'stories': 'story',
'comments': 'comment',
'jobs': 'job',
'polls': 'poll',
'pollopts': 'pollopt'
}
for key, value in user_opt.items():
setattr(
user,
key,
[i for i in items if i.item_type == value]
)
return user
|
Returns Hacker News `User` object.
Fetches data from the url:
https://hacker-news.firebaseio.com/v0/user/<user_id>.json
e.g. https://hacker-news.firebaseio.com/v0/user/pg.json
Args:
user_id (string): unique user id of a Hacker News user.
expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`User` object representing a user on Hacker News.
Raises:
InvalidUserID: If no such user exists on Hacker News.
|
train
|
https://github.com/avinassh/haxor/blob/71dbecf87531f7a24bb39c736d53127427aaca84/hackernews/__init__.py#L223-L266
|
[
"def _get_sync(self, url):\n \"\"\"Internal method used for GET requests\n\n Args:\n url (str): URL to fetch\n\n Returns:\n Individual URL request's response\n\n Raises:\n HTTPError: If HTTP request failed.\n \"\"\"\n response = self.session.get(url)\n if response.status_code == requests.codes.ok:\n return response.json()\n else:\n raise HTTPError\n",
"def get_items_by_ids(self, item_ids, item_type=None):\n \"\"\"Given a list of item ids, return all the Item objects\n\n Args:\n item_ids (obj): List of item IDs to query\n item_type (str): (optional) Item type to filter results with\n\n Returns:\n List of `Item` objects for given item IDs and given item type\n\n \"\"\"\n urls = [urljoin(self.item_url, F\"{i}.json\") for i in item_ids]\n result = self._run_async(urls=urls)\n items = [Item(r) for r in result if r]\n if item_type:\n return [item for item in items if item.item_type == item_type]\n else:\n return items\n"
] |
class HackerNews(object):
def __init__(self, version='v0'):
"""
Args:
version (string): specifies Hacker News API version.
Default is `v0`.
Raises:
InvalidAPIVersion: If Hacker News version is not supported.
"""
try:
self.base_url = supported_api_versions[version]
except KeyError:
raise InvalidAPIVersion
self.item_url = urljoin(self.base_url, 'item/')
self.user_url = urljoin(self.base_url, 'user/')
self.session = requests.Session()
def _get_sync(self, url):
"""Internal method used for GET requests
Args:
url (str): URL to fetch
Returns:
Individual URL request's response
Raises:
HTTPError: If HTTP request failed.
"""
response = self.session.get(url)
if response.status_code == requests.codes.ok:
return response.json()
else:
raise HTTPError
async def _get_async(self, url, session):
"""Asynchronous internal method used for GET requests
Args:
url (str): URL to fetch
session (obj): aiohttp client session for async loop
Returns:
data (obj): Individual URL request's response corountine
"""
data = None
async with session.get(url) as resp:
if resp.status == 200:
data = await resp.json()
return data
async def _async_loop(self, urls):
"""Asynchronous internal method used to request multiple URLs
Args:
urls (list): URLs to fetch
Returns:
responses (obj): All URL requests' response coroutines
"""
results = []
async with aiohttp.ClientSession(
connector=aiohttp.TCPConnector(ssl=False)
) as session:
for url in urls:
result = asyncio.ensure_future(self._get_async(url, session))
results.append(result)
responses = await asyncio.gather(*results)
return responses
def _run_async(self, urls):
"""Asynchronous event loop execution
Args:
urls (list): URLs to fetch
Returns:
results (obj): All URL requests' responses
"""
loop = asyncio.get_event_loop()
results = loop.run_until_complete(self._async_loop(urls))
return results
def _get_stories(self, page, limit):
"""
Hacker News has different categories (i.e. stories) like
'topstories', 'newstories', 'askstories', 'showstories', 'jobstories'.
This method, first fetches the relevant story ids of that category
The URL is: https://hacker-news.firebaseio.com/v0/<story_name>.json
e.g. https://hacker-news.firebaseio.com/v0/topstories.json
Then, asynchronously it fetches each story and returns the Item objects
The URL for individual story is:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
"""
url = urljoin(self.base_url, F"{page}.json")
story_ids = self._get_sync(url)[:limit]
return self.get_items_by_ids(item_ids=story_ids)
def get_item(self, item_id, expand=False):
"""Returns Hacker News `Item` object.
Fetches the data from url:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
Args:
item_id (int or string): Unique item id of Hacker News story,
comment etc.
expand (bool): expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`Item` object representing Hacker News item.
Raises:
InvalidItemID: If corresponding Hacker News story does not exist.
"""
url = urljoin(self.item_url, F"{item_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidItemID
item = Item(response)
if expand:
item.by = self.get_user(item.by)
item.kids = self.get_items_by_ids(item.kids) if item.kids else None
item.parent = self.get_item(item.parent) if item.parent else None
item.poll = self.get_item(item.poll) if item.poll else None
item.parts = (
self.get_items_by_ids(item.parts) if item.parts else None
)
return item
def get_items_by_ids(self, item_ids, item_type=None):
"""Given a list of item ids, return all the Item objects
Args:
item_ids (obj): List of item IDs to query
item_type (str): (optional) Item type to filter results with
Returns:
List of `Item` objects for given item IDs and given item type
"""
urls = [urljoin(self.item_url, F"{i}.json") for i in item_ids]
result = self._run_async(urls=urls)
items = [Item(r) for r in result if r]
if item_type:
return [item for item in items if item.item_type == item_type]
else:
return items
def get_users_by_ids(self, user_ids):
"""
Given a list of user ids, return all the User objects
"""
urls = [urljoin(self.user_url, F"{i}.json") for i in user_ids]
result = self._run_async(urls=urls)
return [User(r) for r in result if r]
def top_stories(self, raw=False, limit=None):
"""Returns list of item ids of current top stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to represent all
objects in raw json.
Returns:
`list` object containing ids of top stories.
"""
top_stories = self._get_stories('topstories', limit)
if raw:
top_stories = [story.raw for story in top_stories]
return top_stories
def new_stories(self, raw=False, limit=None):
"""Returns list of item ids of current new stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of new stories.
"""
new_stories = self._get_stories('newstories', limit)
if raw:
new_stories = [story.raw for story in new_stories]
return new_stories
def ask_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Ask HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Ask HN stories.
"""
ask_stories = self._get_stories('askstories', limit)
if raw:
ask_stories = [story.raw for story in ask_stories]
return ask_stories
def show_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Show HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Show HN stories.
"""
show_stories = self._get_stories('showstories', limit)
if raw:
show_stories = [story.raw for story in show_stories]
return show_stories
def job_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Job stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Job stories.
"""
job_stories = self._get_stories('jobstories', limit)
if raw:
job_stories = [story.raw for story in job_stories]
return job_stories
def updates(self):
"""Returns list of item ids and user ids that have been
changed/updated recently.
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/updates.json
Returns:
`dict` with two keys whose values are `list` objects
"""
url = urljoin(self.base_url, 'updates.json')
response = self._get_sync(url)
return {
'items': self.get_items_by_ids(item_ids=response['items']),
'profiles': self.get_users_by_ids(user_ids=response['profiles'])
}
def get_max_item(self, expand=False):
"""The current largest item id
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/maxitem.json
Args:
expand (bool): Flag to indicate whether to transform all
IDs into objects.
Returns:
`int` if successful.
"""
url = urljoin(self.base_url, 'maxitem.json')
response = self._get_sync(url)
if expand:
return self.get_item(response)
else:
return response
def get_all(self):
"""Returns ENTIRE Hacker News!
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
return self.get_last(num=max_item)
def get_last(self, num=10):
"""Returns last `num` of HN stories
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
urls = [urljoin(self.item_url, F"{i}.json") for i in range(
max_item - num + 1, max_item + 1)]
result = self._run_async(urls=urls)
return [Item(r) for r in result if r]
|
avinassh/haxor
|
hackernews/__init__.py
|
HackerNews.get_users_by_ids
|
python
|
def get_users_by_ids(self, user_ids):
urls = [urljoin(self.user_url, F"{i}.json") for i in user_ids]
result = self._run_async(urls=urls)
return [User(r) for r in result if r]
|
Given a list of user ids, return all the User objects
|
train
|
https://github.com/avinassh/haxor/blob/71dbecf87531f7a24bb39c736d53127427aaca84/hackernews/__init__.py#L268-L274
|
[
"def _run_async(self, urls):\n \"\"\"Asynchronous event loop execution\n\n Args:\n urls (list): URLs to fetch\n\n Returns:\n results (obj): All URL requests' responses\n\n \"\"\"\n loop = asyncio.get_event_loop()\n results = loop.run_until_complete(self._async_loop(urls))\n return results\n"
] |
class HackerNews(object):
def __init__(self, version='v0'):
"""
Args:
version (string): specifies Hacker News API version.
Default is `v0`.
Raises:
InvalidAPIVersion: If Hacker News version is not supported.
"""
try:
self.base_url = supported_api_versions[version]
except KeyError:
raise InvalidAPIVersion
self.item_url = urljoin(self.base_url, 'item/')
self.user_url = urljoin(self.base_url, 'user/')
self.session = requests.Session()
def _get_sync(self, url):
"""Internal method used for GET requests
Args:
url (str): URL to fetch
Returns:
Individual URL request's response
Raises:
HTTPError: If HTTP request failed.
"""
response = self.session.get(url)
if response.status_code == requests.codes.ok:
return response.json()
else:
raise HTTPError
async def _get_async(self, url, session):
"""Asynchronous internal method used for GET requests
Args:
url (str): URL to fetch
session (obj): aiohttp client session for async loop
Returns:
data (obj): Individual URL request's response corountine
"""
data = None
async with session.get(url) as resp:
if resp.status == 200:
data = await resp.json()
return data
async def _async_loop(self, urls):
"""Asynchronous internal method used to request multiple URLs
Args:
urls (list): URLs to fetch
Returns:
responses (obj): All URL requests' response coroutines
"""
results = []
async with aiohttp.ClientSession(
connector=aiohttp.TCPConnector(ssl=False)
) as session:
for url in urls:
result = asyncio.ensure_future(self._get_async(url, session))
results.append(result)
responses = await asyncio.gather(*results)
return responses
def _run_async(self, urls):
"""Asynchronous event loop execution
Args:
urls (list): URLs to fetch
Returns:
results (obj): All URL requests' responses
"""
loop = asyncio.get_event_loop()
results = loop.run_until_complete(self._async_loop(urls))
return results
def _get_stories(self, page, limit):
"""
Hacker News has different categories (i.e. stories) like
'topstories', 'newstories', 'askstories', 'showstories', 'jobstories'.
This method, first fetches the relevant story ids of that category
The URL is: https://hacker-news.firebaseio.com/v0/<story_name>.json
e.g. https://hacker-news.firebaseio.com/v0/topstories.json
Then, asynchronously it fetches each story and returns the Item objects
The URL for individual story is:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
"""
url = urljoin(self.base_url, F"{page}.json")
story_ids = self._get_sync(url)[:limit]
return self.get_items_by_ids(item_ids=story_ids)
def get_item(self, item_id, expand=False):
"""Returns Hacker News `Item` object.
Fetches the data from url:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
Args:
item_id (int or string): Unique item id of Hacker News story,
comment etc.
expand (bool): expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`Item` object representing Hacker News item.
Raises:
InvalidItemID: If corresponding Hacker News story does not exist.
"""
url = urljoin(self.item_url, F"{item_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidItemID
item = Item(response)
if expand:
item.by = self.get_user(item.by)
item.kids = self.get_items_by_ids(item.kids) if item.kids else None
item.parent = self.get_item(item.parent) if item.parent else None
item.poll = self.get_item(item.poll) if item.poll else None
item.parts = (
self.get_items_by_ids(item.parts) if item.parts else None
)
return item
def get_items_by_ids(self, item_ids, item_type=None):
"""Given a list of item ids, return all the Item objects
Args:
item_ids (obj): List of item IDs to query
item_type (str): (optional) Item type to filter results with
Returns:
List of `Item` objects for given item IDs and given item type
"""
urls = [urljoin(self.item_url, F"{i}.json") for i in item_ids]
result = self._run_async(urls=urls)
items = [Item(r) for r in result if r]
if item_type:
return [item for item in items if item.item_type == item_type]
else:
return items
def get_user(self, user_id, expand=False):
"""Returns Hacker News `User` object.
Fetches data from the url:
https://hacker-news.firebaseio.com/v0/user/<user_id>.json
e.g. https://hacker-news.firebaseio.com/v0/user/pg.json
Args:
user_id (string): unique user id of a Hacker News user.
expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`User` object representing a user on Hacker News.
Raises:
InvalidUserID: If no such user exists on Hacker News.
"""
url = urljoin(self.user_url, F"{user_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidUserID
user = User(response)
if expand and user.submitted:
items = self.get_items_by_ids(user.submitted)
user_opt = {
'stories': 'story',
'comments': 'comment',
'jobs': 'job',
'polls': 'poll',
'pollopts': 'pollopt'
}
for key, value in user_opt.items():
setattr(
user,
key,
[i for i in items if i.item_type == value]
)
return user
def top_stories(self, raw=False, limit=None):
"""Returns list of item ids of current top stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to represent all
objects in raw json.
Returns:
`list` object containing ids of top stories.
"""
top_stories = self._get_stories('topstories', limit)
if raw:
top_stories = [story.raw for story in top_stories]
return top_stories
def new_stories(self, raw=False, limit=None):
"""Returns list of item ids of current new stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of new stories.
"""
new_stories = self._get_stories('newstories', limit)
if raw:
new_stories = [story.raw for story in new_stories]
return new_stories
def ask_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Ask HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Ask HN stories.
"""
ask_stories = self._get_stories('askstories', limit)
if raw:
ask_stories = [story.raw for story in ask_stories]
return ask_stories
def show_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Show HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Show HN stories.
"""
show_stories = self._get_stories('showstories', limit)
if raw:
show_stories = [story.raw for story in show_stories]
return show_stories
def job_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Job stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Job stories.
"""
job_stories = self._get_stories('jobstories', limit)
if raw:
job_stories = [story.raw for story in job_stories]
return job_stories
def updates(self):
"""Returns list of item ids and user ids that have been
changed/updated recently.
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/updates.json
Returns:
`dict` with two keys whose values are `list` objects
"""
url = urljoin(self.base_url, 'updates.json')
response = self._get_sync(url)
return {
'items': self.get_items_by_ids(item_ids=response['items']),
'profiles': self.get_users_by_ids(user_ids=response['profiles'])
}
def get_max_item(self, expand=False):
"""The current largest item id
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/maxitem.json
Args:
expand (bool): Flag to indicate whether to transform all
IDs into objects.
Returns:
`int` if successful.
"""
url = urljoin(self.base_url, 'maxitem.json')
response = self._get_sync(url)
if expand:
return self.get_item(response)
else:
return response
def get_all(self):
"""Returns ENTIRE Hacker News!
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
return self.get_last(num=max_item)
def get_last(self, num=10):
"""Returns last `num` of HN stories
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
urls = [urljoin(self.item_url, F"{i}.json") for i in range(
max_item - num + 1, max_item + 1)]
result = self._run_async(urls=urls)
return [Item(r) for r in result if r]
|
avinassh/haxor
|
hackernews/__init__.py
|
HackerNews.top_stories
|
python
|
def top_stories(self, raw=False, limit=None):
top_stories = self._get_stories('topstories', limit)
if raw:
top_stories = [story.raw for story in top_stories]
return top_stories
|
Returns list of item ids of current top stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to represent all
objects in raw json.
Returns:
`list` object containing ids of top stories.
|
train
|
https://github.com/avinassh/haxor/blob/71dbecf87531f7a24bb39c736d53127427aaca84/hackernews/__init__.py#L276-L291
|
[
"def _get_stories(self, page, limit):\n \"\"\"\n Hacker News has different categories (i.e. stories) like\n 'topstories', 'newstories', 'askstories', 'showstories', 'jobstories'.\n This method, first fetches the relevant story ids of that category\n\n The URL is: https://hacker-news.firebaseio.com/v0/<story_name>.json\n\n e.g. https://hacker-news.firebaseio.com/v0/topstories.json\n\n Then, asynchronously it fetches each story and returns the Item objects\n\n The URL for individual story is:\n https://hacker-news.firebaseio.com/v0/item/<item_id>.json\n\n e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json\n\n \"\"\"\n url = urljoin(self.base_url, F\"{page}.json\")\n story_ids = self._get_sync(url)[:limit]\n return self.get_items_by_ids(item_ids=story_ids)\n"
] |
class HackerNews(object):
def __init__(self, version='v0'):
"""
Args:
version (string): specifies Hacker News API version.
Default is `v0`.
Raises:
InvalidAPIVersion: If Hacker News version is not supported.
"""
try:
self.base_url = supported_api_versions[version]
except KeyError:
raise InvalidAPIVersion
self.item_url = urljoin(self.base_url, 'item/')
self.user_url = urljoin(self.base_url, 'user/')
self.session = requests.Session()
def _get_sync(self, url):
"""Internal method used for GET requests
Args:
url (str): URL to fetch
Returns:
Individual URL request's response
Raises:
HTTPError: If HTTP request failed.
"""
response = self.session.get(url)
if response.status_code == requests.codes.ok:
return response.json()
else:
raise HTTPError
async def _get_async(self, url, session):
"""Asynchronous internal method used for GET requests
Args:
url (str): URL to fetch
session (obj): aiohttp client session for async loop
Returns:
data (obj): Individual URL request's response corountine
"""
data = None
async with session.get(url) as resp:
if resp.status == 200:
data = await resp.json()
return data
async def _async_loop(self, urls):
"""Asynchronous internal method used to request multiple URLs
Args:
urls (list): URLs to fetch
Returns:
responses (obj): All URL requests' response coroutines
"""
results = []
async with aiohttp.ClientSession(
connector=aiohttp.TCPConnector(ssl=False)
) as session:
for url in urls:
result = asyncio.ensure_future(self._get_async(url, session))
results.append(result)
responses = await asyncio.gather(*results)
return responses
def _run_async(self, urls):
"""Asynchronous event loop execution
Args:
urls (list): URLs to fetch
Returns:
results (obj): All URL requests' responses
"""
loop = asyncio.get_event_loop()
results = loop.run_until_complete(self._async_loop(urls))
return results
def _get_stories(self, page, limit):
"""
Hacker News has different categories (i.e. stories) like
'topstories', 'newstories', 'askstories', 'showstories', 'jobstories'.
This method, first fetches the relevant story ids of that category
The URL is: https://hacker-news.firebaseio.com/v0/<story_name>.json
e.g. https://hacker-news.firebaseio.com/v0/topstories.json
Then, asynchronously it fetches each story and returns the Item objects
The URL for individual story is:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
"""
url = urljoin(self.base_url, F"{page}.json")
story_ids = self._get_sync(url)[:limit]
return self.get_items_by_ids(item_ids=story_ids)
def get_item(self, item_id, expand=False):
"""Returns Hacker News `Item` object.
Fetches the data from url:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
Args:
item_id (int or string): Unique item id of Hacker News story,
comment etc.
expand (bool): expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`Item` object representing Hacker News item.
Raises:
InvalidItemID: If corresponding Hacker News story does not exist.
"""
url = urljoin(self.item_url, F"{item_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidItemID
item = Item(response)
if expand:
item.by = self.get_user(item.by)
item.kids = self.get_items_by_ids(item.kids) if item.kids else None
item.parent = self.get_item(item.parent) if item.parent else None
item.poll = self.get_item(item.poll) if item.poll else None
item.parts = (
self.get_items_by_ids(item.parts) if item.parts else None
)
return item
def get_items_by_ids(self, item_ids, item_type=None):
"""Given a list of item ids, return all the Item objects
Args:
item_ids (obj): List of item IDs to query
item_type (str): (optional) Item type to filter results with
Returns:
List of `Item` objects for given item IDs and given item type
"""
urls = [urljoin(self.item_url, F"{i}.json") for i in item_ids]
result = self._run_async(urls=urls)
items = [Item(r) for r in result if r]
if item_type:
return [item for item in items if item.item_type == item_type]
else:
return items
def get_user(self, user_id, expand=False):
"""Returns Hacker News `User` object.
Fetches data from the url:
https://hacker-news.firebaseio.com/v0/user/<user_id>.json
e.g. https://hacker-news.firebaseio.com/v0/user/pg.json
Args:
user_id (string): unique user id of a Hacker News user.
expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`User` object representing a user on Hacker News.
Raises:
InvalidUserID: If no such user exists on Hacker News.
"""
url = urljoin(self.user_url, F"{user_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidUserID
user = User(response)
if expand and user.submitted:
items = self.get_items_by_ids(user.submitted)
user_opt = {
'stories': 'story',
'comments': 'comment',
'jobs': 'job',
'polls': 'poll',
'pollopts': 'pollopt'
}
for key, value in user_opt.items():
setattr(
user,
key,
[i for i in items if i.item_type == value]
)
return user
def get_users_by_ids(self, user_ids):
"""
Given a list of user ids, return all the User objects
"""
urls = [urljoin(self.user_url, F"{i}.json") for i in user_ids]
result = self._run_async(urls=urls)
return [User(r) for r in result if r]
def new_stories(self, raw=False, limit=None):
"""Returns list of item ids of current new stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of new stories.
"""
new_stories = self._get_stories('newstories', limit)
if raw:
new_stories = [story.raw for story in new_stories]
return new_stories
def ask_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Ask HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Ask HN stories.
"""
ask_stories = self._get_stories('askstories', limit)
if raw:
ask_stories = [story.raw for story in ask_stories]
return ask_stories
def show_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Show HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Show HN stories.
"""
show_stories = self._get_stories('showstories', limit)
if raw:
show_stories = [story.raw for story in show_stories]
return show_stories
def job_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Job stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Job stories.
"""
job_stories = self._get_stories('jobstories', limit)
if raw:
job_stories = [story.raw for story in job_stories]
return job_stories
def updates(self):
"""Returns list of item ids and user ids that have been
changed/updated recently.
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/updates.json
Returns:
`dict` with two keys whose values are `list` objects
"""
url = urljoin(self.base_url, 'updates.json')
response = self._get_sync(url)
return {
'items': self.get_items_by_ids(item_ids=response['items']),
'profiles': self.get_users_by_ids(user_ids=response['profiles'])
}
def get_max_item(self, expand=False):
"""The current largest item id
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/maxitem.json
Args:
expand (bool): Flag to indicate whether to transform all
IDs into objects.
Returns:
`int` if successful.
"""
url = urljoin(self.base_url, 'maxitem.json')
response = self._get_sync(url)
if expand:
return self.get_item(response)
else:
return response
def get_all(self):
"""Returns ENTIRE Hacker News!
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
return self.get_last(num=max_item)
def get_last(self, num=10):
"""Returns last `num` of HN stories
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
urls = [urljoin(self.item_url, F"{i}.json") for i in range(
max_item - num + 1, max_item + 1)]
result = self._run_async(urls=urls)
return [Item(r) for r in result if r]
|
avinassh/haxor
|
hackernews/__init__.py
|
HackerNews.new_stories
|
python
|
def new_stories(self, raw=False, limit=None):
new_stories = self._get_stories('newstories', limit)
if raw:
new_stories = [story.raw for story in new_stories]
return new_stories
|
Returns list of item ids of current new stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of new stories.
|
train
|
https://github.com/avinassh/haxor/blob/71dbecf87531f7a24bb39c736d53127427aaca84/hackernews/__init__.py#L293-L308
|
[
"def _get_stories(self, page, limit):\n \"\"\"\n Hacker News has different categories (i.e. stories) like\n 'topstories', 'newstories', 'askstories', 'showstories', 'jobstories'.\n This method, first fetches the relevant story ids of that category\n\n The URL is: https://hacker-news.firebaseio.com/v0/<story_name>.json\n\n e.g. https://hacker-news.firebaseio.com/v0/topstories.json\n\n Then, asynchronously it fetches each story and returns the Item objects\n\n The URL for individual story is:\n https://hacker-news.firebaseio.com/v0/item/<item_id>.json\n\n e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json\n\n \"\"\"\n url = urljoin(self.base_url, F\"{page}.json\")\n story_ids = self._get_sync(url)[:limit]\n return self.get_items_by_ids(item_ids=story_ids)\n"
] |
class HackerNews(object):
def __init__(self, version='v0'):
"""
Args:
version (string): specifies Hacker News API version.
Default is `v0`.
Raises:
InvalidAPIVersion: If Hacker News version is not supported.
"""
try:
self.base_url = supported_api_versions[version]
except KeyError:
raise InvalidAPIVersion
self.item_url = urljoin(self.base_url, 'item/')
self.user_url = urljoin(self.base_url, 'user/')
self.session = requests.Session()
def _get_sync(self, url):
"""Internal method used for GET requests
Args:
url (str): URL to fetch
Returns:
Individual URL request's response
Raises:
HTTPError: If HTTP request failed.
"""
response = self.session.get(url)
if response.status_code == requests.codes.ok:
return response.json()
else:
raise HTTPError
async def _get_async(self, url, session):
"""Asynchronous internal method used for GET requests
Args:
url (str): URL to fetch
session (obj): aiohttp client session for async loop
Returns:
data (obj): Individual URL request's response corountine
"""
data = None
async with session.get(url) as resp:
if resp.status == 200:
data = await resp.json()
return data
async def _async_loop(self, urls):
"""Asynchronous internal method used to request multiple URLs
Args:
urls (list): URLs to fetch
Returns:
responses (obj): All URL requests' response coroutines
"""
results = []
async with aiohttp.ClientSession(
connector=aiohttp.TCPConnector(ssl=False)
) as session:
for url in urls:
result = asyncio.ensure_future(self._get_async(url, session))
results.append(result)
responses = await asyncio.gather(*results)
return responses
def _run_async(self, urls):
"""Asynchronous event loop execution
Args:
urls (list): URLs to fetch
Returns:
results (obj): All URL requests' responses
"""
loop = asyncio.get_event_loop()
results = loop.run_until_complete(self._async_loop(urls))
return results
def _get_stories(self, page, limit):
"""
Hacker News has different categories (i.e. stories) like
'topstories', 'newstories', 'askstories', 'showstories', 'jobstories'.
This method, first fetches the relevant story ids of that category
The URL is: https://hacker-news.firebaseio.com/v0/<story_name>.json
e.g. https://hacker-news.firebaseio.com/v0/topstories.json
Then, asynchronously it fetches each story and returns the Item objects
The URL for individual story is:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
"""
url = urljoin(self.base_url, F"{page}.json")
story_ids = self._get_sync(url)[:limit]
return self.get_items_by_ids(item_ids=story_ids)
def get_item(self, item_id, expand=False):
"""Returns Hacker News `Item` object.
Fetches the data from url:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
Args:
item_id (int or string): Unique item id of Hacker News story,
comment etc.
expand (bool): expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`Item` object representing Hacker News item.
Raises:
InvalidItemID: If corresponding Hacker News story does not exist.
"""
url = urljoin(self.item_url, F"{item_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidItemID
item = Item(response)
if expand:
item.by = self.get_user(item.by)
item.kids = self.get_items_by_ids(item.kids) if item.kids else None
item.parent = self.get_item(item.parent) if item.parent else None
item.poll = self.get_item(item.poll) if item.poll else None
item.parts = (
self.get_items_by_ids(item.parts) if item.parts else None
)
return item
def get_items_by_ids(self, item_ids, item_type=None):
"""Given a list of item ids, return all the Item objects
Args:
item_ids (obj): List of item IDs to query
item_type (str): (optional) Item type to filter results with
Returns:
List of `Item` objects for given item IDs and given item type
"""
urls = [urljoin(self.item_url, F"{i}.json") for i in item_ids]
result = self._run_async(urls=urls)
items = [Item(r) for r in result if r]
if item_type:
return [item for item in items if item.item_type == item_type]
else:
return items
def get_user(self, user_id, expand=False):
"""Returns Hacker News `User` object.
Fetches data from the url:
https://hacker-news.firebaseio.com/v0/user/<user_id>.json
e.g. https://hacker-news.firebaseio.com/v0/user/pg.json
Args:
user_id (string): unique user id of a Hacker News user.
expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`User` object representing a user on Hacker News.
Raises:
InvalidUserID: If no such user exists on Hacker News.
"""
url = urljoin(self.user_url, F"{user_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidUserID
user = User(response)
if expand and user.submitted:
items = self.get_items_by_ids(user.submitted)
user_opt = {
'stories': 'story',
'comments': 'comment',
'jobs': 'job',
'polls': 'poll',
'pollopts': 'pollopt'
}
for key, value in user_opt.items():
setattr(
user,
key,
[i for i in items if i.item_type == value]
)
return user
def get_users_by_ids(self, user_ids):
"""
Given a list of user ids, return all the User objects
"""
urls = [urljoin(self.user_url, F"{i}.json") for i in user_ids]
result = self._run_async(urls=urls)
return [User(r) for r in result if r]
def top_stories(self, raw=False, limit=None):
"""Returns list of item ids of current top stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to represent all
objects in raw json.
Returns:
`list` object containing ids of top stories.
"""
top_stories = self._get_stories('topstories', limit)
if raw:
top_stories = [story.raw for story in top_stories]
return top_stories
def ask_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Ask HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Ask HN stories.
"""
ask_stories = self._get_stories('askstories', limit)
if raw:
ask_stories = [story.raw for story in ask_stories]
return ask_stories
def show_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Show HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Show HN stories.
"""
show_stories = self._get_stories('showstories', limit)
if raw:
show_stories = [story.raw for story in show_stories]
return show_stories
def job_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Job stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Job stories.
"""
job_stories = self._get_stories('jobstories', limit)
if raw:
job_stories = [story.raw for story in job_stories]
return job_stories
def updates(self):
"""Returns list of item ids and user ids that have been
changed/updated recently.
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/updates.json
Returns:
`dict` with two keys whose values are `list` objects
"""
url = urljoin(self.base_url, 'updates.json')
response = self._get_sync(url)
return {
'items': self.get_items_by_ids(item_ids=response['items']),
'profiles': self.get_users_by_ids(user_ids=response['profiles'])
}
def get_max_item(self, expand=False):
"""The current largest item id
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/maxitem.json
Args:
expand (bool): Flag to indicate whether to transform all
IDs into objects.
Returns:
`int` if successful.
"""
url = urljoin(self.base_url, 'maxitem.json')
response = self._get_sync(url)
if expand:
return self.get_item(response)
else:
return response
def get_all(self):
"""Returns ENTIRE Hacker News!
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
return self.get_last(num=max_item)
def get_last(self, num=10):
"""Returns last `num` of HN stories
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
urls = [urljoin(self.item_url, F"{i}.json") for i in range(
max_item - num + 1, max_item + 1)]
result = self._run_async(urls=urls)
return [Item(r) for r in result if r]
|
avinassh/haxor
|
hackernews/__init__.py
|
HackerNews.ask_stories
|
python
|
def ask_stories(self, raw=False, limit=None):
ask_stories = self._get_stories('askstories', limit)
if raw:
ask_stories = [story.raw for story in ask_stories]
return ask_stories
|
Returns list of item ids of latest Ask HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Ask HN stories.
|
train
|
https://github.com/avinassh/haxor/blob/71dbecf87531f7a24bb39c736d53127427aaca84/hackernews/__init__.py#L310-L325
|
[
"def _get_stories(self, page, limit):\n \"\"\"\n Hacker News has different categories (i.e. stories) like\n 'topstories', 'newstories', 'askstories', 'showstories', 'jobstories'.\n This method, first fetches the relevant story ids of that category\n\n The URL is: https://hacker-news.firebaseio.com/v0/<story_name>.json\n\n e.g. https://hacker-news.firebaseio.com/v0/topstories.json\n\n Then, asynchronously it fetches each story and returns the Item objects\n\n The URL for individual story is:\n https://hacker-news.firebaseio.com/v0/item/<item_id>.json\n\n e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json\n\n \"\"\"\n url = urljoin(self.base_url, F\"{page}.json\")\n story_ids = self._get_sync(url)[:limit]\n return self.get_items_by_ids(item_ids=story_ids)\n"
] |
class HackerNews(object):
def __init__(self, version='v0'):
"""
Args:
version (string): specifies Hacker News API version.
Default is `v0`.
Raises:
InvalidAPIVersion: If Hacker News version is not supported.
"""
try:
self.base_url = supported_api_versions[version]
except KeyError:
raise InvalidAPIVersion
self.item_url = urljoin(self.base_url, 'item/')
self.user_url = urljoin(self.base_url, 'user/')
self.session = requests.Session()
def _get_sync(self, url):
"""Internal method used for GET requests
Args:
url (str): URL to fetch
Returns:
Individual URL request's response
Raises:
HTTPError: If HTTP request failed.
"""
response = self.session.get(url)
if response.status_code == requests.codes.ok:
return response.json()
else:
raise HTTPError
async def _get_async(self, url, session):
"""Asynchronous internal method used for GET requests
Args:
url (str): URL to fetch
session (obj): aiohttp client session for async loop
Returns:
data (obj): Individual URL request's response corountine
"""
data = None
async with session.get(url) as resp:
if resp.status == 200:
data = await resp.json()
return data
async def _async_loop(self, urls):
"""Asynchronous internal method used to request multiple URLs
Args:
urls (list): URLs to fetch
Returns:
responses (obj): All URL requests' response coroutines
"""
results = []
async with aiohttp.ClientSession(
connector=aiohttp.TCPConnector(ssl=False)
) as session:
for url in urls:
result = asyncio.ensure_future(self._get_async(url, session))
results.append(result)
responses = await asyncio.gather(*results)
return responses
def _run_async(self, urls):
"""Asynchronous event loop execution
Args:
urls (list): URLs to fetch
Returns:
results (obj): All URL requests' responses
"""
loop = asyncio.get_event_loop()
results = loop.run_until_complete(self._async_loop(urls))
return results
def _get_stories(self, page, limit):
"""
Hacker News has different categories (i.e. stories) like
'topstories', 'newstories', 'askstories', 'showstories', 'jobstories'.
This method, first fetches the relevant story ids of that category
The URL is: https://hacker-news.firebaseio.com/v0/<story_name>.json
e.g. https://hacker-news.firebaseio.com/v0/topstories.json
Then, asynchronously it fetches each story and returns the Item objects
The URL for individual story is:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
"""
url = urljoin(self.base_url, F"{page}.json")
story_ids = self._get_sync(url)[:limit]
return self.get_items_by_ids(item_ids=story_ids)
def get_item(self, item_id, expand=False):
"""Returns Hacker News `Item` object.
Fetches the data from url:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
Args:
item_id (int or string): Unique item id of Hacker News story,
comment etc.
expand (bool): expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`Item` object representing Hacker News item.
Raises:
InvalidItemID: If corresponding Hacker News story does not exist.
"""
url = urljoin(self.item_url, F"{item_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidItemID
item = Item(response)
if expand:
item.by = self.get_user(item.by)
item.kids = self.get_items_by_ids(item.kids) if item.kids else None
item.parent = self.get_item(item.parent) if item.parent else None
item.poll = self.get_item(item.poll) if item.poll else None
item.parts = (
self.get_items_by_ids(item.parts) if item.parts else None
)
return item
def get_items_by_ids(self, item_ids, item_type=None):
"""Given a list of item ids, return all the Item objects
Args:
item_ids (obj): List of item IDs to query
item_type (str): (optional) Item type to filter results with
Returns:
List of `Item` objects for given item IDs and given item type
"""
urls = [urljoin(self.item_url, F"{i}.json") for i in item_ids]
result = self._run_async(urls=urls)
items = [Item(r) for r in result if r]
if item_type:
return [item for item in items if item.item_type == item_type]
else:
return items
def get_user(self, user_id, expand=False):
"""Returns Hacker News `User` object.
Fetches data from the url:
https://hacker-news.firebaseio.com/v0/user/<user_id>.json
e.g. https://hacker-news.firebaseio.com/v0/user/pg.json
Args:
user_id (string): unique user id of a Hacker News user.
expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`User` object representing a user on Hacker News.
Raises:
InvalidUserID: If no such user exists on Hacker News.
"""
url = urljoin(self.user_url, F"{user_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidUserID
user = User(response)
if expand and user.submitted:
items = self.get_items_by_ids(user.submitted)
user_opt = {
'stories': 'story',
'comments': 'comment',
'jobs': 'job',
'polls': 'poll',
'pollopts': 'pollopt'
}
for key, value in user_opt.items():
setattr(
user,
key,
[i for i in items if i.item_type == value]
)
return user
def get_users_by_ids(self, user_ids):
"""
Given a list of user ids, return all the User objects
"""
urls = [urljoin(self.user_url, F"{i}.json") for i in user_ids]
result = self._run_async(urls=urls)
return [User(r) for r in result if r]
def top_stories(self, raw=False, limit=None):
"""Returns list of item ids of current top stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to represent all
objects in raw json.
Returns:
`list` object containing ids of top stories.
"""
top_stories = self._get_stories('topstories', limit)
if raw:
top_stories = [story.raw for story in top_stories]
return top_stories
def new_stories(self, raw=False, limit=None):
"""Returns list of item ids of current new stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of new stories.
"""
new_stories = self._get_stories('newstories', limit)
if raw:
new_stories = [story.raw for story in new_stories]
return new_stories
def show_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Show HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Show HN stories.
"""
show_stories = self._get_stories('showstories', limit)
if raw:
show_stories = [story.raw for story in show_stories]
return show_stories
def job_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Job stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Job stories.
"""
job_stories = self._get_stories('jobstories', limit)
if raw:
job_stories = [story.raw for story in job_stories]
return job_stories
def updates(self):
"""Returns list of item ids and user ids that have been
changed/updated recently.
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/updates.json
Returns:
`dict` with two keys whose values are `list` objects
"""
url = urljoin(self.base_url, 'updates.json')
response = self._get_sync(url)
return {
'items': self.get_items_by_ids(item_ids=response['items']),
'profiles': self.get_users_by_ids(user_ids=response['profiles'])
}
def get_max_item(self, expand=False):
"""The current largest item id
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/maxitem.json
Args:
expand (bool): Flag to indicate whether to transform all
IDs into objects.
Returns:
`int` if successful.
"""
url = urljoin(self.base_url, 'maxitem.json')
response = self._get_sync(url)
if expand:
return self.get_item(response)
else:
return response
def get_all(self):
"""Returns ENTIRE Hacker News!
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
return self.get_last(num=max_item)
def get_last(self, num=10):
"""Returns last `num` of HN stories
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
urls = [urljoin(self.item_url, F"{i}.json") for i in range(
max_item - num + 1, max_item + 1)]
result = self._run_async(urls=urls)
return [Item(r) for r in result if r]
|
avinassh/haxor
|
hackernews/__init__.py
|
HackerNews.show_stories
|
python
|
def show_stories(self, raw=False, limit=None):
show_stories = self._get_stories('showstories', limit)
if raw:
show_stories = [story.raw for story in show_stories]
return show_stories
|
Returns list of item ids of latest Show HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Show HN stories.
|
train
|
https://github.com/avinassh/haxor/blob/71dbecf87531f7a24bb39c736d53127427aaca84/hackernews/__init__.py#L327-L342
|
[
"def _get_stories(self, page, limit):\n \"\"\"\n Hacker News has different categories (i.e. stories) like\n 'topstories', 'newstories', 'askstories', 'showstories', 'jobstories'.\n This method, first fetches the relevant story ids of that category\n\n The URL is: https://hacker-news.firebaseio.com/v0/<story_name>.json\n\n e.g. https://hacker-news.firebaseio.com/v0/topstories.json\n\n Then, asynchronously it fetches each story and returns the Item objects\n\n The URL for individual story is:\n https://hacker-news.firebaseio.com/v0/item/<item_id>.json\n\n e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json\n\n \"\"\"\n url = urljoin(self.base_url, F\"{page}.json\")\n story_ids = self._get_sync(url)[:limit]\n return self.get_items_by_ids(item_ids=story_ids)\n"
] |
class HackerNews(object):
def __init__(self, version='v0'):
"""
Args:
version (string): specifies Hacker News API version.
Default is `v0`.
Raises:
InvalidAPIVersion: If Hacker News version is not supported.
"""
try:
self.base_url = supported_api_versions[version]
except KeyError:
raise InvalidAPIVersion
self.item_url = urljoin(self.base_url, 'item/')
self.user_url = urljoin(self.base_url, 'user/')
self.session = requests.Session()
def _get_sync(self, url):
"""Internal method used for GET requests
Args:
url (str): URL to fetch
Returns:
Individual URL request's response
Raises:
HTTPError: If HTTP request failed.
"""
response = self.session.get(url)
if response.status_code == requests.codes.ok:
return response.json()
else:
raise HTTPError
async def _get_async(self, url, session):
"""Asynchronous internal method used for GET requests
Args:
url (str): URL to fetch
session (obj): aiohttp client session for async loop
Returns:
data (obj): Individual URL request's response corountine
"""
data = None
async with session.get(url) as resp:
if resp.status == 200:
data = await resp.json()
return data
async def _async_loop(self, urls):
"""Asynchronous internal method used to request multiple URLs
Args:
urls (list): URLs to fetch
Returns:
responses (obj): All URL requests' response coroutines
"""
results = []
async with aiohttp.ClientSession(
connector=aiohttp.TCPConnector(ssl=False)
) as session:
for url in urls:
result = asyncio.ensure_future(self._get_async(url, session))
results.append(result)
responses = await asyncio.gather(*results)
return responses
def _run_async(self, urls):
"""Asynchronous event loop execution
Args:
urls (list): URLs to fetch
Returns:
results (obj): All URL requests' responses
"""
loop = asyncio.get_event_loop()
results = loop.run_until_complete(self._async_loop(urls))
return results
def _get_stories(self, page, limit):
"""
Hacker News has different categories (i.e. stories) like
'topstories', 'newstories', 'askstories', 'showstories', 'jobstories'.
This method, first fetches the relevant story ids of that category
The URL is: https://hacker-news.firebaseio.com/v0/<story_name>.json
e.g. https://hacker-news.firebaseio.com/v0/topstories.json
Then, asynchronously it fetches each story and returns the Item objects
The URL for individual story is:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
"""
url = urljoin(self.base_url, F"{page}.json")
story_ids = self._get_sync(url)[:limit]
return self.get_items_by_ids(item_ids=story_ids)
def get_item(self, item_id, expand=False):
"""Returns Hacker News `Item` object.
Fetches the data from url:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
Args:
item_id (int or string): Unique item id of Hacker News story,
comment etc.
expand (bool): expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`Item` object representing Hacker News item.
Raises:
InvalidItemID: If corresponding Hacker News story does not exist.
"""
url = urljoin(self.item_url, F"{item_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidItemID
item = Item(response)
if expand:
item.by = self.get_user(item.by)
item.kids = self.get_items_by_ids(item.kids) if item.kids else None
item.parent = self.get_item(item.parent) if item.parent else None
item.poll = self.get_item(item.poll) if item.poll else None
item.parts = (
self.get_items_by_ids(item.parts) if item.parts else None
)
return item
def get_items_by_ids(self, item_ids, item_type=None):
"""Given a list of item ids, return all the Item objects
Args:
item_ids (obj): List of item IDs to query
item_type (str): (optional) Item type to filter results with
Returns:
List of `Item` objects for given item IDs and given item type
"""
urls = [urljoin(self.item_url, F"{i}.json") for i in item_ids]
result = self._run_async(urls=urls)
items = [Item(r) for r in result if r]
if item_type:
return [item for item in items if item.item_type == item_type]
else:
return items
def get_user(self, user_id, expand=False):
"""Returns Hacker News `User` object.
Fetches data from the url:
https://hacker-news.firebaseio.com/v0/user/<user_id>.json
e.g. https://hacker-news.firebaseio.com/v0/user/pg.json
Args:
user_id (string): unique user id of a Hacker News user.
expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`User` object representing a user on Hacker News.
Raises:
InvalidUserID: If no such user exists on Hacker News.
"""
url = urljoin(self.user_url, F"{user_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidUserID
user = User(response)
if expand and user.submitted:
items = self.get_items_by_ids(user.submitted)
user_opt = {
'stories': 'story',
'comments': 'comment',
'jobs': 'job',
'polls': 'poll',
'pollopts': 'pollopt'
}
for key, value in user_opt.items():
setattr(
user,
key,
[i for i in items if i.item_type == value]
)
return user
def get_users_by_ids(self, user_ids):
"""
Given a list of user ids, return all the User objects
"""
urls = [urljoin(self.user_url, F"{i}.json") for i in user_ids]
result = self._run_async(urls=urls)
return [User(r) for r in result if r]
def top_stories(self, raw=False, limit=None):
"""Returns list of item ids of current top stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to represent all
objects in raw json.
Returns:
`list` object containing ids of top stories.
"""
top_stories = self._get_stories('topstories', limit)
if raw:
top_stories = [story.raw for story in top_stories]
return top_stories
def new_stories(self, raw=False, limit=None):
"""Returns list of item ids of current new stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of new stories.
"""
new_stories = self._get_stories('newstories', limit)
if raw:
new_stories = [story.raw for story in new_stories]
return new_stories
def ask_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Ask HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Ask HN stories.
"""
ask_stories = self._get_stories('askstories', limit)
if raw:
ask_stories = [story.raw for story in ask_stories]
return ask_stories
def job_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Job stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Job stories.
"""
job_stories = self._get_stories('jobstories', limit)
if raw:
job_stories = [story.raw for story in job_stories]
return job_stories
def updates(self):
"""Returns list of item ids and user ids that have been
changed/updated recently.
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/updates.json
Returns:
`dict` with two keys whose values are `list` objects
"""
url = urljoin(self.base_url, 'updates.json')
response = self._get_sync(url)
return {
'items': self.get_items_by_ids(item_ids=response['items']),
'profiles': self.get_users_by_ids(user_ids=response['profiles'])
}
def get_max_item(self, expand=False):
"""The current largest item id
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/maxitem.json
Args:
expand (bool): Flag to indicate whether to transform all
IDs into objects.
Returns:
`int` if successful.
"""
url = urljoin(self.base_url, 'maxitem.json')
response = self._get_sync(url)
if expand:
return self.get_item(response)
else:
return response
def get_all(self):
"""Returns ENTIRE Hacker News!
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
return self.get_last(num=max_item)
def get_last(self, num=10):
"""Returns last `num` of HN stories
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
urls = [urljoin(self.item_url, F"{i}.json") for i in range(
max_item - num + 1, max_item + 1)]
result = self._run_async(urls=urls)
return [Item(r) for r in result if r]
|
avinassh/haxor
|
hackernews/__init__.py
|
HackerNews.job_stories
|
python
|
def job_stories(self, raw=False, limit=None):
job_stories = self._get_stories('jobstories', limit)
if raw:
job_stories = [story.raw for story in job_stories]
return job_stories
|
Returns list of item ids of latest Job stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Job stories.
|
train
|
https://github.com/avinassh/haxor/blob/71dbecf87531f7a24bb39c736d53127427aaca84/hackernews/__init__.py#L344-L359
|
[
"def _get_stories(self, page, limit):\n \"\"\"\n Hacker News has different categories (i.e. stories) like\n 'topstories', 'newstories', 'askstories', 'showstories', 'jobstories'.\n This method, first fetches the relevant story ids of that category\n\n The URL is: https://hacker-news.firebaseio.com/v0/<story_name>.json\n\n e.g. https://hacker-news.firebaseio.com/v0/topstories.json\n\n Then, asynchronously it fetches each story and returns the Item objects\n\n The URL for individual story is:\n https://hacker-news.firebaseio.com/v0/item/<item_id>.json\n\n e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json\n\n \"\"\"\n url = urljoin(self.base_url, F\"{page}.json\")\n story_ids = self._get_sync(url)[:limit]\n return self.get_items_by_ids(item_ids=story_ids)\n"
] |
class HackerNews(object):
def __init__(self, version='v0'):
"""
Args:
version (string): specifies Hacker News API version.
Default is `v0`.
Raises:
InvalidAPIVersion: If Hacker News version is not supported.
"""
try:
self.base_url = supported_api_versions[version]
except KeyError:
raise InvalidAPIVersion
self.item_url = urljoin(self.base_url, 'item/')
self.user_url = urljoin(self.base_url, 'user/')
self.session = requests.Session()
def _get_sync(self, url):
"""Internal method used for GET requests
Args:
url (str): URL to fetch
Returns:
Individual URL request's response
Raises:
HTTPError: If HTTP request failed.
"""
response = self.session.get(url)
if response.status_code == requests.codes.ok:
return response.json()
else:
raise HTTPError
async def _get_async(self, url, session):
"""Asynchronous internal method used for GET requests
Args:
url (str): URL to fetch
session (obj): aiohttp client session for async loop
Returns:
data (obj): Individual URL request's response corountine
"""
data = None
async with session.get(url) as resp:
if resp.status == 200:
data = await resp.json()
return data
async def _async_loop(self, urls):
"""Asynchronous internal method used to request multiple URLs
Args:
urls (list): URLs to fetch
Returns:
responses (obj): All URL requests' response coroutines
"""
results = []
async with aiohttp.ClientSession(
connector=aiohttp.TCPConnector(ssl=False)
) as session:
for url in urls:
result = asyncio.ensure_future(self._get_async(url, session))
results.append(result)
responses = await asyncio.gather(*results)
return responses
def _run_async(self, urls):
"""Asynchronous event loop execution
Args:
urls (list): URLs to fetch
Returns:
results (obj): All URL requests' responses
"""
loop = asyncio.get_event_loop()
results = loop.run_until_complete(self._async_loop(urls))
return results
def _get_stories(self, page, limit):
"""
Hacker News has different categories (i.e. stories) like
'topstories', 'newstories', 'askstories', 'showstories', 'jobstories'.
This method, first fetches the relevant story ids of that category
The URL is: https://hacker-news.firebaseio.com/v0/<story_name>.json
e.g. https://hacker-news.firebaseio.com/v0/topstories.json
Then, asynchronously it fetches each story and returns the Item objects
The URL for individual story is:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
"""
url = urljoin(self.base_url, F"{page}.json")
story_ids = self._get_sync(url)[:limit]
return self.get_items_by_ids(item_ids=story_ids)
def get_item(self, item_id, expand=False):
"""Returns Hacker News `Item` object.
Fetches the data from url:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
Args:
item_id (int or string): Unique item id of Hacker News story,
comment etc.
expand (bool): expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`Item` object representing Hacker News item.
Raises:
InvalidItemID: If corresponding Hacker News story does not exist.
"""
url = urljoin(self.item_url, F"{item_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidItemID
item = Item(response)
if expand:
item.by = self.get_user(item.by)
item.kids = self.get_items_by_ids(item.kids) if item.kids else None
item.parent = self.get_item(item.parent) if item.parent else None
item.poll = self.get_item(item.poll) if item.poll else None
item.parts = (
self.get_items_by_ids(item.parts) if item.parts else None
)
return item
def get_items_by_ids(self, item_ids, item_type=None):
"""Given a list of item ids, return all the Item objects
Args:
item_ids (obj): List of item IDs to query
item_type (str): (optional) Item type to filter results with
Returns:
List of `Item` objects for given item IDs and given item type
"""
urls = [urljoin(self.item_url, F"{i}.json") for i in item_ids]
result = self._run_async(urls=urls)
items = [Item(r) for r in result if r]
if item_type:
return [item for item in items if item.item_type == item_type]
else:
return items
def get_user(self, user_id, expand=False):
"""Returns Hacker News `User` object.
Fetches data from the url:
https://hacker-news.firebaseio.com/v0/user/<user_id>.json
e.g. https://hacker-news.firebaseio.com/v0/user/pg.json
Args:
user_id (string): unique user id of a Hacker News user.
expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`User` object representing a user on Hacker News.
Raises:
InvalidUserID: If no such user exists on Hacker News.
"""
url = urljoin(self.user_url, F"{user_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidUserID
user = User(response)
if expand and user.submitted:
items = self.get_items_by_ids(user.submitted)
user_opt = {
'stories': 'story',
'comments': 'comment',
'jobs': 'job',
'polls': 'poll',
'pollopts': 'pollopt'
}
for key, value in user_opt.items():
setattr(
user,
key,
[i for i in items if i.item_type == value]
)
return user
def get_users_by_ids(self, user_ids):
"""
Given a list of user ids, return all the User objects
"""
urls = [urljoin(self.user_url, F"{i}.json") for i in user_ids]
result = self._run_async(urls=urls)
return [User(r) for r in result if r]
def top_stories(self, raw=False, limit=None):
"""Returns list of item ids of current top stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to represent all
objects in raw json.
Returns:
`list` object containing ids of top stories.
"""
top_stories = self._get_stories('topstories', limit)
if raw:
top_stories = [story.raw for story in top_stories]
return top_stories
def new_stories(self, raw=False, limit=None):
"""Returns list of item ids of current new stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of new stories.
"""
new_stories = self._get_stories('newstories', limit)
if raw:
new_stories = [story.raw for story in new_stories]
return new_stories
def ask_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Ask HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Ask HN stories.
"""
ask_stories = self._get_stories('askstories', limit)
if raw:
ask_stories = [story.raw for story in ask_stories]
return ask_stories
def show_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Show HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Show HN stories.
"""
show_stories = self._get_stories('showstories', limit)
if raw:
show_stories = [story.raw for story in show_stories]
return show_stories
def updates(self):
"""Returns list of item ids and user ids that have been
changed/updated recently.
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/updates.json
Returns:
`dict` with two keys whose values are `list` objects
"""
url = urljoin(self.base_url, 'updates.json')
response = self._get_sync(url)
return {
'items': self.get_items_by_ids(item_ids=response['items']),
'profiles': self.get_users_by_ids(user_ids=response['profiles'])
}
def get_max_item(self, expand=False):
"""The current largest item id
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/maxitem.json
Args:
expand (bool): Flag to indicate whether to transform all
IDs into objects.
Returns:
`int` if successful.
"""
url = urljoin(self.base_url, 'maxitem.json')
response = self._get_sync(url)
if expand:
return self.get_item(response)
else:
return response
def get_all(self):
"""Returns ENTIRE Hacker News!
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
return self.get_last(num=max_item)
def get_last(self, num=10):
"""Returns last `num` of HN stories
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
urls = [urljoin(self.item_url, F"{i}.json") for i in range(
max_item - num + 1, max_item + 1)]
result = self._run_async(urls=urls)
return [Item(r) for r in result if r]
|
avinassh/haxor
|
hackernews/__init__.py
|
HackerNews.updates
|
python
|
def updates(self):
url = urljoin(self.base_url, 'updates.json')
response = self._get_sync(url)
return {
'items': self.get_items_by_ids(item_ids=response['items']),
'profiles': self.get_users_by_ids(user_ids=response['profiles'])
}
|
Returns list of item ids and user ids that have been
changed/updated recently.
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/updates.json
Returns:
`dict` with two keys whose values are `list` objects
|
train
|
https://github.com/avinassh/haxor/blob/71dbecf87531f7a24bb39c736d53127427aaca84/hackernews/__init__.py#L361-L377
|
[
"def _get_sync(self, url):\n \"\"\"Internal method used for GET requests\n\n Args:\n url (str): URL to fetch\n\n Returns:\n Individual URL request's response\n\n Raises:\n HTTPError: If HTTP request failed.\n \"\"\"\n response = self.session.get(url)\n if response.status_code == requests.codes.ok:\n return response.json()\n else:\n raise HTTPError\n",
"def get_items_by_ids(self, item_ids, item_type=None):\n \"\"\"Given a list of item ids, return all the Item objects\n\n Args:\n item_ids (obj): List of item IDs to query\n item_type (str): (optional) Item type to filter results with\n\n Returns:\n List of `Item` objects for given item IDs and given item type\n\n \"\"\"\n urls = [urljoin(self.item_url, F\"{i}.json\") for i in item_ids]\n result = self._run_async(urls=urls)\n items = [Item(r) for r in result if r]\n if item_type:\n return [item for item in items if item.item_type == item_type]\n else:\n return items\n",
"def get_users_by_ids(self, user_ids):\n \"\"\"\n Given a list of user ids, return all the User objects\n \"\"\"\n urls = [urljoin(self.user_url, F\"{i}.json\") for i in user_ids]\n result = self._run_async(urls=urls)\n return [User(r) for r in result if r]\n"
] |
class HackerNews(object):
def __init__(self, version='v0'):
"""
Args:
version (string): specifies Hacker News API version.
Default is `v0`.
Raises:
InvalidAPIVersion: If Hacker News version is not supported.
"""
try:
self.base_url = supported_api_versions[version]
except KeyError:
raise InvalidAPIVersion
self.item_url = urljoin(self.base_url, 'item/')
self.user_url = urljoin(self.base_url, 'user/')
self.session = requests.Session()
def _get_sync(self, url):
"""Internal method used for GET requests
Args:
url (str): URL to fetch
Returns:
Individual URL request's response
Raises:
HTTPError: If HTTP request failed.
"""
response = self.session.get(url)
if response.status_code == requests.codes.ok:
return response.json()
else:
raise HTTPError
async def _get_async(self, url, session):
"""Asynchronous internal method used for GET requests
Args:
url (str): URL to fetch
session (obj): aiohttp client session for async loop
Returns:
data (obj): Individual URL request's response corountine
"""
data = None
async with session.get(url) as resp:
if resp.status == 200:
data = await resp.json()
return data
async def _async_loop(self, urls):
"""Asynchronous internal method used to request multiple URLs
Args:
urls (list): URLs to fetch
Returns:
responses (obj): All URL requests' response coroutines
"""
results = []
async with aiohttp.ClientSession(
connector=aiohttp.TCPConnector(ssl=False)
) as session:
for url in urls:
result = asyncio.ensure_future(self._get_async(url, session))
results.append(result)
responses = await asyncio.gather(*results)
return responses
def _run_async(self, urls):
"""Asynchronous event loop execution
Args:
urls (list): URLs to fetch
Returns:
results (obj): All URL requests' responses
"""
loop = asyncio.get_event_loop()
results = loop.run_until_complete(self._async_loop(urls))
return results
def _get_stories(self, page, limit):
"""
Hacker News has different categories (i.e. stories) like
'topstories', 'newstories', 'askstories', 'showstories', 'jobstories'.
This method, first fetches the relevant story ids of that category
The URL is: https://hacker-news.firebaseio.com/v0/<story_name>.json
e.g. https://hacker-news.firebaseio.com/v0/topstories.json
Then, asynchronously it fetches each story and returns the Item objects
The URL for individual story is:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
"""
url = urljoin(self.base_url, F"{page}.json")
story_ids = self._get_sync(url)[:limit]
return self.get_items_by_ids(item_ids=story_ids)
def get_item(self, item_id, expand=False):
"""Returns Hacker News `Item` object.
Fetches the data from url:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
Args:
item_id (int or string): Unique item id of Hacker News story,
comment etc.
expand (bool): expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`Item` object representing Hacker News item.
Raises:
InvalidItemID: If corresponding Hacker News story does not exist.
"""
url = urljoin(self.item_url, F"{item_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidItemID
item = Item(response)
if expand:
item.by = self.get_user(item.by)
item.kids = self.get_items_by_ids(item.kids) if item.kids else None
item.parent = self.get_item(item.parent) if item.parent else None
item.poll = self.get_item(item.poll) if item.poll else None
item.parts = (
self.get_items_by_ids(item.parts) if item.parts else None
)
return item
def get_items_by_ids(self, item_ids, item_type=None):
"""Given a list of item ids, return all the Item objects
Args:
item_ids (obj): List of item IDs to query
item_type (str): (optional) Item type to filter results with
Returns:
List of `Item` objects for given item IDs and given item type
"""
urls = [urljoin(self.item_url, F"{i}.json") for i in item_ids]
result = self._run_async(urls=urls)
items = [Item(r) for r in result if r]
if item_type:
return [item for item in items if item.item_type == item_type]
else:
return items
def get_user(self, user_id, expand=False):
"""Returns Hacker News `User` object.
Fetches data from the url:
https://hacker-news.firebaseio.com/v0/user/<user_id>.json
e.g. https://hacker-news.firebaseio.com/v0/user/pg.json
Args:
user_id (string): unique user id of a Hacker News user.
expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`User` object representing a user on Hacker News.
Raises:
InvalidUserID: If no such user exists on Hacker News.
"""
url = urljoin(self.user_url, F"{user_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidUserID
user = User(response)
if expand and user.submitted:
items = self.get_items_by_ids(user.submitted)
user_opt = {
'stories': 'story',
'comments': 'comment',
'jobs': 'job',
'polls': 'poll',
'pollopts': 'pollopt'
}
for key, value in user_opt.items():
setattr(
user,
key,
[i for i in items if i.item_type == value]
)
return user
def get_users_by_ids(self, user_ids):
"""
Given a list of user ids, return all the User objects
"""
urls = [urljoin(self.user_url, F"{i}.json") for i in user_ids]
result = self._run_async(urls=urls)
return [User(r) for r in result if r]
def top_stories(self, raw=False, limit=None):
"""Returns list of item ids of current top stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to represent all
objects in raw json.
Returns:
`list` object containing ids of top stories.
"""
top_stories = self._get_stories('topstories', limit)
if raw:
top_stories = [story.raw for story in top_stories]
return top_stories
def new_stories(self, raw=False, limit=None):
"""Returns list of item ids of current new stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of new stories.
"""
new_stories = self._get_stories('newstories', limit)
if raw:
new_stories = [story.raw for story in new_stories]
return new_stories
def ask_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Ask HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Ask HN stories.
"""
ask_stories = self._get_stories('askstories', limit)
if raw:
ask_stories = [story.raw for story in ask_stories]
return ask_stories
def show_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Show HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Show HN stories.
"""
show_stories = self._get_stories('showstories', limit)
if raw:
show_stories = [story.raw for story in show_stories]
return show_stories
def job_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Job stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Job stories.
"""
job_stories = self._get_stories('jobstories', limit)
if raw:
job_stories = [story.raw for story in job_stories]
return job_stories
def get_max_item(self, expand=False):
"""The current largest item id
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/maxitem.json
Args:
expand (bool): Flag to indicate whether to transform all
IDs into objects.
Returns:
`int` if successful.
"""
url = urljoin(self.base_url, 'maxitem.json')
response = self._get_sync(url)
if expand:
return self.get_item(response)
else:
return response
def get_all(self):
"""Returns ENTIRE Hacker News!
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
return self.get_last(num=max_item)
def get_last(self, num=10):
"""Returns last `num` of HN stories
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
urls = [urljoin(self.item_url, F"{i}.json") for i in range(
max_item - num + 1, max_item + 1)]
result = self._run_async(urls=urls)
return [Item(r) for r in result if r]
|
avinassh/haxor
|
hackernews/__init__.py
|
HackerNews.get_max_item
|
python
|
def get_max_item(self, expand=False):
url = urljoin(self.base_url, 'maxitem.json')
response = self._get_sync(url)
if expand:
return self.get_item(response)
else:
return response
|
The current largest item id
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/maxitem.json
Args:
expand (bool): Flag to indicate whether to transform all
IDs into objects.
Returns:
`int` if successful.
|
train
|
https://github.com/avinassh/haxor/blob/71dbecf87531f7a24bb39c736d53127427aaca84/hackernews/__init__.py#L379-L398
|
[
"def _get_sync(self, url):\n \"\"\"Internal method used for GET requests\n\n Args:\n url (str): URL to fetch\n\n Returns:\n Individual URL request's response\n\n Raises:\n HTTPError: If HTTP request failed.\n \"\"\"\n response = self.session.get(url)\n if response.status_code == requests.codes.ok:\n return response.json()\n else:\n raise HTTPError\n",
"def get_item(self, item_id, expand=False):\n \"\"\"Returns Hacker News `Item` object.\n\n Fetches the data from url:\n https://hacker-news.firebaseio.com/v0/item/<item_id>.json\n\n e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json\n\n Args:\n item_id (int or string): Unique item id of Hacker News story,\n comment etc.\n expand (bool): expand (bool): Flag to indicate whether to\n transform all IDs into objects.\n\n Returns:\n `Item` object representing Hacker News item.\n\n Raises:\n InvalidItemID: If corresponding Hacker News story does not exist.\n\n \"\"\"\n url = urljoin(self.item_url, F\"{item_id}.json\")\n response = self._get_sync(url)\n\n if not response:\n raise InvalidItemID\n\n item = Item(response)\n if expand:\n item.by = self.get_user(item.by)\n item.kids = self.get_items_by_ids(item.kids) if item.kids else None\n item.parent = self.get_item(item.parent) if item.parent else None\n item.poll = self.get_item(item.poll) if item.poll else None\n item.parts = (\n self.get_items_by_ids(item.parts) if item.parts else None\n )\n\n return item\n"
] |
class HackerNews(object):
def __init__(self, version='v0'):
"""
Args:
version (string): specifies Hacker News API version.
Default is `v0`.
Raises:
InvalidAPIVersion: If Hacker News version is not supported.
"""
try:
self.base_url = supported_api_versions[version]
except KeyError:
raise InvalidAPIVersion
self.item_url = urljoin(self.base_url, 'item/')
self.user_url = urljoin(self.base_url, 'user/')
self.session = requests.Session()
def _get_sync(self, url):
"""Internal method used for GET requests
Args:
url (str): URL to fetch
Returns:
Individual URL request's response
Raises:
HTTPError: If HTTP request failed.
"""
response = self.session.get(url)
if response.status_code == requests.codes.ok:
return response.json()
else:
raise HTTPError
async def _get_async(self, url, session):
"""Asynchronous internal method used for GET requests
Args:
url (str): URL to fetch
session (obj): aiohttp client session for async loop
Returns:
data (obj): Individual URL request's response corountine
"""
data = None
async with session.get(url) as resp:
if resp.status == 200:
data = await resp.json()
return data
async def _async_loop(self, urls):
"""Asynchronous internal method used to request multiple URLs
Args:
urls (list): URLs to fetch
Returns:
responses (obj): All URL requests' response coroutines
"""
results = []
async with aiohttp.ClientSession(
connector=aiohttp.TCPConnector(ssl=False)
) as session:
for url in urls:
result = asyncio.ensure_future(self._get_async(url, session))
results.append(result)
responses = await asyncio.gather(*results)
return responses
def _run_async(self, urls):
"""Asynchronous event loop execution
Args:
urls (list): URLs to fetch
Returns:
results (obj): All URL requests' responses
"""
loop = asyncio.get_event_loop()
results = loop.run_until_complete(self._async_loop(urls))
return results
def _get_stories(self, page, limit):
"""
Hacker News has different categories (i.e. stories) like
'topstories', 'newstories', 'askstories', 'showstories', 'jobstories'.
This method, first fetches the relevant story ids of that category
The URL is: https://hacker-news.firebaseio.com/v0/<story_name>.json
e.g. https://hacker-news.firebaseio.com/v0/topstories.json
Then, asynchronously it fetches each story and returns the Item objects
The URL for individual story is:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
"""
url = urljoin(self.base_url, F"{page}.json")
story_ids = self._get_sync(url)[:limit]
return self.get_items_by_ids(item_ids=story_ids)
def get_item(self, item_id, expand=False):
"""Returns Hacker News `Item` object.
Fetches the data from url:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
Args:
item_id (int or string): Unique item id of Hacker News story,
comment etc.
expand (bool): expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`Item` object representing Hacker News item.
Raises:
InvalidItemID: If corresponding Hacker News story does not exist.
"""
url = urljoin(self.item_url, F"{item_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidItemID
item = Item(response)
if expand:
item.by = self.get_user(item.by)
item.kids = self.get_items_by_ids(item.kids) if item.kids else None
item.parent = self.get_item(item.parent) if item.parent else None
item.poll = self.get_item(item.poll) if item.poll else None
item.parts = (
self.get_items_by_ids(item.parts) if item.parts else None
)
return item
def get_items_by_ids(self, item_ids, item_type=None):
"""Given a list of item ids, return all the Item objects
Args:
item_ids (obj): List of item IDs to query
item_type (str): (optional) Item type to filter results with
Returns:
List of `Item` objects for given item IDs and given item type
"""
urls = [urljoin(self.item_url, F"{i}.json") for i in item_ids]
result = self._run_async(urls=urls)
items = [Item(r) for r in result if r]
if item_type:
return [item for item in items if item.item_type == item_type]
else:
return items
def get_user(self, user_id, expand=False):
"""Returns Hacker News `User` object.
Fetches data from the url:
https://hacker-news.firebaseio.com/v0/user/<user_id>.json
e.g. https://hacker-news.firebaseio.com/v0/user/pg.json
Args:
user_id (string): unique user id of a Hacker News user.
expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`User` object representing a user on Hacker News.
Raises:
InvalidUserID: If no such user exists on Hacker News.
"""
url = urljoin(self.user_url, F"{user_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidUserID
user = User(response)
if expand and user.submitted:
items = self.get_items_by_ids(user.submitted)
user_opt = {
'stories': 'story',
'comments': 'comment',
'jobs': 'job',
'polls': 'poll',
'pollopts': 'pollopt'
}
for key, value in user_opt.items():
setattr(
user,
key,
[i for i in items if i.item_type == value]
)
return user
def get_users_by_ids(self, user_ids):
"""
Given a list of user ids, return all the User objects
"""
urls = [urljoin(self.user_url, F"{i}.json") for i in user_ids]
result = self._run_async(urls=urls)
return [User(r) for r in result if r]
def top_stories(self, raw=False, limit=None):
"""Returns list of item ids of current top stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to represent all
objects in raw json.
Returns:
`list` object containing ids of top stories.
"""
top_stories = self._get_stories('topstories', limit)
if raw:
top_stories = [story.raw for story in top_stories]
return top_stories
def new_stories(self, raw=False, limit=None):
"""Returns list of item ids of current new stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of new stories.
"""
new_stories = self._get_stories('newstories', limit)
if raw:
new_stories = [story.raw for story in new_stories]
return new_stories
def ask_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Ask HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Ask HN stories.
"""
ask_stories = self._get_stories('askstories', limit)
if raw:
ask_stories = [story.raw for story in ask_stories]
return ask_stories
def show_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Show HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Show HN stories.
"""
show_stories = self._get_stories('showstories', limit)
if raw:
show_stories = [story.raw for story in show_stories]
return show_stories
def job_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Job stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Job stories.
"""
job_stories = self._get_stories('jobstories', limit)
if raw:
job_stories = [story.raw for story in job_stories]
return job_stories
def updates(self):
"""Returns list of item ids and user ids that have been
changed/updated recently.
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/updates.json
Returns:
`dict` with two keys whose values are `list` objects
"""
url = urljoin(self.base_url, 'updates.json')
response = self._get_sync(url)
return {
'items': self.get_items_by_ids(item_ids=response['items']),
'profiles': self.get_users_by_ids(user_ids=response['profiles'])
}
def get_all(self):
"""Returns ENTIRE Hacker News!
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
return self.get_last(num=max_item)
def get_last(self, num=10):
"""Returns last `num` of HN stories
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
urls = [urljoin(self.item_url, F"{i}.json") for i in range(
max_item - num + 1, max_item + 1)]
result = self._run_async(urls=urls)
return [Item(r) for r in result if r]
|
avinassh/haxor
|
hackernews/__init__.py
|
HackerNews.get_last
|
python
|
def get_last(self, num=10):
max_item = self.get_max_item()
urls = [urljoin(self.item_url, F"{i}.json") for i in range(
max_item - num + 1, max_item + 1)]
result = self._run_async(urls=urls)
return [Item(r) for r in result if r]
|
Returns last `num` of HN stories
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
|
train
|
https://github.com/avinassh/haxor/blob/71dbecf87531f7a24bb39c736d53127427aaca84/hackernews/__init__.py#L412-L425
|
[
"def _run_async(self, urls):\n \"\"\"Asynchronous event loop execution\n\n Args:\n urls (list): URLs to fetch\n\n Returns:\n results (obj): All URL requests' responses\n\n \"\"\"\n loop = asyncio.get_event_loop()\n results = loop.run_until_complete(self._async_loop(urls))\n return results\n",
"def get_max_item(self, expand=False):\n \"\"\"The current largest item id\n\n Fetches data from URL:\n https://hacker-news.firebaseio.com/v0/maxitem.json\n\n Args:\n expand (bool): Flag to indicate whether to transform all\n IDs into objects.\n\n Returns:\n `int` if successful.\n\n \"\"\"\n url = urljoin(self.base_url, 'maxitem.json')\n response = self._get_sync(url)\n if expand:\n return self.get_item(response)\n else:\n return response\n"
] |
class HackerNews(object):
def __init__(self, version='v0'):
"""
Args:
version (string): specifies Hacker News API version.
Default is `v0`.
Raises:
InvalidAPIVersion: If Hacker News version is not supported.
"""
try:
self.base_url = supported_api_versions[version]
except KeyError:
raise InvalidAPIVersion
self.item_url = urljoin(self.base_url, 'item/')
self.user_url = urljoin(self.base_url, 'user/')
self.session = requests.Session()
def _get_sync(self, url):
"""Internal method used for GET requests
Args:
url (str): URL to fetch
Returns:
Individual URL request's response
Raises:
HTTPError: If HTTP request failed.
"""
response = self.session.get(url)
if response.status_code == requests.codes.ok:
return response.json()
else:
raise HTTPError
async def _get_async(self, url, session):
"""Asynchronous internal method used for GET requests
Args:
url (str): URL to fetch
session (obj): aiohttp client session for async loop
Returns:
data (obj): Individual URL request's response corountine
"""
data = None
async with session.get(url) as resp:
if resp.status == 200:
data = await resp.json()
return data
async def _async_loop(self, urls):
"""Asynchronous internal method used to request multiple URLs
Args:
urls (list): URLs to fetch
Returns:
responses (obj): All URL requests' response coroutines
"""
results = []
async with aiohttp.ClientSession(
connector=aiohttp.TCPConnector(ssl=False)
) as session:
for url in urls:
result = asyncio.ensure_future(self._get_async(url, session))
results.append(result)
responses = await asyncio.gather(*results)
return responses
def _run_async(self, urls):
"""Asynchronous event loop execution
Args:
urls (list): URLs to fetch
Returns:
results (obj): All URL requests' responses
"""
loop = asyncio.get_event_loop()
results = loop.run_until_complete(self._async_loop(urls))
return results
def _get_stories(self, page, limit):
"""
Hacker News has different categories (i.e. stories) like
'topstories', 'newstories', 'askstories', 'showstories', 'jobstories'.
This method, first fetches the relevant story ids of that category
The URL is: https://hacker-news.firebaseio.com/v0/<story_name>.json
e.g. https://hacker-news.firebaseio.com/v0/topstories.json
Then, asynchronously it fetches each story and returns the Item objects
The URL for individual story is:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
"""
url = urljoin(self.base_url, F"{page}.json")
story_ids = self._get_sync(url)[:limit]
return self.get_items_by_ids(item_ids=story_ids)
def get_item(self, item_id, expand=False):
"""Returns Hacker News `Item` object.
Fetches the data from url:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
Args:
item_id (int or string): Unique item id of Hacker News story,
comment etc.
expand (bool): expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`Item` object representing Hacker News item.
Raises:
InvalidItemID: If corresponding Hacker News story does not exist.
"""
url = urljoin(self.item_url, F"{item_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidItemID
item = Item(response)
if expand:
item.by = self.get_user(item.by)
item.kids = self.get_items_by_ids(item.kids) if item.kids else None
item.parent = self.get_item(item.parent) if item.parent else None
item.poll = self.get_item(item.poll) if item.poll else None
item.parts = (
self.get_items_by_ids(item.parts) if item.parts else None
)
return item
def get_items_by_ids(self, item_ids, item_type=None):
"""Given a list of item ids, return all the Item objects
Args:
item_ids (obj): List of item IDs to query
item_type (str): (optional) Item type to filter results with
Returns:
List of `Item` objects for given item IDs and given item type
"""
urls = [urljoin(self.item_url, F"{i}.json") for i in item_ids]
result = self._run_async(urls=urls)
items = [Item(r) for r in result if r]
if item_type:
return [item for item in items if item.item_type == item_type]
else:
return items
def get_user(self, user_id, expand=False):
"""Returns Hacker News `User` object.
Fetches data from the url:
https://hacker-news.firebaseio.com/v0/user/<user_id>.json
e.g. https://hacker-news.firebaseio.com/v0/user/pg.json
Args:
user_id (string): unique user id of a Hacker News user.
expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`User` object representing a user on Hacker News.
Raises:
InvalidUserID: If no such user exists on Hacker News.
"""
url = urljoin(self.user_url, F"{user_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidUserID
user = User(response)
if expand and user.submitted:
items = self.get_items_by_ids(user.submitted)
user_opt = {
'stories': 'story',
'comments': 'comment',
'jobs': 'job',
'polls': 'poll',
'pollopts': 'pollopt'
}
for key, value in user_opt.items():
setattr(
user,
key,
[i for i in items if i.item_type == value]
)
return user
def get_users_by_ids(self, user_ids):
"""
Given a list of user ids, return all the User objects
"""
urls = [urljoin(self.user_url, F"{i}.json") for i in user_ids]
result = self._run_async(urls=urls)
return [User(r) for r in result if r]
def top_stories(self, raw=False, limit=None):
"""Returns list of item ids of current top stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to represent all
objects in raw json.
Returns:
`list` object containing ids of top stories.
"""
top_stories = self._get_stories('topstories', limit)
if raw:
top_stories = [story.raw for story in top_stories]
return top_stories
def new_stories(self, raw=False, limit=None):
"""Returns list of item ids of current new stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of new stories.
"""
new_stories = self._get_stories('newstories', limit)
if raw:
new_stories = [story.raw for story in new_stories]
return new_stories
def ask_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Ask HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Ask HN stories.
"""
ask_stories = self._get_stories('askstories', limit)
if raw:
ask_stories = [story.raw for story in ask_stories]
return ask_stories
def show_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Show HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Show HN stories.
"""
show_stories = self._get_stories('showstories', limit)
if raw:
show_stories = [story.raw for story in show_stories]
return show_stories
def job_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Job stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Job stories.
"""
job_stories = self._get_stories('jobstories', limit)
if raw:
job_stories = [story.raw for story in job_stories]
return job_stories
def updates(self):
"""Returns list of item ids and user ids that have been
changed/updated recently.
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/updates.json
Returns:
`dict` with two keys whose values are `list` objects
"""
url = urljoin(self.base_url, 'updates.json')
response = self._get_sync(url)
return {
'items': self.get_items_by_ids(item_ids=response['items']),
'profiles': self.get_users_by_ids(user_ids=response['profiles'])
}
def get_max_item(self, expand=False):
"""The current largest item id
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/maxitem.json
Args:
expand (bool): Flag to indicate whether to transform all
IDs into objects.
Returns:
`int` if successful.
"""
url = urljoin(self.base_url, 'maxitem.json')
response = self._get_sync(url)
if expand:
return self.get_item(response)
else:
return response
def get_all(self):
"""Returns ENTIRE Hacker News!
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
return self.get_last(num=max_item)
|
Zsailer/phylopandas
|
phylopandas/treeio/read.py
|
_dendropy_to_dataframe
|
python
|
def _dendropy_to_dataframe(
tree,
add_node_labels=True,
use_uids=True):
# Maximum distance from root.
tree.max_distance_from_root()
# Initialize the data object.
idx = []
data = {
'type': [],
'id': [],
'parent': [],
'length': [],
'label': [],
'distance': []}
if use_uids:
data['uid'] = []
# Add labels to internal nodes if set to true.
if add_node_labels:
for i, node in enumerate(tree.internal_nodes()):
node.label = str(i)
for node in tree.nodes():
# Get node type
if node.is_leaf():
type_ = 'leaf'
label = str(node.taxon.label).replace(' ', '_')
elif node.is_internal():
type_ = 'node'
label = str(node.label)
# Set node label and parent.
id_ = label
parent_node = node.parent_node
length = node.edge_length
distance = node.distance_from_root()
# Is this node a root?
if parent_node is None and length is None:
parent_label = None
parent_node = None
length = 0
distance = 0
type_ = 'root'
# Set parent node label
elif parent_node.is_internal():
parent_label = str(parent_node.label)
else:
raise Exception("Subtree is not attached to tree?")
# Add this node to the data.
data['type'].append(type_)
data['id'].append(id_)
data['parent'].append(parent_label)
data['length'].append(length)
data['label'].append(label)
data['distance'].append(distance)
if use_uids:
data['uid'].append(get_random_id(10))
# Construct dataframe.
df = pandas.DataFrame(data)
return df
|
Convert Dendropy tree to Pandas dataframe.
|
train
|
https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/treeio/read.py#L35-L104
| null |
import pandas
import dendropy
from ..utils import get_random_id
def _read_doc_template(schema):
doc = """
Read a {} tree into a phylopandas.DataFrame.
The resulting DataFrame has the following columns:
- name: label for each taxa or node.
- id: unique id (created by phylopandas) given to each node.
- type: type of node (leaf, internal, or root).
- parent: parent id. necessary for constructing trees.
- length: length of branch from parent to node.
- distance: distance from root.
Parameters
----------
filename: str (default is None)
{} file to read into DataFrame.
data: str (default is None)
{} string to parse and read into DataFrame.
add_node_labels: bool
If true, labels the internal nodes with numbers.
Returns
-------
df: phylopandas.DataFrame
""".format(schema, schema, schema)
return doc
def _read(
filename=None,
data=None,
schema=None,
add_node_labels=True,
use_uids=True
):
"""Read a phylogenetic tree into a phylopandas.DataFrame.
The resulting DataFrame has the following columns:
- name: label for each taxa or node.
- id: unique id (created by phylopandas) given to each node.
- type: type of node (leaf, internal, or root).
- parent: parent id. necessary for constructing trees.
- length: length of branch from parent to node.
- distance: distance from root.
Parameters
----------
filename: str (default is None)
newick file to read into DataFrame.
data: str (default is None)
newick string to parse and read into DataFrame.
add_node_labels: bool
If true, labels the internal nodes with numbers.
Returns
-------
df: phylopandas.DataFrame.
"""
if filename is not None:
# Use Dendropy to parse tree.
tree = dendropy.Tree.get(
path=filename,
schema=schema,
preserve_underscores=True)
elif data is not None:
tree = dendropy.Tree.get(
data=data,
schema=schema,
preserve_underscores=True)
else:
raise Exception('No tree given?')
df = _dendropy_to_dataframe(
tree,
add_node_labels=add_node_labels,
use_uids=use_uids
)
return df
def _read_method(schema):
"""Add a write method for named schema to a class.
"""
def func(
self,
filename=None,
data=None,
add_node_labels=True,
combine_on='index',
use_uids=True,
**kwargs):
# Use generic write class to write data.
df0 = self._data
df1 = _read(
filename=filename,
data=data,
schema=schema,
add_node_labels=add_node_labels,
use_uids=use_uids,
**kwargs
)
return df0.phylo.combine(df1, on=combine_on)
# Update docs
func.__doc__ = _read_doc_template(schema)
return func
def _read_function(schema):
"""Add a write method for named schema to a class.
"""
def func(
filename=None,
data=None,
add_node_labels=True,
use_uids=True,
**kwargs):
# Use generic write class to write data.
return _read(
filename=filename,
data=data,
schema=schema,
add_node_labels=add_node_labels,
use_uids=use_uids,
**kwargs
)
# Update docs
func.__doc__ = _read_doc_template(schema)
return func
def read_dendropy(
df,
add_node_labels=True,
use_uids=True):
__doc__ = _read_doc_template('dendropy')
df = _dendropy_to_dataframe(
tree,
add_node_labels=add_node_labels,
use_uids=use_uids
)
return df
read_newick = _read_function('newick')
read_nexml = _read_function('nexml')
read_nexus_tree = _read_function('nexus')
|
Zsailer/phylopandas
|
phylopandas/treeio/read.py
|
_read
|
python
|
def _read(
filename=None,
data=None,
schema=None,
add_node_labels=True,
use_uids=True
):
if filename is not None:
# Use Dendropy to parse tree.
tree = dendropy.Tree.get(
path=filename,
schema=schema,
preserve_underscores=True)
elif data is not None:
tree = dendropy.Tree.get(
data=data,
schema=schema,
preserve_underscores=True)
else:
raise Exception('No tree given?')
df = _dendropy_to_dataframe(
tree,
add_node_labels=add_node_labels,
use_uids=use_uids
)
return df
|
Read a phylogenetic tree into a phylopandas.DataFrame.
The resulting DataFrame has the following columns:
- name: label for each taxa or node.
- id: unique id (created by phylopandas) given to each node.
- type: type of node (leaf, internal, or root).
- parent: parent id. necessary for constructing trees.
- length: length of branch from parent to node.
- distance: distance from root.
Parameters
----------
filename: str (default is None)
newick file to read into DataFrame.
data: str (default is None)
newick string to parse and read into DataFrame.
add_node_labels: bool
If true, labels the internal nodes with numbers.
Returns
-------
df: phylopandas.DataFrame.
|
train
|
https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/treeio/read.py#L107-L158
|
[
"def _dendropy_to_dataframe(\n tree,\n add_node_labels=True,\n use_uids=True):\n \"\"\"Convert Dendropy tree to Pandas dataframe.\"\"\"\n # Maximum distance from root.\n tree.max_distance_from_root()\n\n # Initialize the data object.\n idx = []\n data = {\n 'type': [],\n 'id': [],\n 'parent': [],\n 'length': [],\n 'label': [],\n 'distance': []}\n\n if use_uids:\n data['uid'] = []\n\n # Add labels to internal nodes if set to true.\n if add_node_labels:\n for i, node in enumerate(tree.internal_nodes()):\n node.label = str(i)\n\n for node in tree.nodes():\n # Get node type\n if node.is_leaf():\n type_ = 'leaf'\n label = str(node.taxon.label).replace(' ', '_')\n elif node.is_internal():\n type_ = 'node'\n label = str(node.label)\n\n # Set node label and parent.\n id_ = label\n parent_node = node.parent_node\n length = node.edge_length\n distance = node.distance_from_root()\n\n # Is this node a root?\n if parent_node is None and length is None:\n parent_label = None\n parent_node = None\n length = 0\n distance = 0\n type_ = 'root'\n\n # Set parent node label\n elif parent_node.is_internal():\n parent_label = str(parent_node.label)\n\n else:\n raise Exception(\"Subtree is not attached to tree?\")\n\n # Add this node to the data.\n data['type'].append(type_)\n data['id'].append(id_)\n data['parent'].append(parent_label)\n data['length'].append(length)\n data['label'].append(label)\n data['distance'].append(distance)\n\n if use_uids:\n data['uid'].append(get_random_id(10))\n\n # Construct dataframe.\n df = pandas.DataFrame(data)\n return df\n"
] |
import pandas
import dendropy
from ..utils import get_random_id
def _read_doc_template(schema):
doc = """
Read a {} tree into a phylopandas.DataFrame.
The resulting DataFrame has the following columns:
- name: label for each taxa or node.
- id: unique id (created by phylopandas) given to each node.
- type: type of node (leaf, internal, or root).
- parent: parent id. necessary for constructing trees.
- length: length of branch from parent to node.
- distance: distance from root.
Parameters
----------
filename: str (default is None)
{} file to read into DataFrame.
data: str (default is None)
{} string to parse and read into DataFrame.
add_node_labels: bool
If true, labels the internal nodes with numbers.
Returns
-------
df: phylopandas.DataFrame
""".format(schema, schema, schema)
return doc
def _dendropy_to_dataframe(
tree,
add_node_labels=True,
use_uids=True):
"""Convert Dendropy tree to Pandas dataframe."""
# Maximum distance from root.
tree.max_distance_from_root()
# Initialize the data object.
idx = []
data = {
'type': [],
'id': [],
'parent': [],
'length': [],
'label': [],
'distance': []}
if use_uids:
data['uid'] = []
# Add labels to internal nodes if set to true.
if add_node_labels:
for i, node in enumerate(tree.internal_nodes()):
node.label = str(i)
for node in tree.nodes():
# Get node type
if node.is_leaf():
type_ = 'leaf'
label = str(node.taxon.label).replace(' ', '_')
elif node.is_internal():
type_ = 'node'
label = str(node.label)
# Set node label and parent.
id_ = label
parent_node = node.parent_node
length = node.edge_length
distance = node.distance_from_root()
# Is this node a root?
if parent_node is None and length is None:
parent_label = None
parent_node = None
length = 0
distance = 0
type_ = 'root'
# Set parent node label
elif parent_node.is_internal():
parent_label = str(parent_node.label)
else:
raise Exception("Subtree is not attached to tree?")
# Add this node to the data.
data['type'].append(type_)
data['id'].append(id_)
data['parent'].append(parent_label)
data['length'].append(length)
data['label'].append(label)
data['distance'].append(distance)
if use_uids:
data['uid'].append(get_random_id(10))
# Construct dataframe.
df = pandas.DataFrame(data)
return df
def _read_method(schema):
"""Add a write method for named schema to a class.
"""
def func(
self,
filename=None,
data=None,
add_node_labels=True,
combine_on='index',
use_uids=True,
**kwargs):
# Use generic write class to write data.
df0 = self._data
df1 = _read(
filename=filename,
data=data,
schema=schema,
add_node_labels=add_node_labels,
use_uids=use_uids,
**kwargs
)
return df0.phylo.combine(df1, on=combine_on)
# Update docs
func.__doc__ = _read_doc_template(schema)
return func
def _read_function(schema):
"""Add a write method for named schema to a class.
"""
def func(
filename=None,
data=None,
add_node_labels=True,
use_uids=True,
**kwargs):
# Use generic write class to write data.
return _read(
filename=filename,
data=data,
schema=schema,
add_node_labels=add_node_labels,
use_uids=use_uids,
**kwargs
)
# Update docs
func.__doc__ = _read_doc_template(schema)
return func
def read_dendropy(
df,
add_node_labels=True,
use_uids=True):
__doc__ = _read_doc_template('dendropy')
df = _dendropy_to_dataframe(
tree,
add_node_labels=add_node_labels,
use_uids=use_uids
)
return df
read_newick = _read_function('newick')
read_nexml = _read_function('nexml')
read_nexus_tree = _read_function('nexus')
|
Zsailer/phylopandas
|
phylopandas/treeio/read.py
|
_read_function
|
python
|
def _read_function(schema):
def func(
filename=None,
data=None,
add_node_labels=True,
use_uids=True,
**kwargs):
# Use generic write class to write data.
return _read(
filename=filename,
data=data,
schema=schema,
add_node_labels=add_node_labels,
use_uids=use_uids,
**kwargs
)
# Update docs
func.__doc__ = _read_doc_template(schema)
return func
|
Add a write method for named schema to a class.
|
train
|
https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/treeio/read.py#L189-L209
|
[
"def _read_doc_template(schema):\n doc = \"\"\"\n Read a {} tree into a phylopandas.DataFrame.\n\n The resulting DataFrame has the following columns:\n - name: label for each taxa or node.\n - id: unique id (created by phylopandas) given to each node.\n - type: type of node (leaf, internal, or root).\n - parent: parent id. necessary for constructing trees.\n - length: length of branch from parent to node.\n - distance: distance from root.\n\n Parameters\n ----------\n filename: str (default is None)\n {} file to read into DataFrame.\n\n data: str (default is None)\n {} string to parse and read into DataFrame.\n\n add_node_labels: bool\n If true, labels the internal nodes with numbers.\n\n Returns\n -------\n df: phylopandas.DataFrame\n \"\"\".format(schema, schema, schema)\n return doc\n"
] |
import pandas
import dendropy
from ..utils import get_random_id
def _read_doc_template(schema):
doc = """
Read a {} tree into a phylopandas.DataFrame.
The resulting DataFrame has the following columns:
- name: label for each taxa or node.
- id: unique id (created by phylopandas) given to each node.
- type: type of node (leaf, internal, or root).
- parent: parent id. necessary for constructing trees.
- length: length of branch from parent to node.
- distance: distance from root.
Parameters
----------
filename: str (default is None)
{} file to read into DataFrame.
data: str (default is None)
{} string to parse and read into DataFrame.
add_node_labels: bool
If true, labels the internal nodes with numbers.
Returns
-------
df: phylopandas.DataFrame
""".format(schema, schema, schema)
return doc
def _dendropy_to_dataframe(
tree,
add_node_labels=True,
use_uids=True):
"""Convert Dendropy tree to Pandas dataframe."""
# Maximum distance from root.
tree.max_distance_from_root()
# Initialize the data object.
idx = []
data = {
'type': [],
'id': [],
'parent': [],
'length': [],
'label': [],
'distance': []}
if use_uids:
data['uid'] = []
# Add labels to internal nodes if set to true.
if add_node_labels:
for i, node in enumerate(tree.internal_nodes()):
node.label = str(i)
for node in tree.nodes():
# Get node type
if node.is_leaf():
type_ = 'leaf'
label = str(node.taxon.label).replace(' ', '_')
elif node.is_internal():
type_ = 'node'
label = str(node.label)
# Set node label and parent.
id_ = label
parent_node = node.parent_node
length = node.edge_length
distance = node.distance_from_root()
# Is this node a root?
if parent_node is None and length is None:
parent_label = None
parent_node = None
length = 0
distance = 0
type_ = 'root'
# Set parent node label
elif parent_node.is_internal():
parent_label = str(parent_node.label)
else:
raise Exception("Subtree is not attached to tree?")
# Add this node to the data.
data['type'].append(type_)
data['id'].append(id_)
data['parent'].append(parent_label)
data['length'].append(length)
data['label'].append(label)
data['distance'].append(distance)
if use_uids:
data['uid'].append(get_random_id(10))
# Construct dataframe.
df = pandas.DataFrame(data)
return df
def _read(
filename=None,
data=None,
schema=None,
add_node_labels=True,
use_uids=True
):
"""Read a phylogenetic tree into a phylopandas.DataFrame.
The resulting DataFrame has the following columns:
- name: label for each taxa or node.
- id: unique id (created by phylopandas) given to each node.
- type: type of node (leaf, internal, or root).
- parent: parent id. necessary for constructing trees.
- length: length of branch from parent to node.
- distance: distance from root.
Parameters
----------
filename: str (default is None)
newick file to read into DataFrame.
data: str (default is None)
newick string to parse and read into DataFrame.
add_node_labels: bool
If true, labels the internal nodes with numbers.
Returns
-------
df: phylopandas.DataFrame.
"""
if filename is not None:
# Use Dendropy to parse tree.
tree = dendropy.Tree.get(
path=filename,
schema=schema,
preserve_underscores=True)
elif data is not None:
tree = dendropy.Tree.get(
data=data,
schema=schema,
preserve_underscores=True)
else:
raise Exception('No tree given?')
df = _dendropy_to_dataframe(
tree,
add_node_labels=add_node_labels,
use_uids=use_uids
)
return df
def _read_method(schema):
"""Add a write method for named schema to a class.
"""
def func(
self,
filename=None,
data=None,
add_node_labels=True,
combine_on='index',
use_uids=True,
**kwargs):
# Use generic write class to write data.
df0 = self._data
df1 = _read(
filename=filename,
data=data,
schema=schema,
add_node_labels=add_node_labels,
use_uids=use_uids,
**kwargs
)
return df0.phylo.combine(df1, on=combine_on)
# Update docs
func.__doc__ = _read_doc_template(schema)
return func
def read_dendropy(
df,
add_node_labels=True,
use_uids=True):
__doc__ = _read_doc_template('dendropy')
df = _dendropy_to_dataframe(
tree,
add_node_labels=add_node_labels,
use_uids=use_uids
)
return df
read_newick = _read_function('newick')
read_nexml = _read_function('nexml')
read_nexus_tree = _read_function('nexus')
|
Zsailer/phylopandas
|
phylopandas/seqio/write.py
|
pandas_df_to_biopython_seqrecord
|
python
|
def pandas_df_to_biopython_seqrecord(
df,
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None,
):
seq_records = []
for i, row in df.iterrows():
# Tries getting sequence data. If a TypeError at the seqrecord
# creation is thrown, it is assumed that this row does not contain
# sequence data and therefore the row is ignored.
try:
# Get sequence
seq = Seq(row[sequence_col], alphabet=alphabet)
# Get id
id = row[id_col]
# Build a description
description = ""
if extra_data is not None:
description = " ".join([row[key] for key in extra_data])
# Build a record
record = SeqRecord(
seq=seq,
id=id,
description=description,
)
seq_records.append(record)
except TypeError:
pass
return seq_records
|
Convert pandas dataframe to biopython seqrecord for easy writing.
Parameters
----------
df : Dataframe
Pandas dataframe to convert
id_col : str
column in dataframe to use as sequence label
sequence_col str:
column in dataframe to use as sequence data
extra_data : list
extra columns to use in sequence description line
alphabet :
biopython Alphabet object
Returns
-------
seq_records :
List of biopython seqrecords.
|
train
|
https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/seqio/write.py#L34-L93
| null |
__doc__ = """
Functions for write sequence data to sequence files.
"""
import pandas as pd
# Import Biopython
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import Bio.Alphabet
def _write_doc_template(schema):
s = """Write to {} format.
Parameters
----------
filename : str
File to write {} string to. If no filename is given, a {} string
will be returned.
sequence_col : str (default='sequence')
Sequence column name in DataFrame.
id_col : str (default='id')
ID column name in DataFrame
id_only : bool (default=False)
If True, use only the ID column to label sequences in fasta.
""".format(schema, schema, schema)
return s
def pandas_series_to_biopython_seqrecord(
series,
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None
):
"""Convert pandas series to biopython seqrecord for easy writing.
Parameters
----------
series : Series
Pandas series to convert
id_col : str
column in dataframe to use as sequence label
sequence_col : str
column in dataframe to use as sequence data
extra_data : list
extra columns to use in sequence description line
Returns
-------
seq_records :
List of biopython seqrecords.
"""
# Get sequence
seq = Seq(series[sequence_col], alphabet=alphabet)
# Get id
id = series[id_col]
# Build a description
description = ""
if extra_data is not None:
description = " ".join([series[key] for key in extra_data])
# Build a record
record = SeqRecord(
seq=seq,
id=id,
description=description,
)
seq_records = [record]
return seq_records
def _write(
data,
filename=None,
schema='fasta',
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None,
**kwargs):
"""General write function. Write phylopanda data to biopython format.
Parameters
----------
filename : str
File to write string to. If no filename is given, a string
will be returned.
sequence_col : str (default='sequence')
Sequence column name in DataFrame.
id_col : str (default='id')
ID column name in DataFrame
id_only : bool (default=False)
If True, use only the ID column to label sequences in fasta.
"""
# Check Alphabet if given
if alphabet is None:
alphabet = Bio.Alphabet.Alphabet()
elif alphabet in ['dna', 'rna', 'protein', 'nucleotide']:
alphabet = getattr(Bio.Alphabet, 'generic_{}'.format(alphabet))
else:
raise Exception(
"The alphabet is not recognized. Must be 'dna', 'rna', "
"'nucleotide', or 'protein'.")
# Build a list of records from a pandas DataFrame
if type(data) is pd.DataFrame:
seq_records = pandas_df_to_biopython_seqrecord(
data,
id_col=id_col,
sequence_col=sequence_col,
extra_data=extra_data,
alphabet=alphabet,
)
# Build a record from a pandas Series
elif type(data) is pd.Series:
seq_records = pandas_series_to_biopython_seqrecord(
data,
id_col=id_col,
sequence_col=sequence_col,
extra_data=extra_data,
alphabet=alphabet,
)
# Write to disk or return string
if filename is not None:
SeqIO.write(seq_records, filename, format=schema, **kwargs)
else:
return "".join([s.format(schema) for s in seq_records])
def _write_method(schema):
"""Add a write method for named schema to a class.
"""
def method(
self,
filename=None,
schema=schema,
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None,
**kwargs):
# Use generic write class to write data.
return _write(
self._data,
filename=filename,
schema=schema,
id_col=id_col,
sequence_col=sequence_col,
extra_data=extra_data,
alphabet=alphabet,
**kwargs
)
# Update docs
method.__doc__ = _write_doc_template(schema)
return method
def _write_function(schema):
"""Add a write method for named schema to a class.
"""
def func(
data,
filename=None,
schema=schema,
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None,
**kwargs):
# Use generic write class to write data.
return _write(
data,
filename=filename,
schema=schema,
id_col=id_col,
sequence_col=sequence_col,
extra_data=extra_data,
alphabet=alphabet,
**kwargs
)
# Update docs
func.__doc__ = _write_doc_template(schema)
return func
# Write functions to various formats.
to_fasta = _write_function('fasta')
to_phylip = _write_function('phylip')
to_clustal = _write_function('clustal')
to_embl = _write_function('embl')
to_nexus_seq = _write_function('nexus')
to_swiss = _write_function('swiss')
to_fastq = _write_function('fastq')
|
Zsailer/phylopandas
|
phylopandas/seqio/write.py
|
pandas_series_to_biopython_seqrecord
|
python
|
def pandas_series_to_biopython_seqrecord(
series,
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None
):
# Get sequence
seq = Seq(series[sequence_col], alphabet=alphabet)
# Get id
id = series[id_col]
# Build a description
description = ""
if extra_data is not None:
description = " ".join([series[key] for key in extra_data])
# Build a record
record = SeqRecord(
seq=seq,
id=id,
description=description,
)
seq_records = [record]
return seq_records
|
Convert pandas series to biopython seqrecord for easy writing.
Parameters
----------
series : Series
Pandas series to convert
id_col : str
column in dataframe to use as sequence label
sequence_col : str
column in dataframe to use as sequence data
extra_data : list
extra columns to use in sequence description line
Returns
-------
seq_records :
List of biopython seqrecords.
|
train
|
https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/seqio/write.py#L95-L142
| null |
__doc__ = """
Functions for write sequence data to sequence files.
"""
import pandas as pd
# Import Biopython
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import Bio.Alphabet
def _write_doc_template(schema):
s = """Write to {} format.
Parameters
----------
filename : str
File to write {} string to. If no filename is given, a {} string
will be returned.
sequence_col : str (default='sequence')
Sequence column name in DataFrame.
id_col : str (default='id')
ID column name in DataFrame
id_only : bool (default=False)
If True, use only the ID column to label sequences in fasta.
""".format(schema, schema, schema)
return s
def pandas_df_to_biopython_seqrecord(
df,
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None,
):
"""Convert pandas dataframe to biopython seqrecord for easy writing.
Parameters
----------
df : Dataframe
Pandas dataframe to convert
id_col : str
column in dataframe to use as sequence label
sequence_col str:
column in dataframe to use as sequence data
extra_data : list
extra columns to use in sequence description line
alphabet :
biopython Alphabet object
Returns
-------
seq_records :
List of biopython seqrecords.
"""
seq_records = []
for i, row in df.iterrows():
# Tries getting sequence data. If a TypeError at the seqrecord
# creation is thrown, it is assumed that this row does not contain
# sequence data and therefore the row is ignored.
try:
# Get sequence
seq = Seq(row[sequence_col], alphabet=alphabet)
# Get id
id = row[id_col]
# Build a description
description = ""
if extra_data is not None:
description = " ".join([row[key] for key in extra_data])
# Build a record
record = SeqRecord(
seq=seq,
id=id,
description=description,
)
seq_records.append(record)
except TypeError:
pass
return seq_records
def _write(
data,
filename=None,
schema='fasta',
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None,
**kwargs):
"""General write function. Write phylopanda data to biopython format.
Parameters
----------
filename : str
File to write string to. If no filename is given, a string
will be returned.
sequence_col : str (default='sequence')
Sequence column name in DataFrame.
id_col : str (default='id')
ID column name in DataFrame
id_only : bool (default=False)
If True, use only the ID column to label sequences in fasta.
"""
# Check Alphabet if given
if alphabet is None:
alphabet = Bio.Alphabet.Alphabet()
elif alphabet in ['dna', 'rna', 'protein', 'nucleotide']:
alphabet = getattr(Bio.Alphabet, 'generic_{}'.format(alphabet))
else:
raise Exception(
"The alphabet is not recognized. Must be 'dna', 'rna', "
"'nucleotide', or 'protein'.")
# Build a list of records from a pandas DataFrame
if type(data) is pd.DataFrame:
seq_records = pandas_df_to_biopython_seqrecord(
data,
id_col=id_col,
sequence_col=sequence_col,
extra_data=extra_data,
alphabet=alphabet,
)
# Build a record from a pandas Series
elif type(data) is pd.Series:
seq_records = pandas_series_to_biopython_seqrecord(
data,
id_col=id_col,
sequence_col=sequence_col,
extra_data=extra_data,
alphabet=alphabet,
)
# Write to disk or return string
if filename is not None:
SeqIO.write(seq_records, filename, format=schema, **kwargs)
else:
return "".join([s.format(schema) for s in seq_records])
def _write_method(schema):
"""Add a write method for named schema to a class.
"""
def method(
self,
filename=None,
schema=schema,
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None,
**kwargs):
# Use generic write class to write data.
return _write(
self._data,
filename=filename,
schema=schema,
id_col=id_col,
sequence_col=sequence_col,
extra_data=extra_data,
alphabet=alphabet,
**kwargs
)
# Update docs
method.__doc__ = _write_doc_template(schema)
return method
def _write_function(schema):
"""Add a write method for named schema to a class.
"""
def func(
data,
filename=None,
schema=schema,
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None,
**kwargs):
# Use generic write class to write data.
return _write(
data,
filename=filename,
schema=schema,
id_col=id_col,
sequence_col=sequence_col,
extra_data=extra_data,
alphabet=alphabet,
**kwargs
)
# Update docs
func.__doc__ = _write_doc_template(schema)
return func
# Write functions to various formats.
to_fasta = _write_function('fasta')
to_phylip = _write_function('phylip')
to_clustal = _write_function('clustal')
to_embl = _write_function('embl')
to_nexus_seq = _write_function('nexus')
to_swiss = _write_function('swiss')
to_fastq = _write_function('fastq')
|
Zsailer/phylopandas
|
phylopandas/seqio/write.py
|
_write
|
python
|
def _write(
data,
filename=None,
schema='fasta',
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None,
**kwargs):
# Check Alphabet if given
if alphabet is None:
alphabet = Bio.Alphabet.Alphabet()
elif alphabet in ['dna', 'rna', 'protein', 'nucleotide']:
alphabet = getattr(Bio.Alphabet, 'generic_{}'.format(alphabet))
else:
raise Exception(
"The alphabet is not recognized. Must be 'dna', 'rna', "
"'nucleotide', or 'protein'.")
# Build a list of records from a pandas DataFrame
if type(data) is pd.DataFrame:
seq_records = pandas_df_to_biopython_seqrecord(
data,
id_col=id_col,
sequence_col=sequence_col,
extra_data=extra_data,
alphabet=alphabet,
)
# Build a record from a pandas Series
elif type(data) is pd.Series:
seq_records = pandas_series_to_biopython_seqrecord(
data,
id_col=id_col,
sequence_col=sequence_col,
extra_data=extra_data,
alphabet=alphabet,
)
# Write to disk or return string
if filename is not None:
SeqIO.write(seq_records, filename, format=schema, **kwargs)
else:
return "".join([s.format(schema) for s in seq_records])
|
General write function. Write phylopanda data to biopython format.
Parameters
----------
filename : str
File to write string to. If no filename is given, a string
will be returned.
sequence_col : str (default='sequence')
Sequence column name in DataFrame.
id_col : str (default='id')
ID column name in DataFrame
id_only : bool (default=False)
If True, use only the ID column to label sequences in fasta.
|
train
|
https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/seqio/write.py#L144-L207
|
[
"def pandas_df_to_biopython_seqrecord(\n df,\n id_col='uid',\n sequence_col='sequence',\n extra_data=None,\n alphabet=None,\n ):\n \"\"\"Convert pandas dataframe to biopython seqrecord for easy writing.\n\n Parameters\n ----------\n df : Dataframe\n Pandas dataframe to convert\n\n id_col : str\n column in dataframe to use as sequence label\n\n sequence_col str:\n column in dataframe to use as sequence data\n\n extra_data : list\n extra columns to use in sequence description line\n\n alphabet :\n biopython Alphabet object\n\n Returns\n -------\n seq_records :\n List of biopython seqrecords.\n \"\"\"\n seq_records = []\n\n for i, row in df.iterrows():\n # Tries getting sequence data. If a TypeError at the seqrecord\n # creation is thrown, it is assumed that this row does not contain\n # sequence data and therefore the row is ignored.\n try:\n # Get sequence\n seq = Seq(row[sequence_col], alphabet=alphabet)\n\n # Get id\n id = row[id_col]\n\n # Build a description\n description = \"\"\n if extra_data is not None:\n description = \" \".join([row[key] for key in extra_data])\n\n # Build a record\n record = SeqRecord(\n seq=seq,\n id=id,\n description=description,\n )\n seq_records.append(record)\n except TypeError:\n pass\n\n return seq_records\n",
"def pandas_series_to_biopython_seqrecord(\n series,\n id_col='uid',\n sequence_col='sequence',\n extra_data=None,\n alphabet=None\n ):\n \"\"\"Convert pandas series to biopython seqrecord for easy writing.\n\n Parameters\n ----------\n series : Series\n Pandas series to convert\n\n id_col : str\n column in dataframe to use as sequence label\n\n sequence_col : str\n column in dataframe to use as sequence data\n\n extra_data : list\n extra columns to use in sequence description line\n\n Returns\n -------\n seq_records :\n List of biopython seqrecords.\n \"\"\"\n # Get sequence\n seq = Seq(series[sequence_col], alphabet=alphabet)\n\n # Get id\n id = series[id_col]\n\n # Build a description\n description = \"\"\n if extra_data is not None:\n description = \" \".join([series[key] for key in extra_data])\n\n # Build a record\n record = SeqRecord(\n seq=seq,\n id=id,\n description=description,\n )\n\n seq_records = [record]\n return seq_records\n"
] |
__doc__ = """
Functions for write sequence data to sequence files.
"""
import pandas as pd
# Import Biopython
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import Bio.Alphabet
def _write_doc_template(schema):
s = """Write to {} format.
Parameters
----------
filename : str
File to write {} string to. If no filename is given, a {} string
will be returned.
sequence_col : str (default='sequence')
Sequence column name in DataFrame.
id_col : str (default='id')
ID column name in DataFrame
id_only : bool (default=False)
If True, use only the ID column to label sequences in fasta.
""".format(schema, schema, schema)
return s
def pandas_df_to_biopython_seqrecord(
df,
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None,
):
"""Convert pandas dataframe to biopython seqrecord for easy writing.
Parameters
----------
df : Dataframe
Pandas dataframe to convert
id_col : str
column in dataframe to use as sequence label
sequence_col str:
column in dataframe to use as sequence data
extra_data : list
extra columns to use in sequence description line
alphabet :
biopython Alphabet object
Returns
-------
seq_records :
List of biopython seqrecords.
"""
seq_records = []
for i, row in df.iterrows():
# Tries getting sequence data. If a TypeError at the seqrecord
# creation is thrown, it is assumed that this row does not contain
# sequence data and therefore the row is ignored.
try:
# Get sequence
seq = Seq(row[sequence_col], alphabet=alphabet)
# Get id
id = row[id_col]
# Build a description
description = ""
if extra_data is not None:
description = " ".join([row[key] for key in extra_data])
# Build a record
record = SeqRecord(
seq=seq,
id=id,
description=description,
)
seq_records.append(record)
except TypeError:
pass
return seq_records
def pandas_series_to_biopython_seqrecord(
series,
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None
):
"""Convert pandas series to biopython seqrecord for easy writing.
Parameters
----------
series : Series
Pandas series to convert
id_col : str
column in dataframe to use as sequence label
sequence_col : str
column in dataframe to use as sequence data
extra_data : list
extra columns to use in sequence description line
Returns
-------
seq_records :
List of biopython seqrecords.
"""
# Get sequence
seq = Seq(series[sequence_col], alphabet=alphabet)
# Get id
id = series[id_col]
# Build a description
description = ""
if extra_data is not None:
description = " ".join([series[key] for key in extra_data])
# Build a record
record = SeqRecord(
seq=seq,
id=id,
description=description,
)
seq_records = [record]
return seq_records
def _write_method(schema):
"""Add a write method for named schema to a class.
"""
def method(
self,
filename=None,
schema=schema,
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None,
**kwargs):
# Use generic write class to write data.
return _write(
self._data,
filename=filename,
schema=schema,
id_col=id_col,
sequence_col=sequence_col,
extra_data=extra_data,
alphabet=alphabet,
**kwargs
)
# Update docs
method.__doc__ = _write_doc_template(schema)
return method
def _write_function(schema):
"""Add a write method for named schema to a class.
"""
def func(
data,
filename=None,
schema=schema,
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None,
**kwargs):
# Use generic write class to write data.
return _write(
data,
filename=filename,
schema=schema,
id_col=id_col,
sequence_col=sequence_col,
extra_data=extra_data,
alphabet=alphabet,
**kwargs
)
# Update docs
func.__doc__ = _write_doc_template(schema)
return func
# Write functions to various formats.
to_fasta = _write_function('fasta')
to_phylip = _write_function('phylip')
to_clustal = _write_function('clustal')
to_embl = _write_function('embl')
to_nexus_seq = _write_function('nexus')
to_swiss = _write_function('swiss')
to_fastq = _write_function('fastq')
|
Zsailer/phylopandas
|
phylopandas/seqio/write.py
|
_write_method
|
python
|
def _write_method(schema):
def method(
self,
filename=None,
schema=schema,
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None,
**kwargs):
# Use generic write class to write data.
return _write(
self._data,
filename=filename,
schema=schema,
id_col=id_col,
sequence_col=sequence_col,
extra_data=extra_data,
alphabet=alphabet,
**kwargs
)
# Update docs
method.__doc__ = _write_doc_template(schema)
return method
|
Add a write method for named schema to a class.
|
train
|
https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/seqio/write.py#L209-L234
|
[
"def _write_doc_template(schema):\n s = \"\"\"Write to {} format.\n\n Parameters\n ----------\n filename : str\n File to write {} string to. If no filename is given, a {} string\n will be returned.\n\n sequence_col : str (default='sequence')\n Sequence column name in DataFrame.\n\n id_col : str (default='id')\n ID column name in DataFrame\n\n id_only : bool (default=False)\n If True, use only the ID column to label sequences in fasta.\n \"\"\".format(schema, schema, schema)\n return s\n"
] |
__doc__ = """
Functions for write sequence data to sequence files.
"""
import pandas as pd
# Import Biopython
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import Bio.Alphabet
def _write_doc_template(schema):
s = """Write to {} format.
Parameters
----------
filename : str
File to write {} string to. If no filename is given, a {} string
will be returned.
sequence_col : str (default='sequence')
Sequence column name in DataFrame.
id_col : str (default='id')
ID column name in DataFrame
id_only : bool (default=False)
If True, use only the ID column to label sequences in fasta.
""".format(schema, schema, schema)
return s
def pandas_df_to_biopython_seqrecord(
df,
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None,
):
"""Convert pandas dataframe to biopython seqrecord for easy writing.
Parameters
----------
df : Dataframe
Pandas dataframe to convert
id_col : str
column in dataframe to use as sequence label
sequence_col str:
column in dataframe to use as sequence data
extra_data : list
extra columns to use in sequence description line
alphabet :
biopython Alphabet object
Returns
-------
seq_records :
List of biopython seqrecords.
"""
seq_records = []
for i, row in df.iterrows():
# Tries getting sequence data. If a TypeError at the seqrecord
# creation is thrown, it is assumed that this row does not contain
# sequence data and therefore the row is ignored.
try:
# Get sequence
seq = Seq(row[sequence_col], alphabet=alphabet)
# Get id
id = row[id_col]
# Build a description
description = ""
if extra_data is not None:
description = " ".join([row[key] for key in extra_data])
# Build a record
record = SeqRecord(
seq=seq,
id=id,
description=description,
)
seq_records.append(record)
except TypeError:
pass
return seq_records
def pandas_series_to_biopython_seqrecord(
series,
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None
):
"""Convert pandas series to biopython seqrecord for easy writing.
Parameters
----------
series : Series
Pandas series to convert
id_col : str
column in dataframe to use as sequence label
sequence_col : str
column in dataframe to use as sequence data
extra_data : list
extra columns to use in sequence description line
Returns
-------
seq_records :
List of biopython seqrecords.
"""
# Get sequence
seq = Seq(series[sequence_col], alphabet=alphabet)
# Get id
id = series[id_col]
# Build a description
description = ""
if extra_data is not None:
description = " ".join([series[key] for key in extra_data])
# Build a record
record = SeqRecord(
seq=seq,
id=id,
description=description,
)
seq_records = [record]
return seq_records
def _write(
data,
filename=None,
schema='fasta',
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None,
**kwargs):
"""General write function. Write phylopanda data to biopython format.
Parameters
----------
filename : str
File to write string to. If no filename is given, a string
will be returned.
sequence_col : str (default='sequence')
Sequence column name in DataFrame.
id_col : str (default='id')
ID column name in DataFrame
id_only : bool (default=False)
If True, use only the ID column to label sequences in fasta.
"""
# Check Alphabet if given
if alphabet is None:
alphabet = Bio.Alphabet.Alphabet()
elif alphabet in ['dna', 'rna', 'protein', 'nucleotide']:
alphabet = getattr(Bio.Alphabet, 'generic_{}'.format(alphabet))
else:
raise Exception(
"The alphabet is not recognized. Must be 'dna', 'rna', "
"'nucleotide', or 'protein'.")
# Build a list of records from a pandas DataFrame
if type(data) is pd.DataFrame:
seq_records = pandas_df_to_biopython_seqrecord(
data,
id_col=id_col,
sequence_col=sequence_col,
extra_data=extra_data,
alphabet=alphabet,
)
# Build a record from a pandas Series
elif type(data) is pd.Series:
seq_records = pandas_series_to_biopython_seqrecord(
data,
id_col=id_col,
sequence_col=sequence_col,
extra_data=extra_data,
alphabet=alphabet,
)
# Write to disk or return string
if filename is not None:
SeqIO.write(seq_records, filename, format=schema, **kwargs)
else:
return "".join([s.format(schema) for s in seq_records])
def _write_function(schema):
"""Add a write method for named schema to a class.
"""
def func(
data,
filename=None,
schema=schema,
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None,
**kwargs):
# Use generic write class to write data.
return _write(
data,
filename=filename,
schema=schema,
id_col=id_col,
sequence_col=sequence_col,
extra_data=extra_data,
alphabet=alphabet,
**kwargs
)
# Update docs
func.__doc__ = _write_doc_template(schema)
return func
# Write functions to various formats.
to_fasta = _write_function('fasta')
to_phylip = _write_function('phylip')
to_clustal = _write_function('clustal')
to_embl = _write_function('embl')
to_nexus_seq = _write_function('nexus')
to_swiss = _write_function('swiss')
to_fastq = _write_function('fastq')
|
Zsailer/phylopandas
|
phylopandas/seqio/write.py
|
_write_function
|
python
|
def _write_function(schema):
def func(
data,
filename=None,
schema=schema,
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None,
**kwargs):
# Use generic write class to write data.
return _write(
data,
filename=filename,
schema=schema,
id_col=id_col,
sequence_col=sequence_col,
extra_data=extra_data,
alphabet=alphabet,
**kwargs
)
# Update docs
func.__doc__ = _write_doc_template(schema)
return func
|
Add a write method for named schema to a class.
|
train
|
https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/seqio/write.py#L237-L262
|
[
"def _write_doc_template(schema):\n s = \"\"\"Write to {} format.\n\n Parameters\n ----------\n filename : str\n File to write {} string to. If no filename is given, a {} string\n will be returned.\n\n sequence_col : str (default='sequence')\n Sequence column name in DataFrame.\n\n id_col : str (default='id')\n ID column name in DataFrame\n\n id_only : bool (default=False)\n If True, use only the ID column to label sequences in fasta.\n \"\"\".format(schema, schema, schema)\n return s\n"
] |
__doc__ = """
Functions for write sequence data to sequence files.
"""
import pandas as pd
# Import Biopython
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import Bio.Alphabet
def _write_doc_template(schema):
s = """Write to {} format.
Parameters
----------
filename : str
File to write {} string to. If no filename is given, a {} string
will be returned.
sequence_col : str (default='sequence')
Sequence column name in DataFrame.
id_col : str (default='id')
ID column name in DataFrame
id_only : bool (default=False)
If True, use only the ID column to label sequences in fasta.
""".format(schema, schema, schema)
return s
def pandas_df_to_biopython_seqrecord(
df,
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None,
):
"""Convert pandas dataframe to biopython seqrecord for easy writing.
Parameters
----------
df : Dataframe
Pandas dataframe to convert
id_col : str
column in dataframe to use as sequence label
sequence_col str:
column in dataframe to use as sequence data
extra_data : list
extra columns to use in sequence description line
alphabet :
biopython Alphabet object
Returns
-------
seq_records :
List of biopython seqrecords.
"""
seq_records = []
for i, row in df.iterrows():
# Tries getting sequence data. If a TypeError at the seqrecord
# creation is thrown, it is assumed that this row does not contain
# sequence data and therefore the row is ignored.
try:
# Get sequence
seq = Seq(row[sequence_col], alphabet=alphabet)
# Get id
id = row[id_col]
# Build a description
description = ""
if extra_data is not None:
description = " ".join([row[key] for key in extra_data])
# Build a record
record = SeqRecord(
seq=seq,
id=id,
description=description,
)
seq_records.append(record)
except TypeError:
pass
return seq_records
def pandas_series_to_biopython_seqrecord(
series,
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None
):
"""Convert pandas series to biopython seqrecord for easy writing.
Parameters
----------
series : Series
Pandas series to convert
id_col : str
column in dataframe to use as sequence label
sequence_col : str
column in dataframe to use as sequence data
extra_data : list
extra columns to use in sequence description line
Returns
-------
seq_records :
List of biopython seqrecords.
"""
# Get sequence
seq = Seq(series[sequence_col], alphabet=alphabet)
# Get id
id = series[id_col]
# Build a description
description = ""
if extra_data is not None:
description = " ".join([series[key] for key in extra_data])
# Build a record
record = SeqRecord(
seq=seq,
id=id,
description=description,
)
seq_records = [record]
return seq_records
def _write(
data,
filename=None,
schema='fasta',
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None,
**kwargs):
"""General write function. Write phylopanda data to biopython format.
Parameters
----------
filename : str
File to write string to. If no filename is given, a string
will be returned.
sequence_col : str (default='sequence')
Sequence column name in DataFrame.
id_col : str (default='id')
ID column name in DataFrame
id_only : bool (default=False)
If True, use only the ID column to label sequences in fasta.
"""
# Check Alphabet if given
if alphabet is None:
alphabet = Bio.Alphabet.Alphabet()
elif alphabet in ['dna', 'rna', 'protein', 'nucleotide']:
alphabet = getattr(Bio.Alphabet, 'generic_{}'.format(alphabet))
else:
raise Exception(
"The alphabet is not recognized. Must be 'dna', 'rna', "
"'nucleotide', or 'protein'.")
# Build a list of records from a pandas DataFrame
if type(data) is pd.DataFrame:
seq_records = pandas_df_to_biopython_seqrecord(
data,
id_col=id_col,
sequence_col=sequence_col,
extra_data=extra_data,
alphabet=alphabet,
)
# Build a record from a pandas Series
elif type(data) is pd.Series:
seq_records = pandas_series_to_biopython_seqrecord(
data,
id_col=id_col,
sequence_col=sequence_col,
extra_data=extra_data,
alphabet=alphabet,
)
# Write to disk or return string
if filename is not None:
SeqIO.write(seq_records, filename, format=schema, **kwargs)
else:
return "".join([s.format(schema) for s in seq_records])
def _write_method(schema):
"""Add a write method for named schema to a class.
"""
def method(
self,
filename=None,
schema=schema,
id_col='uid',
sequence_col='sequence',
extra_data=None,
alphabet=None,
**kwargs):
# Use generic write class to write data.
return _write(
self._data,
filename=filename,
schema=schema,
id_col=id_col,
sequence_col=sequence_col,
extra_data=extra_data,
alphabet=alphabet,
**kwargs
)
# Update docs
method.__doc__ = _write_doc_template(schema)
return method
# Write functions to various formats.
to_fasta = _write_function('fasta')
to_phylip = _write_function('phylip')
to_clustal = _write_function('clustal')
to_embl = _write_function('embl')
to_nexus_seq = _write_function('nexus')
to_swiss = _write_function('swiss')
to_fastq = _write_function('fastq')
|
Zsailer/phylopandas
|
phylopandas/seqio/read.py
|
_read
|
python
|
def _read(
filename,
schema,
seq_label='sequence',
alphabet=None,
use_uids=True,
**kwargs):
# Check Alphabet if given
if alphabet is None:
alphabet = Bio.Alphabet.Alphabet()
elif alphabet in ['dna', 'rna', 'protein', 'nucleotide']:
alphabet = getattr(Bio.Alphabet, 'generic_{}'.format(alphabet))
else:
raise Exception(
"The alphabet is not recognized. Must be 'dna', 'rna', "
"'nucleotide', or 'protein'.")
kwargs.update(alphabet=alphabet)
# Prepare DataFrame fields.
data = {
'id': [],
seq_label: [],
'description': [],
'label': []
}
if use_uids:
data['uid'] = []
# Parse Fasta file.
for i, s in enumerate(SeqIO.parse(filename, format=schema, **kwargs)):
data['id'].append(s.id)
data[seq_label].append(str(s.seq))
data['description'].append(s.description)
data['label'].append(s.name)
if use_uids:
data['uid'].append(get_random_id(10))
# Port to DataFrame.
return pd.DataFrame(data)
|
Use BioPython's sequence parsing module to convert any file format to
a Pandas DataFrame.
The resulting DataFrame has the following columns:
- name
- id
- description
- sequence
|
train
|
https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/seqio/read.py#L37-L88
|
[
"def get_random_id(length):\n \"\"\"Generate a random, alpha-numerical id.\"\"\"\n alphabet = string.ascii_uppercase + string.ascii_lowercase + string.digits\n return ''.join(random.choice(alphabet) for _ in range(length))\n"
] |
__doc__ = """
Functions for reading sequence files into pandas DataFrame.
"""
# Imports
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Blast import NCBIXML
import Bio.Alphabet
# Import Phylopandas DataFrame
import pandas as pd
from ..utils import get_random_id
def _read_doc_template(schema):
s = """Read a {} file.
Construct a PhyloPandas DataFrame with columns:
- name
- id
- description
- sequence
Parameters
----------
filename : str
File name of {} file.
seq_label : str (default='sequence')
Sequence column name in DataFrame.
""".format(schema, schema, schema)
return s
def _read_method(schema):
"""Add a write method for named schema to a class.
"""
def func(
self,
filename,
seq_label='sequence',
alphabet=None,
combine_on='uid',
use_uids=True,
**kwargs):
# Use generic write class to write data.
df0 = self._data
df1 = _read(
filename=filename,
schema=schema,
seq_label=seq_label,
alphabet=alphabet,
use_uids=use_uids,
**kwargs
)
return df0.phylo.combine(df1, on=combine_on)
# Update docs
func.__doc__ = _read_doc_template(schema)
return func
def _read_function(schema):
"""Add a write method for named schema to a class.
"""
def func(
filename,
seq_label='sequence',
alphabet=None,
use_uids=True,
**kwargs):
# Use generic write class to write data.
return _read(
filename=filename,
schema=schema,
seq_label=seq_label,
alphabet=alphabet,
use_uids=use_uids,
**kwargs
)
# Update docs
func.__doc__ = _read_doc_template(schema)
return func
# Various read functions to various formats.
read_fasta = _read_function('fasta')
read_phylip = _read_function('phylip')
read_clustal = _read_function('clustal')
read_embl = _read_function('embl')
read_nexus_seq = _read_function('nexus')
read_swiss = _read_function('swiss')
read_fastq = _read_function('fastq')
read_phylip_sequential = _read_function('phylip-sequential')
read_phylip_relaxed = _read_function('phylip-relaxed')
def read_blast_xml(filename, **kwargs):
"""Read BLAST XML format."""
# Read file.
with open(filename, 'r') as f:
blast_record = NCBIXML.read(f)
# Prepare DataFrame fields.
data = {'accession': [],
'hit_def': [],
'hit_id': [],
'title': [],
'length': [],
'e_value': [],
'sequence': []}
# Get alignments from blast result.
for i, s in enumerate(blast_record.alignments):
data['accession'] = s.accession
data['hit_def'] = s.hit_def
data['hit_id'] = s.hit_id
data['title'] = s.title
data['length'] = s.length
data['e_value'] = s.hsps[0].expect
data['sequence'] = s.hsps[0].sbjct
# Port to DataFrame.
return pd.DataFrame(data)
|
Zsailer/phylopandas
|
phylopandas/seqio/read.py
|
_read_method
|
python
|
def _read_method(schema):
def func(
self,
filename,
seq_label='sequence',
alphabet=None,
combine_on='uid',
use_uids=True,
**kwargs):
# Use generic write class to write data.
df0 = self._data
df1 = _read(
filename=filename,
schema=schema,
seq_label=seq_label,
alphabet=alphabet,
use_uids=use_uids,
**kwargs
)
return df0.phylo.combine(df1, on=combine_on)
# Update docs
func.__doc__ = _read_doc_template(schema)
return func
|
Add a write method for named schema to a class.
|
train
|
https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/seqio/read.py#L91-L116
|
[
"def _read_doc_template(schema):\n s = \"\"\"Read a {} file.\n\n Construct a PhyloPandas DataFrame with columns:\n - name\n - id\n - description\n - sequence\n\n Parameters\n ----------\n filename : str\n File name of {} file.\n\n seq_label : str (default='sequence')\n Sequence column name in DataFrame.\n \"\"\".format(schema, schema, schema)\n return s\n"
] |
__doc__ = """
Functions for reading sequence files into pandas DataFrame.
"""
# Imports
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Blast import NCBIXML
import Bio.Alphabet
# Import Phylopandas DataFrame
import pandas as pd
from ..utils import get_random_id
def _read_doc_template(schema):
s = """Read a {} file.
Construct a PhyloPandas DataFrame with columns:
- name
- id
- description
- sequence
Parameters
----------
filename : str
File name of {} file.
seq_label : str (default='sequence')
Sequence column name in DataFrame.
""".format(schema, schema, schema)
return s
def _read(
filename,
schema,
seq_label='sequence',
alphabet=None,
use_uids=True,
**kwargs):
"""Use BioPython's sequence parsing module to convert any file format to
a Pandas DataFrame.
The resulting DataFrame has the following columns:
- name
- id
- description
- sequence
"""
# Check Alphabet if given
if alphabet is None:
alphabet = Bio.Alphabet.Alphabet()
elif alphabet in ['dna', 'rna', 'protein', 'nucleotide']:
alphabet = getattr(Bio.Alphabet, 'generic_{}'.format(alphabet))
else:
raise Exception(
"The alphabet is not recognized. Must be 'dna', 'rna', "
"'nucleotide', or 'protein'.")
kwargs.update(alphabet=alphabet)
# Prepare DataFrame fields.
data = {
'id': [],
seq_label: [],
'description': [],
'label': []
}
if use_uids:
data['uid'] = []
# Parse Fasta file.
for i, s in enumerate(SeqIO.parse(filename, format=schema, **kwargs)):
data['id'].append(s.id)
data[seq_label].append(str(s.seq))
data['description'].append(s.description)
data['label'].append(s.name)
if use_uids:
data['uid'].append(get_random_id(10))
# Port to DataFrame.
return pd.DataFrame(data)
def _read_function(schema):
"""Add a write method for named schema to a class.
"""
def func(
filename,
seq_label='sequence',
alphabet=None,
use_uids=True,
**kwargs):
# Use generic write class to write data.
return _read(
filename=filename,
schema=schema,
seq_label=seq_label,
alphabet=alphabet,
use_uids=use_uids,
**kwargs
)
# Update docs
func.__doc__ = _read_doc_template(schema)
return func
# Various read functions to various formats.
read_fasta = _read_function('fasta')
read_phylip = _read_function('phylip')
read_clustal = _read_function('clustal')
read_embl = _read_function('embl')
read_nexus_seq = _read_function('nexus')
read_swiss = _read_function('swiss')
read_fastq = _read_function('fastq')
read_phylip_sequential = _read_function('phylip-sequential')
read_phylip_relaxed = _read_function('phylip-relaxed')
def read_blast_xml(filename, **kwargs):
"""Read BLAST XML format."""
# Read file.
with open(filename, 'r') as f:
blast_record = NCBIXML.read(f)
# Prepare DataFrame fields.
data = {'accession': [],
'hit_def': [],
'hit_id': [],
'title': [],
'length': [],
'e_value': [],
'sequence': []}
# Get alignments from blast result.
for i, s in enumerate(blast_record.alignments):
data['accession'] = s.accession
data['hit_def'] = s.hit_def
data['hit_id'] = s.hit_id
data['title'] = s.title
data['length'] = s.length
data['e_value'] = s.hsps[0].expect
data['sequence'] = s.hsps[0].sbjct
# Port to DataFrame.
return pd.DataFrame(data)
|
Zsailer/phylopandas
|
phylopandas/seqio/read.py
|
_read_function
|
python
|
def _read_function(schema):
def func(
filename,
seq_label='sequence',
alphabet=None,
use_uids=True,
**kwargs):
# Use generic write class to write data.
return _read(
filename=filename,
schema=schema,
seq_label=seq_label,
alphabet=alphabet,
use_uids=use_uids,
**kwargs
)
# Update docs
func.__doc__ = _read_doc_template(schema)
return func
|
Add a write method for named schema to a class.
|
train
|
https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/seqio/read.py#L119-L139
|
[
"def _read_doc_template(schema):\n s = \"\"\"Read a {} file.\n\n Construct a PhyloPandas DataFrame with columns:\n - name\n - id\n - description\n - sequence\n\n Parameters\n ----------\n filename : str\n File name of {} file.\n\n seq_label : str (default='sequence')\n Sequence column name in DataFrame.\n \"\"\".format(schema, schema, schema)\n return s\n"
] |
__doc__ = """
Functions for reading sequence files into pandas DataFrame.
"""
# Imports
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Blast import NCBIXML
import Bio.Alphabet
# Import Phylopandas DataFrame
import pandas as pd
from ..utils import get_random_id
def _read_doc_template(schema):
s = """Read a {} file.
Construct a PhyloPandas DataFrame with columns:
- name
- id
- description
- sequence
Parameters
----------
filename : str
File name of {} file.
seq_label : str (default='sequence')
Sequence column name in DataFrame.
""".format(schema, schema, schema)
return s
def _read(
filename,
schema,
seq_label='sequence',
alphabet=None,
use_uids=True,
**kwargs):
"""Use BioPython's sequence parsing module to convert any file format to
a Pandas DataFrame.
The resulting DataFrame has the following columns:
- name
- id
- description
- sequence
"""
# Check Alphabet if given
if alphabet is None:
alphabet = Bio.Alphabet.Alphabet()
elif alphabet in ['dna', 'rna', 'protein', 'nucleotide']:
alphabet = getattr(Bio.Alphabet, 'generic_{}'.format(alphabet))
else:
raise Exception(
"The alphabet is not recognized. Must be 'dna', 'rna', "
"'nucleotide', or 'protein'.")
kwargs.update(alphabet=alphabet)
# Prepare DataFrame fields.
data = {
'id': [],
seq_label: [],
'description': [],
'label': []
}
if use_uids:
data['uid'] = []
# Parse Fasta file.
for i, s in enumerate(SeqIO.parse(filename, format=schema, **kwargs)):
data['id'].append(s.id)
data[seq_label].append(str(s.seq))
data['description'].append(s.description)
data['label'].append(s.name)
if use_uids:
data['uid'].append(get_random_id(10))
# Port to DataFrame.
return pd.DataFrame(data)
def _read_method(schema):
"""Add a write method for named schema to a class.
"""
def func(
self,
filename,
seq_label='sequence',
alphabet=None,
combine_on='uid',
use_uids=True,
**kwargs):
# Use generic write class to write data.
df0 = self._data
df1 = _read(
filename=filename,
schema=schema,
seq_label=seq_label,
alphabet=alphabet,
use_uids=use_uids,
**kwargs
)
return df0.phylo.combine(df1, on=combine_on)
# Update docs
func.__doc__ = _read_doc_template(schema)
return func
# Various read functions to various formats.
read_fasta = _read_function('fasta')
read_phylip = _read_function('phylip')
read_clustal = _read_function('clustal')
read_embl = _read_function('embl')
read_nexus_seq = _read_function('nexus')
read_swiss = _read_function('swiss')
read_fastq = _read_function('fastq')
read_phylip_sequential = _read_function('phylip-sequential')
read_phylip_relaxed = _read_function('phylip-relaxed')
def read_blast_xml(filename, **kwargs):
"""Read BLAST XML format."""
# Read file.
with open(filename, 'r') as f:
blast_record = NCBIXML.read(f)
# Prepare DataFrame fields.
data = {'accession': [],
'hit_def': [],
'hit_id': [],
'title': [],
'length': [],
'e_value': [],
'sequence': []}
# Get alignments from blast result.
for i, s in enumerate(blast_record.alignments):
data['accession'] = s.accession
data['hit_def'] = s.hit_def
data['hit_id'] = s.hit_id
data['title'] = s.title
data['length'] = s.length
data['e_value'] = s.hsps[0].expect
data['sequence'] = s.hsps[0].sbjct
# Port to DataFrame.
return pd.DataFrame(data)
|
Zsailer/phylopandas
|
phylopandas/seqio/read.py
|
read_blast_xml
|
python
|
def read_blast_xml(filename, **kwargs):
# Read file.
with open(filename, 'r') as f:
blast_record = NCBIXML.read(f)
# Prepare DataFrame fields.
data = {'accession': [],
'hit_def': [],
'hit_id': [],
'title': [],
'length': [],
'e_value': [],
'sequence': []}
# Get alignments from blast result.
for i, s in enumerate(blast_record.alignments):
data['accession'] = s.accession
data['hit_def'] = s.hit_def
data['hit_id'] = s.hit_id
data['title'] = s.title
data['length'] = s.length
data['e_value'] = s.hsps[0].expect
data['sequence'] = s.hsps[0].sbjct
# Port to DataFrame.
return pd.DataFrame(data)
|
Read BLAST XML format.
|
train
|
https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/seqio/read.py#L154-L180
| null |
__doc__ = """
Functions for reading sequence files into pandas DataFrame.
"""
# Imports
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Blast import NCBIXML
import Bio.Alphabet
# Import Phylopandas DataFrame
import pandas as pd
from ..utils import get_random_id
def _read_doc_template(schema):
s = """Read a {} file.
Construct a PhyloPandas DataFrame with columns:
- name
- id
- description
- sequence
Parameters
----------
filename : str
File name of {} file.
seq_label : str (default='sequence')
Sequence column name in DataFrame.
""".format(schema, schema, schema)
return s
def _read(
filename,
schema,
seq_label='sequence',
alphabet=None,
use_uids=True,
**kwargs):
"""Use BioPython's sequence parsing module to convert any file format to
a Pandas DataFrame.
The resulting DataFrame has the following columns:
- name
- id
- description
- sequence
"""
# Check Alphabet if given
if alphabet is None:
alphabet = Bio.Alphabet.Alphabet()
elif alphabet in ['dna', 'rna', 'protein', 'nucleotide']:
alphabet = getattr(Bio.Alphabet, 'generic_{}'.format(alphabet))
else:
raise Exception(
"The alphabet is not recognized. Must be 'dna', 'rna', "
"'nucleotide', or 'protein'.")
kwargs.update(alphabet=alphabet)
# Prepare DataFrame fields.
data = {
'id': [],
seq_label: [],
'description': [],
'label': []
}
if use_uids:
data['uid'] = []
# Parse Fasta file.
for i, s in enumerate(SeqIO.parse(filename, format=schema, **kwargs)):
data['id'].append(s.id)
data[seq_label].append(str(s.seq))
data['description'].append(s.description)
data['label'].append(s.name)
if use_uids:
data['uid'].append(get_random_id(10))
# Port to DataFrame.
return pd.DataFrame(data)
def _read_method(schema):
"""Add a write method for named schema to a class.
"""
def func(
self,
filename,
seq_label='sequence',
alphabet=None,
combine_on='uid',
use_uids=True,
**kwargs):
# Use generic write class to write data.
df0 = self._data
df1 = _read(
filename=filename,
schema=schema,
seq_label=seq_label,
alphabet=alphabet,
use_uids=use_uids,
**kwargs
)
return df0.phylo.combine(df1, on=combine_on)
# Update docs
func.__doc__ = _read_doc_template(schema)
return func
def _read_function(schema):
"""Add a write method for named schema to a class.
"""
def func(
filename,
seq_label='sequence',
alphabet=None,
use_uids=True,
**kwargs):
# Use generic write class to write data.
return _read(
filename=filename,
schema=schema,
seq_label=seq_label,
alphabet=alphabet,
use_uids=use_uids,
**kwargs
)
# Update docs
func.__doc__ = _read_doc_template(schema)
return func
# Various read functions to various formats.
read_fasta = _read_function('fasta')
read_phylip = _read_function('phylip')
read_clustal = _read_function('clustal')
read_embl = _read_function('embl')
read_nexus_seq = _read_function('nexus')
read_swiss = _read_function('swiss')
read_fastq = _read_function('fastq')
read_phylip_sequential = _read_function('phylip-sequential')
read_phylip_relaxed = _read_function('phylip-relaxed')
|
Zsailer/phylopandas
|
phylopandas/treeio/write.py
|
_pandas_df_to_dendropy_tree
|
python
|
def _pandas_df_to_dendropy_tree(
df,
taxon_col='uid',
taxon_annotations=[],
node_col='uid',
node_annotations=[],
branch_lengths=True,
):
if isinstance(taxon_col, str) is False:
raise Exception("taxon_col must be a string.")
if isinstance(node_col, str) is False:
raise Exception("taxon_col must be a string.")
# Construct a list of nodes from dataframe.
taxon_namespace = dendropy.TaxonNamespace()
nodes = {}
for idx in df.index:
# Get node data.
data = df.loc[idx]
# Get taxon for node (if leaf node).
taxon = None
if data['type'] == 'leaf':
taxon = dendropy.Taxon(label=data[taxon_col])
# Add annotations data.
for ann in taxon_annotations:
taxon.annotations.add_new(ann, data[ann])
taxon_namespace.add_taxon(taxon)
# Get label for node.
label = data[node_col]
# Get edge length.
edge_length = None
if branch_lengths is True:
edge_length = data['length']
# Build a node
n = dendropy.Node(
taxon=taxon,
label=label,
edge_length=edge_length
)
# Add node annotations
for ann in node_annotations:
n.annotations.add_new(ann, data[ann])
nodes[idx] = n
# Build branching pattern for nodes.
root = None
for idx, node in nodes.items():
# Get node data.
data = df.loc[idx]
# Get children nodes
children_idx = df[df['parent'] == data['id']].index
children_nodes = [nodes[i] for i in children_idx]
# Set child nodes
nodes[idx].set_child_nodes(children_nodes)
# Check if this is root.
if data['parent'] is None:
root = nodes[idx]
# Build tree.
tree = dendropy.Tree(
seed_node=root,
taxon_namespace=taxon_namespace
)
return tree
|
Turn a phylopandas dataframe into a dendropy tree.
Parameters
----------
df : DataFrame
DataFrame containing tree data.
taxon_col : str (optional)
Column in dataframe to label the taxon. If None, the index will be used.
taxon_annotations : str
List of columns to annotation in the tree taxon.
node_col : str (optional)
Column in dataframe to label the nodes. If None, the index will be used.
node_annotations : str
List of columns to annotation in the node taxon.
branch_lengths : bool
If True, inclues branch lengths.
|
train
|
https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/treeio/write.py#L31-L126
| null |
import pandas
import dendropy
def _write_doc_template(schema):
s = """Write to {} format.
Parameters
----------
filename : str
File to write {} string to. If no filename is given, a {} string
will be returned.
taxon_col : str (default='sequence')
Sequence column name in DataFrame.
taxon_annotations : str
List of columns to annotation in the tree taxon.
node_col : str (default='id')
ID column name in DataFrame
node_annotations : str
List of columns to annotation in the node taxon.
branch_lengths : bool (default=False)
If True, use only the ID column to label sequences in fasta.
""".format(schema, schema, schema)
return s
def _pandas_df_to_dendropy_tree(
df,
taxon_col='uid',
taxon_annotations=[],
node_col='uid',
node_annotations=[],
branch_lengths=True,
):
"""Turn a phylopandas dataframe into a dendropy tree.
Parameters
----------
df : DataFrame
DataFrame containing tree data.
taxon_col : str (optional)
Column in dataframe to label the taxon. If None, the index will be used.
taxon_annotations : str
List of columns to annotation in the tree taxon.
node_col : str (optional)
Column in dataframe to label the nodes. If None, the index will be used.
node_annotations : str
List of columns to annotation in the node taxon.
branch_lengths : bool
If True, inclues branch lengths.
"""
if isinstance(taxon_col, str) is False:
raise Exception("taxon_col must be a string.")
if isinstance(node_col, str) is False:
raise Exception("taxon_col must be a string.")
# Construct a list of nodes from dataframe.
taxon_namespace = dendropy.TaxonNamespace()
nodes = {}
for idx in df.index:
# Get node data.
data = df.loc[idx]
# Get taxon for node (if leaf node).
taxon = None
if data['type'] == 'leaf':
taxon = dendropy.Taxon(label=data[taxon_col])
# Add annotations data.
for ann in taxon_annotations:
taxon.annotations.add_new(ann, data[ann])
taxon_namespace.add_taxon(taxon)
# Get label for node.
label = data[node_col]
# Get edge length.
edge_length = None
if branch_lengths is True:
edge_length = data['length']
# Build a node
n = dendropy.Node(
taxon=taxon,
label=label,
edge_length=edge_length
)
# Add node annotations
for ann in node_annotations:
n.annotations.add_new(ann, data[ann])
nodes[idx] = n
# Build branching pattern for nodes.
root = None
for idx, node in nodes.items():
# Get node data.
data = df.loc[idx]
# Get children nodes
children_idx = df[df['parent'] == data['id']].index
children_nodes = [nodes[i] for i in children_idx]
# Set child nodes
nodes[idx].set_child_nodes(children_nodes)
# Check if this is root.
if data['parent'] is None:
root = nodes[idx]
# Build tree.
tree = dendropy.Tree(
seed_node=root,
taxon_namespace=taxon_namespace
)
return tree
def _write(
df,
filename=None,
schema='newick',
taxon_col='uid',
taxon_annotations=[],
node_col='uid',
node_annotations=[],
branch_lengths=True,
**kwargs
):
"""Write a phylopandas tree DataFrame to various formats.
Parameters
----------
df : DataFrame
DataFrame containing tree data.
filename : str
filepath to write out tree. If None, will return string.
schema : str
tree format to write out.
taxon_col : str (optional)
Column in dataframe to label the taxon. If None, the index will be used.
taxon_annotations : str
List of columns to annotation in the tree taxon.
node_col : str (optional)
Column in dataframe to label the nodes. If None, the index will be used.
node_annotations : str
List of columns to annotation in the node taxon.
branch_lengths : bool
If True, inclues branch lengths.
"""
tree = _pandas_df_to_dendropy_tree(
df,
taxon_col=taxon_col,
taxon_annotations=taxon_annotations,
node_col=node_col,
node_annotations=node_annotations,
branch_lengths=branch_lengths,
)
# Write out format
print(schema)
if filename is not None:
tree.write(path=filename, schema=schema, suppress_annotations=False, **kwargs)
else:
return tree.as_string(schema=schema)
def _write_method(schema):
"""Add a write method for named schema to a class.
"""
def method(
self,
filename=None,
schema=schema,
taxon_col='uid',
taxon_annotations=[],
node_col='uid',
node_annotations=[],
branch_lengths=True,
**kwargs):
# Use generic write class to write data.
return _write(
self._data,
filename=filename,
schema=schema,
taxon_col=taxon_col,
taxon_annotations=taxon_annotations,
node_col=node_col,
node_annotations=node_annotations,
branch_lengths=branch_lengths,
**kwargs
)
# Update docs
method.__doc__ = _write_doc_template(schema)
return method
def _write_function(schema):
"""Add a write method for named schema to a class.
"""
def func(
data,
filename=None,
schema=schema,
taxon_col='uid',
taxon_annotations=[],
node_col='uid',
node_annotations=[],
branch_lengths=True,
**kwargs):
# Use generic write class to write data.
return _write(
data,
filename=filename,
schema=schema,
taxon_col=taxon_col,
taxon_annotations=taxon_annotations,
node_col=node_col,
node_annotations=node_annotations,
branch_lengths=branch_lengths,
**kwargs
)
# Update docs
func.__doc__ = _write_doc_template(schema)
return func
def to_dendropy(
data,
taxon_col='uid',
taxon_annotations=[],
node_col='uid',
node_annotations=[],
branch_lengths=True):
return _pandas_df_to_dendropy_tree(
data,
taxon_col=taxon_col,
taxon_annotations=taxon_annotations,
node_col=node_col,
node_annotations=node_annotations,
branch_lengths=branch_lengths,
)
to_newick = _write_function('newick')
to_nexml = _write_function('nexml')
to_nexus_tree = _write_function('nexus')
|
Zsailer/phylopandas
|
phylopandas/treeio/write.py
|
_write
|
python
|
def _write(
df,
filename=None,
schema='newick',
taxon_col='uid',
taxon_annotations=[],
node_col='uid',
node_annotations=[],
branch_lengths=True,
**kwargs
):
tree = _pandas_df_to_dendropy_tree(
df,
taxon_col=taxon_col,
taxon_annotations=taxon_annotations,
node_col=node_col,
node_annotations=node_annotations,
branch_lengths=branch_lengths,
)
# Write out format
print(schema)
if filename is not None:
tree.write(path=filename, schema=schema, suppress_annotations=False, **kwargs)
else:
return tree.as_string(schema=schema)
|
Write a phylopandas tree DataFrame to various formats.
Parameters
----------
df : DataFrame
DataFrame containing tree data.
filename : str
filepath to write out tree. If None, will return string.
schema : str
tree format to write out.
taxon_col : str (optional)
Column in dataframe to label the taxon. If None, the index will be used.
taxon_annotations : str
List of columns to annotation in the tree taxon.
node_col : str (optional)
Column in dataframe to label the nodes. If None, the index will be used.
node_annotations : str
List of columns to annotation in the node taxon.
branch_lengths : bool
If True, inclues branch lengths.
|
train
|
https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/treeio/write.py#L129-L182
|
[
"def _pandas_df_to_dendropy_tree(\n df,\n taxon_col='uid',\n taxon_annotations=[],\n node_col='uid',\n node_annotations=[],\n branch_lengths=True,\n ):\n \"\"\"Turn a phylopandas dataframe into a dendropy tree.\n\n Parameters\n ----------\n df : DataFrame\n DataFrame containing tree data.\n\n taxon_col : str (optional)\n Column in dataframe to label the taxon. If None, the index will be used.\n\n taxon_annotations : str\n List of columns to annotation in the tree taxon.\n\n node_col : str (optional)\n Column in dataframe to label the nodes. If None, the index will be used.\n\n node_annotations : str\n List of columns to annotation in the node taxon.\n\n branch_lengths : bool\n If True, inclues branch lengths.\n \"\"\"\n if isinstance(taxon_col, str) is False:\n raise Exception(\"taxon_col must be a string.\")\n\n if isinstance(node_col, str) is False:\n raise Exception(\"taxon_col must be a string.\")\n\n # Construct a list of nodes from dataframe.\n taxon_namespace = dendropy.TaxonNamespace()\n nodes = {}\n for idx in df.index:\n # Get node data.\n data = df.loc[idx]\n\n # Get taxon for node (if leaf node).\n taxon = None\n if data['type'] == 'leaf':\n taxon = dendropy.Taxon(label=data[taxon_col])\n # Add annotations data.\n for ann in taxon_annotations:\n taxon.annotations.add_new(ann, data[ann])\n taxon_namespace.add_taxon(taxon)\n\n # Get label for node.\n label = data[node_col]\n\n # Get edge length.\n edge_length = None\n if branch_lengths is True:\n edge_length = data['length']\n\n # Build a node\n n = dendropy.Node(\n taxon=taxon,\n label=label,\n edge_length=edge_length\n )\n\n # Add node annotations\n for ann in node_annotations:\n n.annotations.add_new(ann, data[ann])\n\n nodes[idx] = n\n\n # Build branching pattern for nodes.\n root = None\n for idx, node in nodes.items():\n # Get node data.\n data = df.loc[idx]\n\n # Get children nodes\n children_idx = df[df['parent'] == data['id']].index\n children_nodes = [nodes[i] for i in children_idx]\n\n # Set 
child nodes\n nodes[idx].set_child_nodes(children_nodes)\n\n # Check if this is root.\n if data['parent'] is None:\n root = nodes[idx]\n\n # Build tree.\n tree = dendropy.Tree(\n seed_node=root,\n taxon_namespace=taxon_namespace\n )\n return tree\n"
] |
import pandas
import dendropy
def _write_doc_template(schema):
s = """Write to {} format.
Parameters
----------
filename : str
File to write {} string to. If no filename is given, a {} string
will be returned.
taxon_col : str (default='sequence')
Sequence column name in DataFrame.
taxon_annotations : str
List of columns to annotation in the tree taxon.
node_col : str (default='id')
ID column name in DataFrame
node_annotations : str
List of columns to annotation in the node taxon.
branch_lengths : bool (default=False)
If True, use only the ID column to label sequences in fasta.
""".format(schema, schema, schema)
return s
def _pandas_df_to_dendropy_tree(
df,
taxon_col='uid',
taxon_annotations=[],
node_col='uid',
node_annotations=[],
branch_lengths=True,
):
"""Turn a phylopandas dataframe into a dendropy tree.
Parameters
----------
df : DataFrame
DataFrame containing tree data.
taxon_col : str (optional)
Column in dataframe to label the taxon. If None, the index will be used.
taxon_annotations : str
List of columns to annotation in the tree taxon.
node_col : str (optional)
Column in dataframe to label the nodes. If None, the index will be used.
node_annotations : str
List of columns to annotation in the node taxon.
branch_lengths : bool
If True, inclues branch lengths.
"""
if isinstance(taxon_col, str) is False:
raise Exception("taxon_col must be a string.")
if isinstance(node_col, str) is False:
raise Exception("taxon_col must be a string.")
# Construct a list of nodes from dataframe.
taxon_namespace = dendropy.TaxonNamespace()
nodes = {}
for idx in df.index:
# Get node data.
data = df.loc[idx]
# Get taxon for node (if leaf node).
taxon = None
if data['type'] == 'leaf':
taxon = dendropy.Taxon(label=data[taxon_col])
# Add annotations data.
for ann in taxon_annotations:
taxon.annotations.add_new(ann, data[ann])
taxon_namespace.add_taxon(taxon)
# Get label for node.
label = data[node_col]
# Get edge length.
edge_length = None
if branch_lengths is True:
edge_length = data['length']
# Build a node
n = dendropy.Node(
taxon=taxon,
label=label,
edge_length=edge_length
)
# Add node annotations
for ann in node_annotations:
n.annotations.add_new(ann, data[ann])
nodes[idx] = n
# Build branching pattern for nodes.
root = None
for idx, node in nodes.items():
# Get node data.
data = df.loc[idx]
# Get children nodes
children_idx = df[df['parent'] == data['id']].index
children_nodes = [nodes[i] for i in children_idx]
# Set child nodes
nodes[idx].set_child_nodes(children_nodes)
# Check if this is root.
if data['parent'] is None:
root = nodes[idx]
# Build tree.
tree = dendropy.Tree(
seed_node=root,
taxon_namespace=taxon_namespace
)
return tree
def _write_method(schema):
"""Add a write method for named schema to a class.
"""
def method(
self,
filename=None,
schema=schema,
taxon_col='uid',
taxon_annotations=[],
node_col='uid',
node_annotations=[],
branch_lengths=True,
**kwargs):
# Use generic write class to write data.
return _write(
self._data,
filename=filename,
schema=schema,
taxon_col=taxon_col,
taxon_annotations=taxon_annotations,
node_col=node_col,
node_annotations=node_annotations,
branch_lengths=branch_lengths,
**kwargs
)
# Update docs
method.__doc__ = _write_doc_template(schema)
return method
def _write_function(schema):
"""Add a write method for named schema to a class.
"""
def func(
data,
filename=None,
schema=schema,
taxon_col='uid',
taxon_annotations=[],
node_col='uid',
node_annotations=[],
branch_lengths=True,
**kwargs):
# Use generic write class to write data.
return _write(
data,
filename=filename,
schema=schema,
taxon_col=taxon_col,
taxon_annotations=taxon_annotations,
node_col=node_col,
node_annotations=node_annotations,
branch_lengths=branch_lengths,
**kwargs
)
# Update docs
func.__doc__ = _write_doc_template(schema)
return func
def to_dendropy(
data,
taxon_col='uid',
taxon_annotations=[],
node_col='uid',
node_annotations=[],
branch_lengths=True):
return _pandas_df_to_dendropy_tree(
data,
taxon_col=taxon_col,
taxon_annotations=taxon_annotations,
node_col=node_col,
node_annotations=node_annotations,
branch_lengths=branch_lengths,
)
to_newick = _write_function('newick')
to_nexml = _write_function('nexml')
to_nexus_tree = _write_function('nexus')
|
Zsailer/phylopandas
|
phylopandas/treeio/write.py
|
_write_method
|
python
|
def _write_method(schema):
def method(
self,
filename=None,
schema=schema,
taxon_col='uid',
taxon_annotations=[],
node_col='uid',
node_annotations=[],
branch_lengths=True,
**kwargs):
# Use generic write class to write data.
return _write(
self._data,
filename=filename,
schema=schema,
taxon_col=taxon_col,
taxon_annotations=taxon_annotations,
node_col=node_col,
node_annotations=node_annotations,
branch_lengths=branch_lengths,
**kwargs
)
# Update docs
method.__doc__ = _write_doc_template(schema)
return method
|
Add a write method for named schema to a class.
|
train
|
https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/treeio/write.py#L185-L212
|
[
"def _write_doc_template(schema):\n s = \"\"\"Write to {} format.\n\n Parameters\n ----------\n filename : str\n File to write {} string to. If no filename is given, a {} string\n will be returned.\n\n taxon_col : str (default='sequence')\n Sequence column name in DataFrame.\n\n taxon_annotations : str\n List of columns to annotation in the tree taxon.\n\n node_col : str (default='id')\n ID column name in DataFrame\n\n node_annotations : str\n List of columns to annotation in the node taxon.\n\n branch_lengths : bool (default=False)\n If True, use only the ID column to label sequences in fasta.\n \"\"\".format(schema, schema, schema)\n return s\n"
] |
import pandas
import dendropy
def _write_doc_template(schema):
s = """Write to {} format.
Parameters
----------
filename : str
File to write {} string to. If no filename is given, a {} string
will be returned.
taxon_col : str (default='sequence')
Sequence column name in DataFrame.
taxon_annotations : str
List of columns to annotation in the tree taxon.
node_col : str (default='id')
ID column name in DataFrame
node_annotations : str
List of columns to annotation in the node taxon.
branch_lengths : bool (default=False)
If True, use only the ID column to label sequences in fasta.
""".format(schema, schema, schema)
return s
def _pandas_df_to_dendropy_tree(
df,
taxon_col='uid',
taxon_annotations=[],
node_col='uid',
node_annotations=[],
branch_lengths=True,
):
"""Turn a phylopandas dataframe into a dendropy tree.
Parameters
----------
df : DataFrame
DataFrame containing tree data.
taxon_col : str (optional)
Column in dataframe to label the taxon. If None, the index will be used.
taxon_annotations : str
List of columns to annotation in the tree taxon.
node_col : str (optional)
Column in dataframe to label the nodes. If None, the index will be used.
node_annotations : str
List of columns to annotation in the node taxon.
branch_lengths : bool
If True, inclues branch lengths.
"""
if isinstance(taxon_col, str) is False:
raise Exception("taxon_col must be a string.")
if isinstance(node_col, str) is False:
raise Exception("taxon_col must be a string.")
# Construct a list of nodes from dataframe.
taxon_namespace = dendropy.TaxonNamespace()
nodes = {}
for idx in df.index:
# Get node data.
data = df.loc[idx]
# Get taxon for node (if leaf node).
taxon = None
if data['type'] == 'leaf':
taxon = dendropy.Taxon(label=data[taxon_col])
# Add annotations data.
for ann in taxon_annotations:
taxon.annotations.add_new(ann, data[ann])
taxon_namespace.add_taxon(taxon)
# Get label for node.
label = data[node_col]
# Get edge length.
edge_length = None
if branch_lengths is True:
edge_length = data['length']
# Build a node
n = dendropy.Node(
taxon=taxon,
label=label,
edge_length=edge_length
)
# Add node annotations
for ann in node_annotations:
n.annotations.add_new(ann, data[ann])
nodes[idx] = n
# Build branching pattern for nodes.
root = None
for idx, node in nodes.items():
# Get node data.
data = df.loc[idx]
# Get children nodes
children_idx = df[df['parent'] == data['id']].index
children_nodes = [nodes[i] for i in children_idx]
# Set child nodes
nodes[idx].set_child_nodes(children_nodes)
# Check if this is root.
if data['parent'] is None:
root = nodes[idx]
# Build tree.
tree = dendropy.Tree(
seed_node=root,
taxon_namespace=taxon_namespace
)
return tree
def _write(
df,
filename=None,
schema='newick',
taxon_col='uid',
taxon_annotations=[],
node_col='uid',
node_annotations=[],
branch_lengths=True,
**kwargs
):
"""Write a phylopandas tree DataFrame to various formats.
Parameters
----------
df : DataFrame
DataFrame containing tree data.
filename : str
filepath to write out tree. If None, will return string.
schema : str
tree format to write out.
taxon_col : str (optional)
Column in dataframe to label the taxon. If None, the index will be used.
taxon_annotations : str
List of columns to annotation in the tree taxon.
node_col : str (optional)
Column in dataframe to label the nodes. If None, the index will be used.
node_annotations : str
List of columns to annotation in the node taxon.
branch_lengths : bool
If True, inclues branch lengths.
"""
tree = _pandas_df_to_dendropy_tree(
df,
taxon_col=taxon_col,
taxon_annotations=taxon_annotations,
node_col=node_col,
node_annotations=node_annotations,
branch_lengths=branch_lengths,
)
# Write out format
print(schema)
if filename is not None:
tree.write(path=filename, schema=schema, suppress_annotations=False, **kwargs)
else:
return tree.as_string(schema=schema)
def _write_function(schema):
"""Add a write method for named schema to a class.
"""
def func(
data,
filename=None,
schema=schema,
taxon_col='uid',
taxon_annotations=[],
node_col='uid',
node_annotations=[],
branch_lengths=True,
**kwargs):
# Use generic write class to write data.
return _write(
data,
filename=filename,
schema=schema,
taxon_col=taxon_col,
taxon_annotations=taxon_annotations,
node_col=node_col,
node_annotations=node_annotations,
branch_lengths=branch_lengths,
**kwargs
)
# Update docs
func.__doc__ = _write_doc_template(schema)
return func
def to_dendropy(
data,
taxon_col='uid',
taxon_annotations=[],
node_col='uid',
node_annotations=[],
branch_lengths=True):
return _pandas_df_to_dendropy_tree(
data,
taxon_col=taxon_col,
taxon_annotations=taxon_annotations,
node_col=node_col,
node_annotations=node_annotations,
branch_lengths=branch_lengths,
)
to_newick = _write_function('newick')
to_nexml = _write_function('nexml')
to_nexus_tree = _write_function('nexus')
|
Zsailer/phylopandas
|
phylopandas/treeio/write.py
|
_write_function
|
python
|
def _write_function(schema):
def func(
data,
filename=None,
schema=schema,
taxon_col='uid',
taxon_annotations=[],
node_col='uid',
node_annotations=[],
branch_lengths=True,
**kwargs):
# Use generic write class to write data.
return _write(
data,
filename=filename,
schema=schema,
taxon_col=taxon_col,
taxon_annotations=taxon_annotations,
node_col=node_col,
node_annotations=node_annotations,
branch_lengths=branch_lengths,
**kwargs
)
# Update docs
func.__doc__ = _write_doc_template(schema)
return func
|
Add a write method for named schema to a class.
|
train
|
https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/treeio/write.py#L215-L242
|
[
"def _write_doc_template(schema):\n s = \"\"\"Write to {} format.\n\n Parameters\n ----------\n filename : str\n File to write {} string to. If no filename is given, a {} string\n will be returned.\n\n taxon_col : str (default='sequence')\n Sequence column name in DataFrame.\n\n taxon_annotations : str\n List of columns to annotation in the tree taxon.\n\n node_col : str (default='id')\n ID column name in DataFrame\n\n node_annotations : str\n List of columns to annotation in the node taxon.\n\n branch_lengths : bool (default=False)\n If True, use only the ID column to label sequences in fasta.\n \"\"\".format(schema, schema, schema)\n return s\n"
] |
import pandas
import dendropy
def _write_doc_template(schema):
s = """Write to {} format.
Parameters
----------
filename : str
File to write {} string to. If no filename is given, a {} string
will be returned.
taxon_col : str (default='sequence')
Sequence column name in DataFrame.
taxon_annotations : str
List of columns to annotation in the tree taxon.
node_col : str (default='id')
ID column name in DataFrame
node_annotations : str
List of columns to annotation in the node taxon.
branch_lengths : bool (default=False)
If True, use only the ID column to label sequences in fasta.
""".format(schema, schema, schema)
return s
def _pandas_df_to_dendropy_tree(
df,
taxon_col='uid',
taxon_annotations=[],
node_col='uid',
node_annotations=[],
branch_lengths=True,
):
"""Turn a phylopandas dataframe into a dendropy tree.
Parameters
----------
df : DataFrame
DataFrame containing tree data.
taxon_col : str (optional)
Column in dataframe to label the taxon. If None, the index will be used.
taxon_annotations : str
List of columns to annotation in the tree taxon.
node_col : str (optional)
Column in dataframe to label the nodes. If None, the index will be used.
node_annotations : str
List of columns to annotation in the node taxon.
branch_lengths : bool
If True, inclues branch lengths.
"""
if isinstance(taxon_col, str) is False:
raise Exception("taxon_col must be a string.")
if isinstance(node_col, str) is False:
raise Exception("taxon_col must be a string.")
# Construct a list of nodes from dataframe.
taxon_namespace = dendropy.TaxonNamespace()
nodes = {}
for idx in df.index:
# Get node data.
data = df.loc[idx]
# Get taxon for node (if leaf node).
taxon = None
if data['type'] == 'leaf':
taxon = dendropy.Taxon(label=data[taxon_col])
# Add annotations data.
for ann in taxon_annotations:
taxon.annotations.add_new(ann, data[ann])
taxon_namespace.add_taxon(taxon)
# Get label for node.
label = data[node_col]
# Get edge length.
edge_length = None
if branch_lengths is True:
edge_length = data['length']
# Build a node
n = dendropy.Node(
taxon=taxon,
label=label,
edge_length=edge_length
)
# Add node annotations
for ann in node_annotations:
n.annotations.add_new(ann, data[ann])
nodes[idx] = n
# Build branching pattern for nodes.
root = None
for idx, node in nodes.items():
# Get node data.
data = df.loc[idx]
# Get children nodes
children_idx = df[df['parent'] == data['id']].index
children_nodes = [nodes[i] for i in children_idx]
# Set child nodes
nodes[idx].set_child_nodes(children_nodes)
# Check if this is root.
if data['parent'] is None:
root = nodes[idx]
# Build tree.
tree = dendropy.Tree(
seed_node=root,
taxon_namespace=taxon_namespace
)
return tree
def _write(
df,
filename=None,
schema='newick',
taxon_col='uid',
taxon_annotations=[],
node_col='uid',
node_annotations=[],
branch_lengths=True,
**kwargs
):
"""Write a phylopandas tree DataFrame to various formats.
Parameters
----------
df : DataFrame
DataFrame containing tree data.
filename : str
filepath to write out tree. If None, will return string.
schema : str
tree format to write out.
taxon_col : str (optional)
Column in dataframe to label the taxon. If None, the index will be used.
taxon_annotations : str
List of columns to annotation in the tree taxon.
node_col : str (optional)
Column in dataframe to label the nodes. If None, the index will be used.
node_annotations : str
List of columns to annotation in the node taxon.
branch_lengths : bool
If True, inclues branch lengths.
"""
tree = _pandas_df_to_dendropy_tree(
df,
taxon_col=taxon_col,
taxon_annotations=taxon_annotations,
node_col=node_col,
node_annotations=node_annotations,
branch_lengths=branch_lengths,
)
# Write out format
print(schema)
if filename is not None:
tree.write(path=filename, schema=schema, suppress_annotations=False, **kwargs)
else:
return tree.as_string(schema=schema)
def _write_method(schema):
"""Add a write method for named schema to a class.
"""
def method(
self,
filename=None,
schema=schema,
taxon_col='uid',
taxon_annotations=[],
node_col='uid',
node_annotations=[],
branch_lengths=True,
**kwargs):
# Use generic write class to write data.
return _write(
self._data,
filename=filename,
schema=schema,
taxon_col=taxon_col,
taxon_annotations=taxon_annotations,
node_col=node_col,
node_annotations=node_annotations,
branch_lengths=branch_lengths,
**kwargs
)
# Update docs
method.__doc__ = _write_doc_template(schema)
return method
def to_dendropy(
data,
taxon_col='uid',
taxon_annotations=[],
node_col='uid',
node_annotations=[],
branch_lengths=True):
return _pandas_df_to_dendropy_tree(
data,
taxon_col=taxon_col,
taxon_annotations=taxon_annotations,
node_col=node_col,
node_annotations=node_annotations,
branch_lengths=branch_lengths,
)
to_newick = _write_function('newick')
to_nexml = _write_function('nexml')
to_nexus_tree = _write_function('nexus')
|
Zsailer/phylopandas
|
phylopandas/utils.py
|
get_random_id
|
python
|
def get_random_id(length):
alphabet = string.ascii_uppercase + string.ascii_lowercase + string.digits
return ''.join(random.choice(alphabet) for _ in range(length))
|
Generate a random, alpha-numerical id.
|
train
|
https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/utils.py#L4-L7
| null |
import random
import string
|
nyergler/hieroglyph
|
src/hieroglyph/directives.py
|
filter_doctree_for_slides
|
python
|
def filter_doctree_for_slides(doctree):
current = 0
num_children = len(doctree.children)
while current < num_children:
child = doctree.children[current]
child.replace_self(
child.traverse(no_autoslides_filter)
)
if len(doctree.children) == num_children:
# nothing removed, increment current
current += 1
else:
# a node was removed; retain current and update length
num_children = len(doctree.children)
|
Given a doctree, remove all non-slide related elements from it.
|
train
|
https://github.com/nyergler/hieroglyph/blob/1ef062fad5060006566f8d6bd3b5a231ac7e0488/src/hieroglyph/directives.py#L309-L326
| null |
from docutils import nodes
from sphinx.util.nodes import set_source_info
from docutils.nodes import SkipNode
from docutils.parsers.rst import Directive, directives
from docutils.parsers.rst.directives import (
admonitions,
)
from docutils.parsers.rst.roles import set_classes
from docutils.transforms import Transform
def raiseSkip(self, node):
raise SkipNode()
class if_slides(nodes.Element):
pass
class IfBuildingSlides(Directive):
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = True
option_spec = {}
def run(self):
if self.name in ('slides', 'notslides',):
import warnings
# these are deprecated, print a warning
warnings.warn(
"The %s directive has been deprecated; replace with if%s" % (
self.name, self.name,
),
stacklevel=2,
)
node = if_slides()
node.document = self.state.document
set_source_info(self, node)
node.attributes['ifslides'] = self.name in ('slides', 'ifslides',)
self.state.nested_parse(self.content, self.content_offset,
node, match_titles=1)
return [node]
class TransformSlideConditions(Transform):
default_priority = 550
def apply(self, *args, **kwargs):
app = self.document.settings.env.app
from hieroglyph import builder
need_reread = False
is_slides = builder.building_slides(app)
# this is a slide builder, remove notslides nodes
for node in self.document.traverse(if_slides):
need_reread = True
keep_content = is_slides == node.attributes.get('ifslides', False)
if keep_content:
node.replace_self(node.children)
else:
node.replace_self([])
if need_reread:
self.document.settings.env.note_reread()
class nextslide(nodes.Element):
def __repr__(self):
return 'nextslide: %s' % self.args
class NextSlideDirective(Directive):
has_content = False
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = True
option_spec = {
'increment': directives.flag,
'classes': directives.class_option,
}
def run(self):
node = nextslide(**self.options)
node.args = self.arguments
node.state = self.state
node.document = self.state.document
set_source_info(self, node)
return [node]
class TransformNextSlides(Transform):
default_priority = 550
def apply(self, *args, **kwargs):
app = self.document.settings.env.app
from hieroglyph import builder
is_slides = builder.building_slides(app)
return self.apply_to_document(
self.document,
env=self.document.settings.env,
building_slides=is_slides,
)
def apply_to_document(self, document, env, building_slides):
need_reread = False
for node in document.traverse(nextslide):
need_reread = True
self.visit_nextslide(node, building_slides)
if need_reread:
env.note_reread()
def _make_title_node(self, node, increment=True):
"""Generate a new title node for ``node``.
``node`` is a ``nextslide`` node. The title will use the node's
parent's title, or the title specified as an argument.
"""
parent_title_node = node.parent.next_node(nodes.title)
nextslide_info = getattr(
parent_title_node, 'nextslide_info',
(parent_title_node.deepcopy().children, 1),
)
nextslide_info = (
nextslide_info[0],
nextslide_info[1] + 1,
)
if node.args:
textnodes, messages = node.state.inline_text(
node.args[0],
1,
)
new_title = nodes.title(node.args[0], '', *textnodes)
else:
title_nodes = nextslide_info[0][:]
if 'increment' in node.attributes:
title_nodes.append(
nodes.Text(' (%s)' % nextslide_info[1])
)
new_title = nodes.title(
'', '',
*title_nodes
)
new_title.nextslide_info = nextslide_info
return new_title
def visit_nextslide(self, node, building_slides):
index = node.parent.index(node)
if (not building_slides or
not node.parent.children[index+1:]):
node.parent.replace(node, [])
# nothing else to do
return
# figure out where to hoist the subsequent content to
parent = node.parent
grandparent = node.parent.parent
insertion_point = grandparent.index(node.parent) + 1
# truncate siblings, storing a reference to the rest of the
# content
new_children = parent.children[index+1:]
parent.children = parent.children[:index+1]
# create the next section
new_section = nodes.section()
new_section += self._make_title_node(node)
new_section.extend(new_children)
self.document.set_id(new_section)
# add classes, if needed
if node.get('classes'):
new_section['classes'].extend(node.get('classes'))
# attach the section and delete the nextslide node
grandparent.insert(insertion_point, new_section)
del node.parent[index]
class slideconf(nodes.Element):
def apply(self, builder):
"""Apply the Slide Configuration to a Builder."""
if 'theme' in self.attributes:
builder.apply_theme(
self.attributes['theme'],
builder.theme_options,
)
def restore(self, builder):
"""Restore the previous Slide Configuration for the Builder."""
if 'theme' in self.attributes:
builder.pop_theme()
@classmethod
def get(cls, doctree):
"""Return the first slideconf node for the doctree."""
conf_nodes = doctree.traverse(cls)
if conf_nodes:
return conf_nodes[0]
@classmethod
def get_conf(cls, builder, doctree=None):
"""Return a dictionary of slide configuration for this doctree."""
# set up the default conf
result = {
'theme': builder.config.slide_theme,
'autoslides': builder.config.autoslides,
'slide_classes': [],
}
# now look for a slideconf node in the doctree and update the conf
if doctree:
conf_node = cls.get(doctree)
if conf_node:
result.update(conf_node.attributes)
return result
def boolean_option(argument):
return str(argument.strip().lower()) in ('true', 'yes', '1')
class SlideConf(Directive):
has_content = False
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = True
option_spec = {
'theme': directives.unchanged,
'autoslides': boolean_option,
'slide_classes': directives.class_option,
}
def run(self):
node = slideconf(**self.options)
node.document = self.state.document
set_source_info(self, node)
return [node]
def no_autoslides_filter(node):
if isinstance(node, (if_slides, slideconf, slide)):
return True
if (isinstance(node, nodes.section) and
'include-as-slide' in node.attributes.get('classes', [])):
node.attributes['include-as-slide'] = True
remove_classes = ['include-as-slide']
# see if there's a slide-level class, too
for cls_name in node.attributes['classes']:
if cls_name.startswith('slide-level-'):
node.attributes['level'] = int(cls_name.rsplit('-', 1)[-1])
remove_classes.append(cls_name)
## for cls_name in remove_classes:
## node.attributes['classes'].remove(cls_name)
return True
return False
def process_slideconf_nodes(app, doctree, docname):
from hieroglyph import builder
is_slides = builder.building_slides(app)
# if autoslides is disabled and we're building slides,
# replace the document tree with only explicit slide nodes
if (is_slides and
not slideconf.get_conf(
app.builder, doctree)['autoslides']):
filter_doctree_for_slides(doctree)
class slide(nodes.admonition):
pass
class SlideDirective(admonitions.Admonition):
required_arguments = 0
optional_arguments = 1
node_class = slide
option_spec = {
'class': directives.class_option,
'name': directives.unchanged,
'level': directives.nonnegative_int,
'inline-contents': boolean_option,
}
def run(self):
# largely lifted from the superclass in order to make titles work
set_classes(self.options)
# self.assert_has_content()
text = '\n'.join(self.content)
admonition_node = self.node_class(text, **self.options)
self.add_name(admonition_node)
if self.arguments:
title_text = self.arguments[0]
textnodes, messages = self.state.inline_text(title_text,
self.lineno)
admonition_node += nodes.title(title_text, '', *textnodes)
admonition_node += messages
else:
# no title, make something up so we have an ID
title_text = str(hash(' '.join(self.content)))
if not 'classes' in self.options:
admonition_node['classes'] += ['admonition-' +
nodes.make_id(title_text)]
self.state.nested_parse(self.content, self.content_offset,
admonition_node)
return [admonition_node]
def process_slide_nodes(app, doctree, docname):
from hieroglyph import builder
supports_slide_nodes = (
builder.building_slides(app) or
isinstance(app.builder, builder.AbstractInlineSlideBuilder)
)
if supports_slide_nodes:
return
# this builder does not understand slide nodes; remove them
for node in doctree.traverse(slide):
if node.attributes.get('inline-contents', False):
node.replace_self(node.children[1:])
else:
node.replace_self(nodes.inline())
|
nyergler/hieroglyph
|
src/hieroglyph/directives.py
|
TransformNextSlides._make_title_node
|
python
|
def _make_title_node(self, node, increment=True):
parent_title_node = node.parent.next_node(nodes.title)
nextslide_info = getattr(
parent_title_node, 'nextslide_info',
(parent_title_node.deepcopy().children, 1),
)
nextslide_info = (
nextslide_info[0],
nextslide_info[1] + 1,
)
if node.args:
textnodes, messages = node.state.inline_text(
node.args[0],
1,
)
new_title = nodes.title(node.args[0], '', *textnodes)
else:
title_nodes = nextslide_info[0][:]
if 'increment' in node.attributes:
title_nodes.append(
nodes.Text(' (%s)' % nextslide_info[1])
)
new_title = nodes.title(
'', '',
*title_nodes
)
new_title.nextslide_info = nextslide_info
return new_title
|
Generate a new title node for ``node``.
``node`` is a ``nextslide`` node. The title will use the node's
parent's title, or the title specified as an argument.
|
train
|
https://github.com/nyergler/hieroglyph/blob/1ef062fad5060006566f8d6bd3b5a231ac7e0488/src/hieroglyph/directives.py#L137-L177
| null |
class TransformNextSlides(Transform):
default_priority = 550
def apply(self, *args, **kwargs):
app = self.document.settings.env.app
from hieroglyph import builder
is_slides = builder.building_slides(app)
return self.apply_to_document(
self.document,
env=self.document.settings.env,
building_slides=is_slides,
)
def apply_to_document(self, document, env, building_slides):
need_reread = False
for node in document.traverse(nextslide):
need_reread = True
self.visit_nextslide(node, building_slides)
if need_reread:
env.note_reread()
def visit_nextslide(self, node, building_slides):
index = node.parent.index(node)
if (not building_slides or
not node.parent.children[index+1:]):
node.parent.replace(node, [])
# nothing else to do
return
# figure out where to hoist the subsequent content to
parent = node.parent
grandparent = node.parent.parent
insertion_point = grandparent.index(node.parent) + 1
# truncate siblings, storing a reference to the rest of the
# content
new_children = parent.children[index+1:]
parent.children = parent.children[:index+1]
# create the next section
new_section = nodes.section()
new_section += self._make_title_node(node)
new_section.extend(new_children)
self.document.set_id(new_section)
# add classes, if needed
if node.get('classes'):
new_section['classes'].extend(node.get('classes'))
# attach the section and delete the nextslide node
grandparent.insert(insertion_point, new_section)
del node.parent[index]
|
nyergler/hieroglyph
|
src/hieroglyph/directives.py
|
slideconf.apply
|
python
|
def apply(self, builder):
if 'theme' in self.attributes:
builder.apply_theme(
self.attributes['theme'],
builder.theme_options,
)
|
Apply the Slide Configuration to a Builder.
|
train
|
https://github.com/nyergler/hieroglyph/blob/1ef062fad5060006566f8d6bd3b5a231ac7e0488/src/hieroglyph/directives.py#L217-L224
| null |
class slideconf(nodes.Element):
def restore(self, builder):
"""Restore the previous Slide Configuration for the Builder."""
if 'theme' in self.attributes:
builder.pop_theme()
@classmethod
def get(cls, doctree):
"""Return the first slideconf node for the doctree."""
conf_nodes = doctree.traverse(cls)
if conf_nodes:
return conf_nodes[0]
@classmethod
def get_conf(cls, builder, doctree=None):
"""Return a dictionary of slide configuration for this doctree."""
# set up the default conf
result = {
'theme': builder.config.slide_theme,
'autoslides': builder.config.autoslides,
'slide_classes': [],
}
# now look for a slideconf node in the doctree and update the conf
if doctree:
conf_node = cls.get(doctree)
if conf_node:
result.update(conf_node.attributes)
return result
|
nyergler/hieroglyph
|
src/hieroglyph/directives.py
|
slideconf.get_conf
|
python
|
def get_conf(cls, builder, doctree=None):
# set up the default conf
result = {
'theme': builder.config.slide_theme,
'autoslides': builder.config.autoslides,
'slide_classes': [],
}
# now look for a slideconf node in the doctree and update the conf
if doctree:
conf_node = cls.get(doctree)
if conf_node:
result.update(conf_node.attributes)
return result
|
Return a dictionary of slide configuration for this doctree.
|
train
|
https://github.com/nyergler/hieroglyph/blob/1ef062fad5060006566f8d6bd3b5a231ac7e0488/src/hieroglyph/directives.py#L241-L257
|
[
"def get(cls, doctree):\n \"\"\"Return the first slideconf node for the doctree.\"\"\"\n\n conf_nodes = doctree.traverse(cls)\n if conf_nodes:\n return conf_nodes[0]\n"
] |
class slideconf(nodes.Element):
def apply(self, builder):
"""Apply the Slide Configuration to a Builder."""
if 'theme' in self.attributes:
builder.apply_theme(
self.attributes['theme'],
builder.theme_options,
)
def restore(self, builder):
"""Restore the previous Slide Configuration for the Builder."""
if 'theme' in self.attributes:
builder.pop_theme()
@classmethod
def get(cls, doctree):
"""Return the first slideconf node for the doctree."""
conf_nodes = doctree.traverse(cls)
if conf_nodes:
return conf_nodes[0]
@classmethod
|
nyergler/hieroglyph
|
src/hieroglyph/slides.py
|
__fix_context
|
python
|
def __fix_context(context):
COPY_LISTS = ('script_files', 'css_files',)
for attr in COPY_LISTS:
if attr in context:
context[attr] = context[attr][:]
return context
|
Return a new context dict based on original context.
The new context will be a copy of the original, and some mutable
members (such as script and css files) will also be copied to
prevent polluting shared context.
|
train
|
https://github.com/nyergler/hieroglyph/blob/1ef062fad5060006566f8d6bd3b5a231ac7e0488/src/hieroglyph/slides.py#L4-L18
| null |
from hieroglyph.builder import building_slides
def get_extra_pages(app):
"""
"""
result = []
context = app.builder.globalcontext
for context_key in context:
if context_key.startswith('theme_extra_pages_'):
page_name = context_key.split('theme_extra_pages_')[-1]
result.append(
(page_name, context, context[context_key],)
)
return result
|
nyergler/hieroglyph
|
src/hieroglyph/quickstart.py
|
ask_user
|
python
|
def ask_user(d):
# Print welcome message
msg = bold('Welcome to the Hieroglyph %s quickstart utility.') % (
version(),
)
print(msg)
msg = """
This will ask questions for creating a Hieroglyph project, and then ask
some basic Sphinx questions.
"""
print(msg)
# set a few defaults that we don't usually care about for Hieroglyph
d.update({
'version': datetime.date.today().strftime('%Y.%m.%d'),
'release': datetime.date.today().strftime('%Y.%m.%d'),
'make_mode': True,
})
if 'project' not in d:
print('''
The presentation title will be included on the title slide.''')
sphinx.quickstart.do_prompt(d, 'project', 'Presentation title')
if 'author' not in d:
sphinx.quickstart.do_prompt(d, 'author', 'Author name(s)')
# slide_theme
theme_entrypoints = pkg_resources.iter_entry_points('hieroglyph.theme')
themes = [
t.load()
for t in theme_entrypoints
]
msg = """
Available themes:
"""
for theme in themes:
msg += '\n'.join([
bold(theme['name']),
theme['desc'],
'', '',
])
msg += """Which theme would you like to use?"""
print(msg)
sphinx.quickstart.do_prompt(
d, 'slide_theme', 'Slide Theme', themes[0]['name'],
sphinx.quickstart.choice(
*[t['name'] for t in themes]
),
)
# Ask original questions
print("")
sphinx.quickstart.ask_user(d)
|
Wrap sphinx.quickstart.ask_user, and add additional questions.
|
train
|
https://github.com/nyergler/hieroglyph/blob/1ef062fad5060006566f8d6bd3b5a231ac7e0488/src/hieroglyph/quickstart.py#L16-L76
|
[
"def version():\n \"\"\"Return the installed package version.\"\"\"\n\n import pkg_resources\n\n return pkg_resources.get_distribution('hieroglyph').version\n"
] |
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import datetime
import os
import pkg_resources
from sphinx import version_info as sphinx_version_info
import sphinx.quickstart
from sphinx.util.console import bold
from hieroglyph import version
def quickstart(path=None):
if sphinx_version_info < (1, 5, 0):
from . import quickstart_legacy
return quickstart_legacy.quickstart(path=path)
templatedir = os.path.join(os.path.dirname(__file__), 'templates')
d = sphinx.quickstart.DEFAULT_VALUE.copy()
d['extensions'] = ['hieroglyph']
d.update(dict(("ext_" + ext, False) for ext in sphinx.quickstart.EXTENSIONS))
if path:
d['path'] = path
ask_user(d)
sphinx.quickstart.generate(d, templatedir=templatedir)
def main():
parser = ArgumentParser(
description='Run hieroglyph -q to start a presentation',
)
parser.add_argument('-v', '--version', action='store_true',
help="Print current version of hieroglyph")
parser.add_argument('-q', '--quickstart', action='store_true',
help="Start a hieroglyph project")
parser.add_argument('path', nargs='?', default=None,
help='Output directory for new presentation.')
args = vars(parser.parse_args())
if (args['version']):
print(version())
elif (args['quickstart']):
quickstart(args['path'])
|
nyergler/hieroglyph
|
src/hieroglyph/writer.py
|
SlideData.get_slide_context
|
python
|
def get_slide_context(self):
return {
'title': self.title,
'level': self.level,
'content': self.content,
'classes': self.classes,
'slide_classes': self._filter_classes(exclude='content-'),
'content_classes': self._filter_classes(include='content-'),
'slide_number': self.slide_number,
'config': self._translator.builder.config,
'id': self.id,
}
|
Return the context dict for rendering this slide.
|
train
|
https://github.com/nyergler/hieroglyph/blob/1ef062fad5060006566f8d6bd3b5a231ac7e0488/src/hieroglyph/writer.py#L83-L96
|
[
"def _filter_classes(self, include=None, exclude=None):\n\n classes = self.classes[:]\n if include is not None:\n classes = [\n c[len(include):] for c in classes\n if c.startswith(include)\n ]\n\n if exclude is not None:\n classes = [\n c for c in classes\n if not c.startswith(exclude)\n ]\n\n return classes\n"
] |
class SlideData(object):
def __init__(self, translator, **kwargs):
self._translator = translator
self.level = 0
self.title = ''
self.content = ''
self.classes = []
self.slide_number = 0
self.id = ''
for name, value in kwargs.items():
setattr(self, name, value)
def _filter_classes(self, include=None, exclude=None):
classes = self.classes[:]
if include is not None:
classes = [
c[len(include):] for c in classes
if c.startswith(include)
]
if exclude is not None:
classes = [
c for c in classes
if not c.startswith(exclude)
]
return classes
|
nyergler/hieroglyph
|
src/hieroglyph/writer.py
|
BaseSlideTranslator._add_slide_number
|
python
|
def _add_slide_number(self, slide_no):
if self.builder.config.slide_numbers:
self.body.append(
'\n<div class="slide-no">%s</div>\n' % (slide_no,),
)
|
Add the slide number to the output if enabled.
|
train
|
https://github.com/nyergler/hieroglyph/blob/1ef062fad5060006566f8d6bd3b5a231ac7e0488/src/hieroglyph/writer.py#L127-L133
| null |
class BaseSlideTranslator(HTMLTranslator):
def __init__(self, *args, **kwargs):
HTMLTranslator.__init__(self, *args, **kwargs)
self.section_count = 0
self.body_stack = []
self.current_slide = None
self.slide_data = []
def push_body(self):
"""Push the current body onto the stack and create an empty one."""
self.body_stack.append(self.body)
self.body = []
def pop_body(self):
"""Replace the current body with the last one pushed to the stack."""
self.body = self.body_stack.pop()
def visit_slideconf(self, node):
pass
def depart_slideconf(self, node):
pass
def _add_slide_footer(self, slide_no):
"""Add the slide footer to the output if enabled."""
if self.builder.config.slide_footer:
self.body.append(
'\n<div class="slide-footer">%s</div>\n' % (
self.builder.config.slide_footer,
),
)
def visit_slide(self, node):
from hieroglyph import builder
slide_level = node.attributes.get('level', self.section_level)
if slide_level > self.builder.config.slide_levels:
# dummy for matching div's
self.body.append(
self.starttag(
node, 'div', CLASS='section level-%s' % slide_level)
)
node.tag_name = 'div'
else:
slide_conf = slideconf.get_conf(self.builder, node.document)
if (builder.building_slides(self.builder.app) and
slide_conf['autoslides'] and
isinstance(node.parent, nodes.section) and
not getattr(node.parent, 'closed', False)):
# we're building slides and creating slides from
# sections; close the previous section, if needed
self.depart_slide(node.parent)
# don't increment section_count until we've (potentially)
# closed the previous slide
self.section_count += 1
node.closed = False
classes = node.get('classes')
if not classes:
classes = slide_conf['slide_classes']
# self.body.append(
# self.starttag(
# node, 'article',
# CLASS='%s slide level-%s' % (
# ' '.join(classes),
# slide_level,
# ),
# )
# )
node.tag_name = 'article'
slide_id = node.get('ids')
if slide_id:
slide_id = slide_id[0]
else:
slide_id = ''
assert self.current_slide is None
self.current_slide = SlideData(
self,
id=slide_id,
level=slide_level,
classes=classes,
slide_number=self.section_count,
)
self.push_body()
def depart_slide(self, node):
if self.current_slide and not getattr(node, 'closed', False):
# mark the slide closed
node.closed = True
# self._add_slide_footer(self.section_count)
# self._add_slide_number(self.section_count)
# self.body.append(
# '\n</%s>\n' % getattr(node, 'tag_name', 'article')
# )
self.current_slide.content = ''.join(self.body)
self.pop_body()
rendered_slide = self.builder.templates.render(
'slide.html',
self.current_slide.get_slide_context(),
)
self.body.append(rendered_slide)
self.slide_data.append(self.current_slide)
self.current_slide = None
def visit_title(self, node):
self.push_body()
if (isinstance(node.parent, slide) or
node.parent.attributes.get('include-as-slide', False)):
slide_level = node.parent.attributes.get(
'level',
self.section_level)
level = max(
slide_level + self.initial_header_level - 1,
1,
)
self.current_slide.level = level
# tag = 'h%s' % level
# self.body.append(self.starttag(node, tag, ''))
# self.context.append('</%s>\n' % tag)
if self.current_slide and isinstance(node.parent, (nodes.section, slide)):
self.current_slide.title = node.astext().strip()
else:
HTMLTranslator.visit_title(self, node)
def depart_title(self, node):
if self.current_slide and isinstance(node.parent, (nodes.section, slide)):
self.current_slide.title = ''.join(self.body)
self.pop_body()
else:
HTMLTranslator.depart_title(self, node)
title = ''.join(self.body)
self.pop_body()
self.body.append(title)
def visit_block_quote(self, node):
quote_slide_tags = ['paragraph', 'attribution']
# see if this looks like a quote slide
if (len(node.children) <= 2 and
[c.tagname for c in node.children] == quote_slide_tags[:len(node.children)]):
# process this as a quote slide
# first child must be a paragraph, process it as a <q> element
p = node.children[0]
self.body.append(self.starttag(node, 'q'))
for text_item in p:
text_item.walkabout(self)
self.body.append('</q>\n')
# optional second child must be an attribution, processing as a <div>
# following the <q>
if len(node.children) > 1:
attr = node.children[1]
self.body.append(self.starttag(attr, 'div', CLASS="author"))
for text_item in attr:
text_item.walkabout(self)
self.body.append('</div>\n')
# skip all normal processing
raise nodes.SkipNode
else:
return HTMLTranslator.visit_block_quote(self, node)
|
nyergler/hieroglyph
|
src/hieroglyph/writer.py
|
BaseSlideTranslator._add_slide_footer
|
python
|
def _add_slide_footer(self, slide_no):
if self.builder.config.slide_footer:
self.body.append(
'\n<div class="slide-footer">%s</div>\n' % (
self.builder.config.slide_footer,
),
)
|
Add the slide footer to the output if enabled.
|
train
|
https://github.com/nyergler/hieroglyph/blob/1ef062fad5060006566f8d6bd3b5a231ac7e0488/src/hieroglyph/writer.py#L135-L143
| null |
class BaseSlideTranslator(HTMLTranslator):
def __init__(self, *args, **kwargs):
HTMLTranslator.__init__(self, *args, **kwargs)
self.section_count = 0
self.body_stack = []
self.current_slide = None
self.slide_data = []
def push_body(self):
"""Push the current body onto the stack and create an empty one."""
self.body_stack.append(self.body)
self.body = []
def pop_body(self):
"""Replace the current body with the last one pushed to the stack."""
self.body = self.body_stack.pop()
def visit_slideconf(self, node):
pass
def depart_slideconf(self, node):
pass
def _add_slide_number(self, slide_no):
"""Add the slide number to the output if enabled."""
if self.builder.config.slide_numbers:
self.body.append(
'\n<div class="slide-no">%s</div>\n' % (slide_no,),
)
def visit_slide(self, node):
from hieroglyph import builder
slide_level = node.attributes.get('level', self.section_level)
if slide_level > self.builder.config.slide_levels:
# dummy for matching div's
self.body.append(
self.starttag(
node, 'div', CLASS='section level-%s' % slide_level)
)
node.tag_name = 'div'
else:
slide_conf = slideconf.get_conf(self.builder, node.document)
if (builder.building_slides(self.builder.app) and
slide_conf['autoslides'] and
isinstance(node.parent, nodes.section) and
not getattr(node.parent, 'closed', False)):
# we're building slides and creating slides from
# sections; close the previous section, if needed
self.depart_slide(node.parent)
# don't increment section_count until we've (potentially)
# closed the previous slide
self.section_count += 1
node.closed = False
classes = node.get('classes')
if not classes:
classes = slide_conf['slide_classes']
# self.body.append(
# self.starttag(
# node, 'article',
# CLASS='%s slide level-%s' % (
# ' '.join(classes),
# slide_level,
# ),
# )
# )
node.tag_name = 'article'
slide_id = node.get('ids')
if slide_id:
slide_id = slide_id[0]
else:
slide_id = ''
assert self.current_slide is None
self.current_slide = SlideData(
self,
id=slide_id,
level=slide_level,
classes=classes,
slide_number=self.section_count,
)
self.push_body()
def depart_slide(self, node):
if self.current_slide and not getattr(node, 'closed', False):
# mark the slide closed
node.closed = True
# self._add_slide_footer(self.section_count)
# self._add_slide_number(self.section_count)
# self.body.append(
# '\n</%s>\n' % getattr(node, 'tag_name', 'article')
# )
self.current_slide.content = ''.join(self.body)
self.pop_body()
rendered_slide = self.builder.templates.render(
'slide.html',
self.current_slide.get_slide_context(),
)
self.body.append(rendered_slide)
self.slide_data.append(self.current_slide)
self.current_slide = None
def visit_title(self, node):
self.push_body()
if (isinstance(node.parent, slide) or
node.parent.attributes.get('include-as-slide', False)):
slide_level = node.parent.attributes.get(
'level',
self.section_level)
level = max(
slide_level + self.initial_header_level - 1,
1,
)
self.current_slide.level = level
# tag = 'h%s' % level
# self.body.append(self.starttag(node, tag, ''))
# self.context.append('</%s>\n' % tag)
if self.current_slide and isinstance(node.parent, (nodes.section, slide)):
self.current_slide.title = node.astext().strip()
else:
HTMLTranslator.visit_title(self, node)
def depart_title(self, node):
if self.current_slide and isinstance(node.parent, (nodes.section, slide)):
self.current_slide.title = ''.join(self.body)
self.pop_body()
else:
HTMLTranslator.depart_title(self, node)
title = ''.join(self.body)
self.pop_body()
self.body.append(title)
def visit_block_quote(self, node):
quote_slide_tags = ['paragraph', 'attribution']
# see if this looks like a quote slide
if (len(node.children) <= 2 and
[c.tagname for c in node.children] == quote_slide_tags[:len(node.children)]):
# process this as a quote slide
# first child must be a paragraph, process it as a <q> element
p = node.children[0]
self.body.append(self.starttag(node, 'q'))
for text_item in p:
text_item.walkabout(self)
self.body.append('</q>\n')
# optional second child must be an attribution, processing as a <div>
# following the <q>
if len(node.children) > 1:
attr = node.children[1]
self.body.append(self.starttag(attr, 'div', CLASS="author"))
for text_item in attr:
text_item.walkabout(self)
self.body.append('</div>\n')
# skip all normal processing
raise nodes.SkipNode
else:
return HTMLTranslator.visit_block_quote(self, node)
|
nyergler/hieroglyph
|
src/hieroglyph/html.py
|
inspect_config
|
python
|
def inspect_config(app):
# avoid import cycles :/
from hieroglyph import writer
# only reconfigure Sphinx if we're generating HTML
if app.builder.name not in HTML_BUILDERS:
return
if app.config.slide_link_html_to_slides:
# add the slide theme dir as a Loader
app.builder.templates.loaders.append(
SphinxFileSystemLoader(
os.path.join(
os.path.dirname(__file__), 'themes', 'slides',
)
)
)
# add the "show slides" sidebar template
if not app.config.html_sidebars:
# no sidebars explicitly defined, mimic the old style
# behavior + slide links
app.config.html_sidebars = {
'**': [
'localtoc.html',
'relations.html',
'sourcelink.html',
SLIDELINK_TEMPLATE,
'searchbox.html',
],
}
else:
# sidebars defined, add the template if needed
included = False
for glob, templates in app.config.html_sidebars:
if SLIDELINK_TEMPLATE in templates:
included = True
break
if not included:
# the slidelink template was not included; append it
# to the list of sidebars for all templates
app.config.html_sidebars.setdefault('**', []).append(
SLIDELINK_TEMPLATE,
)
if app.config.slide_link_html_sections_to_slides:
# fix up the HTML Translator
if sphinx.version_info >= (1, 6, 0):
override_translator = type(
'SlideLinkTranslator',
(app.builder.get_translator_class(), object),
{
'depart_title': writer.depart_title,
},
)
app.set_translator(app.builder, override_translator)
else:
app.builder.translator_class = type(
'SlideLinkTranslator',
(app.builder.translator_class, object),
{
'depart_title': writer.depart_title,
},
)
|
Inspect the Sphinx configuration and update for slide-linking.
If links from HTML to slides are enabled, make sure the sidebar
configuration includes the template and add the necessary theme
directory as a loader so the sidebar template can be located.
If the sidebar configuration already includes ``slidelink.html``
(in any key), the configuration will not be changed. If the
configuration is not specified, we'll attempt to emulate what
Sphinx does by default.
|
train
|
https://github.com/nyergler/hieroglyph/blob/1ef062fad5060006566f8d6bd3b5a231ac7e0488/src/hieroglyph/html.py#L12-L89
| null |
"""Support for interacting with HTML builds."""
import os
import sphinx
from sphinx.jinja2glue import SphinxFileSystemLoader
HTML_BUILDERS = ('html', 'dirhtml', 'singlehtml',)
SLIDELINK_TEMPLATE = 'slidelink.html'
def slide_path(builder, pagename=None):
"""Calculate the relative path to the Slides for pagename."""
return builder.get_relative_uri(
pagename or builder.current_docname,
os.path.join(
builder.app.config.slide_relative_path,
pagename or builder.current_docname,
))
def html_path(builder, pagename=None):
"""Calculate the relative path to the Slides for pagename."""
return builder.get_relative_uri(
pagename or builder.current_docname,
os.path.join(
builder.app.config.slide_html_relative_path,
pagename or builder.current_docname,
))
def add_link(app, pagename, templatename, context, doctree):
"""Add the slides link to the HTML context."""
# we can only show the slidelink if we can resolve the filename
context['show_slidelink'] = (
app.config.slide_link_html_to_slides and
hasattr(app.builder, 'get_outfilename')
)
if context['show_slidelink']:
context['slide_path'] = slide_path(app.builder, pagename)
|
nyergler/hieroglyph
|
src/hieroglyph/html.py
|
slide_path
|
python
|
def slide_path(builder, pagename=None):
return builder.get_relative_uri(
pagename or builder.current_docname,
os.path.join(
builder.app.config.slide_relative_path,
pagename or builder.current_docname,
))
|
Calculate the relative path to the Slides for pagename.
|
train
|
https://github.com/nyergler/hieroglyph/blob/1ef062fad5060006566f8d6bd3b5a231ac7e0488/src/hieroglyph/html.py#L92-L100
| null |
"""Support for interacting with HTML builds."""
import os
import sphinx
from sphinx.jinja2glue import SphinxFileSystemLoader
HTML_BUILDERS = ('html', 'dirhtml', 'singlehtml',)
SLIDELINK_TEMPLATE = 'slidelink.html'
def inspect_config(app):
"""Inspect the Sphinx configuration and update for slide-linking.
If links from HTML to slides are enabled, make sure the sidebar
configuration includes the template and add the necessary theme
directory as a loader so the sidebar template can be located.
If the sidebar configuration already includes ``slidelink.html``
(in any key), the configuration will not be changed. If the
configuration is not specified, we'll attempt to emulate what
Sphinx does by default.
"""
# avoid import cycles :/
from hieroglyph import writer
# only reconfigure Sphinx if we're generating HTML
if app.builder.name not in HTML_BUILDERS:
return
if app.config.slide_link_html_to_slides:
# add the slide theme dir as a Loader
app.builder.templates.loaders.append(
SphinxFileSystemLoader(
os.path.join(
os.path.dirname(__file__), 'themes', 'slides',
)
)
)
# add the "show slides" sidebar template
if not app.config.html_sidebars:
# no sidebars explicitly defined, mimic the old style
# behavior + slide links
app.config.html_sidebars = {
'**': [
'localtoc.html',
'relations.html',
'sourcelink.html',
SLIDELINK_TEMPLATE,
'searchbox.html',
],
}
else:
# sidebars defined, add the template if needed
included = False
for glob, templates in app.config.html_sidebars:
if SLIDELINK_TEMPLATE in templates:
included = True
break
if not included:
# the slidelink template was not included; append it
# to the list of sidebars for all templates
app.config.html_sidebars.setdefault('**', []).append(
SLIDELINK_TEMPLATE,
)
if app.config.slide_link_html_sections_to_slides:
# fix up the HTML Translator
if sphinx.version_info >= (1, 6, 0):
override_translator = type(
'SlideLinkTranslator',
(app.builder.get_translator_class(), object),
{
'depart_title': writer.depart_title,
},
)
app.set_translator(app.builder, override_translator)
else:
app.builder.translator_class = type(
'SlideLinkTranslator',
(app.builder.translator_class, object),
{
'depart_title': writer.depart_title,
},
)
def html_path(builder, pagename=None):
"""Calculate the relative path to the Slides for pagename."""
return builder.get_relative_uri(
pagename or builder.current_docname,
os.path.join(
builder.app.config.slide_html_relative_path,
pagename or builder.current_docname,
))
def add_link(app, pagename, templatename, context, doctree):
"""Add the slides link to the HTML context."""
# we can only show the slidelink if we can resolve the filename
context['show_slidelink'] = (
app.config.slide_link_html_to_slides and
hasattr(app.builder, 'get_outfilename')
)
if context['show_slidelink']:
context['slide_path'] = slide_path(app.builder, pagename)
|
nyergler/hieroglyph
|
src/hieroglyph/html.py
|
html_path
|
python
|
def html_path(builder, pagename=None):
return builder.get_relative_uri(
pagename or builder.current_docname,
os.path.join(
builder.app.config.slide_html_relative_path,
pagename or builder.current_docname,
))
|
Calculate the relative path to the Slides for pagename.
|
train
|
https://github.com/nyergler/hieroglyph/blob/1ef062fad5060006566f8d6bd3b5a231ac7e0488/src/hieroglyph/html.py#L103-L111
| null |
"""Support for interacting with HTML builds."""
import os
import sphinx
from sphinx.jinja2glue import SphinxFileSystemLoader
HTML_BUILDERS = ('html', 'dirhtml', 'singlehtml',)
SLIDELINK_TEMPLATE = 'slidelink.html'
def inspect_config(app):
"""Inspect the Sphinx configuration and update for slide-linking.
If links from HTML to slides are enabled, make sure the sidebar
configuration includes the template and add the necessary theme
directory as a loader so the sidebar template can be located.
If the sidebar configuration already includes ``slidelink.html``
(in any key), the configuration will not be changed. If the
configuration is not specified, we'll attempt to emulate what
Sphinx does by default.
"""
# avoid import cycles :/
from hieroglyph import writer
# only reconfigure Sphinx if we're generating HTML
if app.builder.name not in HTML_BUILDERS:
return
if app.config.slide_link_html_to_slides:
# add the slide theme dir as a Loader
app.builder.templates.loaders.append(
SphinxFileSystemLoader(
os.path.join(
os.path.dirname(__file__), 'themes', 'slides',
)
)
)
# add the "show slides" sidebar template
if not app.config.html_sidebars:
# no sidebars explicitly defined, mimic the old style
# behavior + slide links
app.config.html_sidebars = {
'**': [
'localtoc.html',
'relations.html',
'sourcelink.html',
SLIDELINK_TEMPLATE,
'searchbox.html',
],
}
else:
# sidebars defined, add the template if needed
included = False
for glob, templates in app.config.html_sidebars:
if SLIDELINK_TEMPLATE in templates:
included = True
break
if not included:
# the slidelink template was not included; append it
# to the list of sidebars for all templates
app.config.html_sidebars.setdefault('**', []).append(
SLIDELINK_TEMPLATE,
)
if app.config.slide_link_html_sections_to_slides:
# fix up the HTML Translator
if sphinx.version_info >= (1, 6, 0):
override_translator = type(
'SlideLinkTranslator',
(app.builder.get_translator_class(), object),
{
'depart_title': writer.depart_title,
},
)
app.set_translator(app.builder, override_translator)
else:
app.builder.translator_class = type(
'SlideLinkTranslator',
(app.builder.translator_class, object),
{
'depart_title': writer.depart_title,
},
)
def slide_path(builder, pagename=None):
"""Calculate the relative path to the Slides for pagename."""
return builder.get_relative_uri(
pagename or builder.current_docname,
os.path.join(
builder.app.config.slide_relative_path,
pagename or builder.current_docname,
))
def add_link(app, pagename, templatename, context, doctree):
"""Add the slides link to the HTML context."""
# we can only show the slidelink if we can resolve the filename
context['show_slidelink'] = (
app.config.slide_link_html_to_slides and
hasattr(app.builder, 'get_outfilename')
)
if context['show_slidelink']:
context['slide_path'] = slide_path(app.builder, pagename)
|
nyergler/hieroglyph
|
src/hieroglyph/html.py
|
add_link
|
python
|
def add_link(app, pagename, templatename, context, doctree):
# we can only show the slidelink if we can resolve the filename
context['show_slidelink'] = (
app.config.slide_link_html_to_slides and
hasattr(app.builder, 'get_outfilename')
)
if context['show_slidelink']:
context['slide_path'] = slide_path(app.builder, pagename)
|
Add the slides link to the HTML context.
|
train
|
https://github.com/nyergler/hieroglyph/blob/1ef062fad5060006566f8d6bd3b5a231ac7e0488/src/hieroglyph/html.py#L114-L124
|
[
"def slide_path(builder, pagename=None):\n \"\"\"Calculate the relative path to the Slides for pagename.\"\"\"\n\n return builder.get_relative_uri(\n pagename or builder.current_docname,\n os.path.join(\n builder.app.config.slide_relative_path,\n pagename or builder.current_docname,\n ))\n"
] |
"""Support for interacting with HTML builds."""
import os
import sphinx
from sphinx.jinja2glue import SphinxFileSystemLoader
HTML_BUILDERS = ('html', 'dirhtml', 'singlehtml',)
SLIDELINK_TEMPLATE = 'slidelink.html'
def inspect_config(app):
"""Inspect the Sphinx configuration and update for slide-linking.
If links from HTML to slides are enabled, make sure the sidebar
configuration includes the template and add the necessary theme
directory as a loader so the sidebar template can be located.
If the sidebar configuration already includes ``slidelink.html``
(in any key), the configuration will not be changed. If the
configuration is not specified, we'll attempt to emulate what
Sphinx does by default.
"""
# avoid import cycles :/
from hieroglyph import writer
# only reconfigure Sphinx if we're generating HTML
if app.builder.name not in HTML_BUILDERS:
return
if app.config.slide_link_html_to_slides:
# add the slide theme dir as a Loader
app.builder.templates.loaders.append(
SphinxFileSystemLoader(
os.path.join(
os.path.dirname(__file__), 'themes', 'slides',
)
)
)
# add the "show slides" sidebar template
if not app.config.html_sidebars:
# no sidebars explicitly defined, mimic the old style
# behavior + slide links
app.config.html_sidebars = {
'**': [
'localtoc.html',
'relations.html',
'sourcelink.html',
SLIDELINK_TEMPLATE,
'searchbox.html',
],
}
else:
# sidebars defined, add the template if needed
included = False
for glob, templates in app.config.html_sidebars:
if SLIDELINK_TEMPLATE in templates:
included = True
break
if not included:
# the slidelink template was not included; append it
# to the list of sidebars for all templates
app.config.html_sidebars.setdefault('**', []).append(
SLIDELINK_TEMPLATE,
)
if app.config.slide_link_html_sections_to_slides:
# fix up the HTML Translator
if sphinx.version_info >= (1, 6, 0):
override_translator = type(
'SlideLinkTranslator',
(app.builder.get_translator_class(), object),
{
'depart_title': writer.depart_title,
},
)
app.set_translator(app.builder, override_translator)
else:
app.builder.translator_class = type(
'SlideLinkTranslator',
(app.builder.translator_class, object),
{
'depart_title': writer.depart_title,
},
)
def slide_path(builder, pagename=None):
"""Calculate the relative path to the Slides for pagename."""
return builder.get_relative_uri(
pagename or builder.current_docname,
os.path.join(
builder.app.config.slide_relative_path,
pagename or builder.current_docname,
))
def html_path(builder, pagename=None):
"""Calculate the relative path to the Slides for pagename."""
return builder.get_relative_uri(
pagename or builder.current_docname,
os.path.join(
builder.app.config.slide_html_relative_path,
pagename or builder.current_docname,
))
|
nyergler/hieroglyph
|
src/hieroglyph/builder.py
|
AbstractSlideBuilder.apply_theme
|
python
|
def apply_theme(self, themename, themeoptions):
# push the existing values onto the Stack
self._theme_stack.append(
(self.theme, self.theme_options)
)
theme_factory = HTMLThemeFactory(self.app)
theme_factory.load_additional_themes(self.get_builtin_theme_dirs() + self.config.slide_theme_path)
self.theme = theme_factory.create(themename)
self.theme_options = themeoptions.copy()
self.templates.init(self, self.theme)
self.templates.environment.filters['json'] = json.dumps
if self.theme not in self._additional_themes:
self._additional_themes.append(self.theme)
|
Apply a new theme to the document.
This will store the existing theme configuration and apply a new one.
|
train
|
https://github.com/nyergler/hieroglyph/blob/1ef062fad5060006566f8d6bd3b5a231ac7e0488/src/hieroglyph/builder.py#L82-L103
|
[
"def load_additional_themes(self, paths):\n Theme.init_themes(self.app.confdir, paths)\n",
"def create(self, themename):\n return Theme(themename)\n",
"def get_builtin_theme_dirs(self):\n\n return [\n os.path.join(os.path.dirname(__file__), 'themes',)\n ]\n"
] |
class AbstractSlideBuilder(object):
format = 'slides'
add_permalinks = False
default_translator_class = writer.SlideTranslator
def init_translator_class(self):
"""Compatibility shim to support versions of Sphinx prior to 1.6."""
self.translator_class = self.default_translator_class
def get_builtin_theme_dirs(self):
return [
os.path.join(os.path.dirname(__file__), 'themes',)
]
def get_theme_config(self):
"""Return the configured theme name and options."""
return self.config.slide_theme, self.config.slide_theme_options
def get_theme_options(self):
"""Return a dict of theme options, combining defaults and overrides."""
overrides = self.get_theme_config()[1]
return self.theme.get_options(overrides)
def init_templates(self):
themename, themeoptions = self.get_theme_config()
self.create_template_bridge()
self._theme_stack = []
self._additional_themes = []
self.theme = self.theme_options = None
self.apply_theme(themename, themeoptions)
def pop_theme(self):
"""Disable the most recent theme, and restore its predecessor."""
self.theme, self.theme_options = self._theme_stack.pop()
def prepare_writing(self, docnames):
super(AbstractSlideBuilder, self).prepare_writing(docnames)
# override items in the global context if needed
if self.config.slide_title:
self.globalcontext['docstitle'] = self.config.slide_title
def get_doc_context(self, docname, body, metatags):
context = super(AbstractSlideBuilder, self).get_doc_context(
docname, body, metatags,
)
if self.theme:
context.update(dict(
style=self.theme.get_config('theme', 'stylesheet'),
))
return context
def write_doc(self, docname, doctree):
slideconf = directives.slideconf.get(doctree)
if slideconf:
slideconf.apply(self)
result = super(AbstractSlideBuilder, self).write_doc(docname, doctree)
if slideconf:
# restore the previous theme configuration
slideconf.restore(self)
return result
def post_process_images(self, doctree):
"""Pick the best candidate for all image URIs."""
super(AbstractSlideBuilder, self).post_process_images(doctree)
# figure out where this doctree is in relation to the srcdir
relative_base = (
['..'] *
doctree.attributes.get('source')[len(self.srcdir) + 1:].count('/')
)
for node in doctree.traverse(nodes.image):
if node.get('candidates') is None:
node['candidates'] = ('*',)
# fix up images with absolute paths
if node['uri'].startswith(self.outdir):
node['uri'] = '/'.join(
relative_base + [
node['uri'][len(self.outdir) + 1:]
]
)
def copy_static_files(self):
result = super(AbstractSlideBuilder, self).copy_static_files()
# add context items for search function used in searchtools.js_t
ctx = self.globalcontext.copy()
ctx.update(self.indexer.context_for_searchtool())
for theme in self._additional_themes[1:]:
themeentries = [os.path.join(themepath, 'static')
for themepath in theme.get_theme_dirs()[::-1]]
for entry in themeentries:
copy_static_entry(entry, os.path.join(self.outdir, '_static'),
self, ctx)
return result
|
nyergler/hieroglyph
|
src/hieroglyph/builder.py
|
AbstractSlideBuilder.post_process_images
|
python
|
def post_process_images(self, doctree):
super(AbstractSlideBuilder, self).post_process_images(doctree)
# figure out where this doctree is in relation to the srcdir
relative_base = (
['..'] *
doctree.attributes.get('source')[len(self.srcdir) + 1:].count('/')
)
for node in doctree.traverse(nodes.image):
if node.get('candidates') is None:
node['candidates'] = ('*',)
# fix up images with absolute paths
if node['uri'].startswith(self.outdir):
node['uri'] = '/'.join(
relative_base + [
node['uri'][len(self.outdir) + 1:]
]
)
|
Pick the best candidate for all image URIs.
|
train
|
https://github.com/nyergler/hieroglyph/blob/1ef062fad5060006566f8d6bd3b5a231ac7e0488/src/hieroglyph/builder.py#L145-L167
| null |
class AbstractSlideBuilder(object):
format = 'slides'
add_permalinks = False
default_translator_class = writer.SlideTranslator
def init_translator_class(self):
"""Compatibility shim to support versions of Sphinx prior to 1.6."""
self.translator_class = self.default_translator_class
def get_builtin_theme_dirs(self):
return [
os.path.join(os.path.dirname(__file__), 'themes',)
]
def get_theme_config(self):
"""Return the configured theme name and options."""
return self.config.slide_theme, self.config.slide_theme_options
def get_theme_options(self):
"""Return a dict of theme options, combining defaults and overrides."""
overrides = self.get_theme_config()[1]
return self.theme.get_options(overrides)
def init_templates(self):
themename, themeoptions = self.get_theme_config()
self.create_template_bridge()
self._theme_stack = []
self._additional_themes = []
self.theme = self.theme_options = None
self.apply_theme(themename, themeoptions)
def apply_theme(self, themename, themeoptions):
"""Apply a new theme to the document.
This will store the existing theme configuration and apply a new one.
"""
# push the existing values onto the Stack
self._theme_stack.append(
(self.theme, self.theme_options)
)
theme_factory = HTMLThemeFactory(self.app)
theme_factory.load_additional_themes(self.get_builtin_theme_dirs() + self.config.slide_theme_path)
self.theme = theme_factory.create(themename)
self.theme_options = themeoptions.copy()
self.templates.init(self, self.theme)
self.templates.environment.filters['json'] = json.dumps
if self.theme not in self._additional_themes:
self._additional_themes.append(self.theme)
def pop_theme(self):
"""Disable the most recent theme, and restore its predecessor."""
self.theme, self.theme_options = self._theme_stack.pop()
def prepare_writing(self, docnames):
super(AbstractSlideBuilder, self).prepare_writing(docnames)
# override items in the global context if needed
if self.config.slide_title:
self.globalcontext['docstitle'] = self.config.slide_title
def get_doc_context(self, docname, body, metatags):
context = super(AbstractSlideBuilder, self).get_doc_context(
docname, body, metatags,
)
if self.theme:
context.update(dict(
style=self.theme.get_config('theme', 'stylesheet'),
))
return context
def write_doc(self, docname, doctree):
slideconf = directives.slideconf.get(doctree)
if slideconf:
slideconf.apply(self)
result = super(AbstractSlideBuilder, self).write_doc(docname, doctree)
if slideconf:
# restore the previous theme configuration
slideconf.restore(self)
return result
def copy_static_files(self):
result = super(AbstractSlideBuilder, self).copy_static_files()
# add context items for search function used in searchtools.js_t
ctx = self.globalcontext.copy()
ctx.update(self.indexer.context_for_searchtool())
for theme in self._additional_themes[1:]:
themeentries = [os.path.join(themepath, 'static')
for themepath in theme.get_theme_dirs()[::-1]]
for entry in themeentries:
copy_static_entry(entry, os.path.join(self.outdir, '_static'),
self, ctx)
return result
|
nyergler/hieroglyph
|
src/hieroglyph/themes/slides2/static/scripts/md/render.py
|
parse_metadata
|
python
|
def parse_metadata(section):
metadata = {}
metadata_lines = section.split('\n')
for line in metadata_lines:
colon_index = line.find(':')
if colon_index != -1:
key = line[:colon_index].strip()
val = line[colon_index + 1:].strip()
metadata[key] = val
return metadata
|
Given the first part of a slide, returns metadata associated with it.
|
train
|
https://github.com/nyergler/hieroglyph/blob/1ef062fad5060006566f8d6bd3b5a231ac7e0488/src/hieroglyph/themes/slides2/static/scripts/md/render.py#L36-L47
| null |
#!/usr/bin/env python
import codecs
import re
import jinja2
import markdown
def process_slides():
with codecs.open('../../presentation-output.html', 'w', encoding='utf8') as outfile:
md = codecs.open('slides.md', encoding='utf8').read()
md_slides = md.split('\n---\n')
print('Compiled %s slides.' % len(md_slides))
slides = []
# Process each slide separately.
for md_slide in md_slides:
slide = {}
sections = md_slide.split('\n\n')
# Extract metadata at the beginning of the slide (look for key: value)
# pairs.
metadata_section = sections[0]
metadata = parse_metadata(metadata_section)
slide.update(metadata)
remainder_index = metadata and 1 or 0
# Get the content from the rest of the slide.
content_section = '\n\n'.join(sections[remainder_index:])
html = markdown.markdown(content_section)
slide['content'] = postprocess_html(html, metadata)
slides.append(slide)
template = jinja2.Template(open('base.html').read())
outfile.write(template.render(locals()))
def postprocess_html(html, metadata):
"""Returns processed HTML to fit into the slide template format."""
if metadata.get('build_lists') and metadata['build_lists'] == 'true':
html = html.replace('<ul>', '<ul class="build">')
html = html.replace('<ol>', '<ol class="build">')
return html
if __name__ == '__main__':
process_slides()
|
nyergler/hieroglyph
|
src/hieroglyph/themes/slides2/static/scripts/md/render.py
|
postprocess_html
|
python
|
def postprocess_html(html, metadata):
if metadata.get('build_lists') and metadata['build_lists'] == 'true':
html = html.replace('<ul>', '<ul class="build">')
html = html.replace('<ol>', '<ol class="build">')
return html
|
Returns processed HTML to fit into the slide template format.
|
train
|
https://github.com/nyergler/hieroglyph/blob/1ef062fad5060006566f8d6bd3b5a231ac7e0488/src/hieroglyph/themes/slides2/static/scripts/md/render.py#L49-L54
| null |
#!/usr/bin/env python
import codecs
import re
import jinja2
import markdown
def process_slides():
with codecs.open('../../presentation-output.html', 'w', encoding='utf8') as outfile:
md = codecs.open('slides.md', encoding='utf8').read()
md_slides = md.split('\n---\n')
print('Compiled %s slides.' % len(md_slides))
slides = []
# Process each slide separately.
for md_slide in md_slides:
slide = {}
sections = md_slide.split('\n\n')
# Extract metadata at the beginning of the slide (look for key: value)
# pairs.
metadata_section = sections[0]
metadata = parse_metadata(metadata_section)
slide.update(metadata)
remainder_index = metadata and 1 or 0
# Get the content from the rest of the slide.
content_section = '\n\n'.join(sections[remainder_index:])
html = markdown.markdown(content_section)
slide['content'] = postprocess_html(html, metadata)
slides.append(slide)
template = jinja2.Template(open('base.html').read())
outfile.write(template.render(locals()))
def parse_metadata(section):
"""Given the first part of a slide, returns metadata associated with it."""
metadata = {}
metadata_lines = section.split('\n')
for line in metadata_lines:
colon_index = line.find(':')
if colon_index != -1:
key = line[:colon_index].strip()
val = line[colon_index + 1:].strip()
metadata[key] = val
return metadata
if __name__ == '__main__':
process_slides()
|
idlesign/torrentool
|
torrentool/cli.py
|
info
|
python
|
def info(torrent_path):
my_torrent = Torrent.from_file(torrent_path)
size = my_torrent.total_size
click.secho('Name: %s' % my_torrent.name, fg='blue')
click.secho('Files:')
for file_tuple in my_torrent.files:
click.secho(file_tuple.name)
click.secho('Hash: %s' % my_torrent.info_hash, fg='blue')
click.secho('Size: %s (%s)' % (humanize_filesize(size), size), fg='blue')
click.secho('Magnet: %s' % my_torrent.get_magnet(), fg='yellow')
|
Print out information from .torrent file.
|
train
|
https://github.com/idlesign/torrentool/blob/78c474c2ecddbad2e3287b390ac8a043957f3563/torrentool/cli.py#L25-L39
|
[
"def humanize_filesize(bytes_size):\n \"\"\"Returns human readable filesize.\n\n :param int bytes_size:\n :rtype: str\n \"\"\"\n if not bytes_size:\n return '0 B'\n\n names = ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB')\n\n name_idx = int(math.floor(math.log(bytes_size, 1024)))\n size = round(bytes_size / math.pow(1024, name_idx), 2)\n\n return '%s %s' % (size, names[name_idx])\n",
"def get_magnet(self, detailed=True):\n \"\"\"Returns torrent magnet link, consisting of BTIH (BitTorrent Info Hash) URN\n anr optional other information.\n\n :param bool|list|tuple|set detailed:\n For boolean - whether additional info (such as trackers) should be included.\n For iterable - expected allowed parameter names:\n tr - trackers\n ws - webseeds\n\n \"\"\"\n result = 'magnet:?xt=urn:btih:' + self.info_hash\n\n def add_tr():\n urls = self.announce_urls\n if not urls:\n return\n\n trackers = []\n\n urls = urls[0] # Only primary announcers are enough.\n for url in urls:\n trackers.append(('tr', url))\n\n if trackers:\n return urlencode(trackers)\n\n def add_ws():\n webseeds = [('ws', url) for url in self.webseeds]\n if webseeds:\n return urlencode(webseeds)\n\n params_map = {\n 'tr': add_tr,\n 'ws': add_ws,\n }\n\n if detailed:\n details = []\n\n if isinstance(detailed, _ITERABLE_TYPES):\n requested_params = detailed\n else:\n requested_params = params_map.keys()\n\n for param in requested_params:\n param_val = params_map[param]()\n param_val and details.append(param_val)\n\n if details:\n result += '&%s' % '&'.join(details)\n\n return result\n",
"def from_file(cls, filepath):\n \"\"\"Alternative constructor to get Torrent object from file.\n\n :param str filepath:\n :rtype: Torrent\n \"\"\"\n torrent = cls(Bencode.read_file(filepath))\n torrent._filepath = filepath\n return torrent\n"
] |
from __future__ import division
import click
from os import path, getcwd
from . import VERSION
from .api import Torrent
from .utils import humanize_filesize, upload_to_cache_server, get_open_trackers_from_remote, \
get_open_trackers_from_local
from .exceptions import RemoteUploadError, RemoteDownloadError
@click.group()
@click.version_option(version='.'.join(map(str, VERSION)))
def start():
"""Torrentool command line utilities."""
@start.group()
def torrent():
"""Torrent-related commands."""
@torrent.command()
@click.argument('torrent_path', type=click.Path(exists=True, writable=False, dir_okay=False))
@torrent.command()
@click.argument('source', type=click.Path(exists=True, writable=False))
@click.option('--dest', default=getcwd, type=click.Path(file_okay=False), help='Destination path to put .torrent file into. Default: current directory.')
@click.option('--tracker', default=None, help='Tracker announce URL (multiple comma-separated values supported).')
@click.option('--open_trackers', default=False, is_flag=True, help='Add open trackers announce URLs.')
@click.option('--comment', default=None, help='Arbitrary comment.')
@click.option('--cache', default=False, is_flag=True, help='Upload file to torrent cache services.')
def create(source, dest, tracker, open_trackers, comment, cache):
"""Create torrent file from a single file or a directory."""
source_title = path.basename(source).replace('.', '_').replace(' ', '_')
dest = '%s.torrent' % path.join(dest, source_title)
click.secho('Creating torrent from %s ...' % source)
my_torrent = Torrent.create_from(source)
if comment:
my_torrent.comment = comment
urls = []
if tracker:
urls = tracker.split(',')
if open_trackers:
click.secho('Fetching an up-to-date open tracker list ...')
try:
urls.extend(get_open_trackers_from_remote())
except RemoteDownloadError:
click.secho('Failed. Using built-in open tracker list.', fg='red', err=True)
urls.extend(get_open_trackers_from_local())
if urls:
my_torrent.announce_urls = urls
my_torrent.to_file(dest)
click.secho('Torrent file created: %s' % dest, fg='green')
click.secho('Torrent info hash: %s' % my_torrent.info_hash, fg='blue')
if cache:
click.secho('Uploading to %s torrent cache service ...')
try:
result = upload_to_cache_server(dest)
click.secho('Cached torrent URL: %s' % result, fg='yellow')
except RemoteUploadError as e:
click.secho('Failed: %s' % e, fg='red', err=True)
def main():
start(obj={})
|
idlesign/torrentool
|
torrentool/cli.py
|
create
|
python
|
def create(source, dest, tracker, open_trackers, comment, cache):
source_title = path.basename(source).replace('.', '_').replace(' ', '_')
dest = '%s.torrent' % path.join(dest, source_title)
click.secho('Creating torrent from %s ...' % source)
my_torrent = Torrent.create_from(source)
if comment:
my_torrent.comment = comment
urls = []
if tracker:
urls = tracker.split(',')
if open_trackers:
click.secho('Fetching an up-to-date open tracker list ...')
try:
urls.extend(get_open_trackers_from_remote())
except RemoteDownloadError:
click.secho('Failed. Using built-in open tracker list.', fg='red', err=True)
urls.extend(get_open_trackers_from_local())
if urls:
my_torrent.announce_urls = urls
my_torrent.to_file(dest)
click.secho('Torrent file created: %s' % dest, fg='green')
click.secho('Torrent info hash: %s' % my_torrent.info_hash, fg='blue')
if cache:
click.secho('Uploading to %s torrent cache service ...')
try:
result = upload_to_cache_server(dest)
click.secho('Cached torrent URL: %s' % result, fg='yellow')
except RemoteUploadError as e:
click.secho('Failed: %s' % e, fg='red', err=True)
|
Create torrent file from a single file or a directory.
|
train
|
https://github.com/idlesign/torrentool/blob/78c474c2ecddbad2e3287b390ac8a043957f3563/torrentool/cli.py#L49-L90
|
[
"def upload_to_cache_server(fpath):\n \"\"\"Uploads .torrent file to a cache server.\n\n Returns upload file URL.\n\n :rtype: str\n \"\"\"\n url_base = 'http://torrage.info'\n url_upload = '%s/autoupload.php' % url_base\n url_download = '%s/torrent.php?h=' % url_base\n file_field = 'torrent'\n\n try:\n import requests\n\n response = requests.post(url_upload, files={file_field: open(fpath, 'rb')}, timeout=REMOTE_TIMEOUT)\n response.raise_for_status()\n\n info_cache = response.text\n return url_download + info_cache\n\n except (ImportError, requests.RequestException) as e:\n\n # Now trace is lost. `raise from` to consider.\n raise RemoteUploadError('Unable to upload to %s: %s' % (url_upload, e))\n",
"def get_open_trackers_from_local():\n \"\"\"Returns open trackers announce URLs list from local backup.\"\"\"\n with open(path.join(path.dirname(__file__), 'repo', OPEN_TRACKERS_FILENAME)) as f:\n open_trackers = map(str.strip, f.readlines())\n\n return list(open_trackers)\n",
"def get_open_trackers_from_remote():\n \"\"\"Returns open trackers announce URLs list from remote repo.\"\"\"\n\n url_base = 'https://raw.githubusercontent.com/idlesign/torrentool/master/torrentool/repo'\n url = '%s/%s' % (url_base, OPEN_TRACKERS_FILENAME)\n\n try:\n import requests\n\n response = requests.get(url, timeout=REMOTE_TIMEOUT)\n response.raise_for_status()\n\n open_trackers = response.text.splitlines()\n\n except (ImportError, requests.RequestException) as e:\n\n # Now trace is lost. `raise from` to consider.\n raise RemoteDownloadError('Unable to download from %s: %s' % (url, e))\n\n return open_trackers\n",
"def to_file(self, filepath=None):\n \"\"\"Writes Torrent object into file, either\n\n :param filepath:\n \"\"\"\n if filepath is None and self._filepath is None:\n raise TorrentError('Unable to save torrent to file: no filepath supplied.')\n\n if filepath is not None:\n self._filepath = filepath\n\n with open(self._filepath, mode='wb') as f:\n f.write(self.to_string())\n",
"def create_from(cls, src_path):\n \"\"\"Returns Torrent object created from a file or a directory.\n\n :param str src_path:\n :rtype: Torrent\n \"\"\"\n is_dir = isdir(src_path)\n target_files, size_data = cls._get_target_files_info(src_path)\n\n SIZE_MIN = 32768 # 32 KiB\n SIZE_DEFAULT = 262144 # 256 KiB\n SIZE_MAX = 1048576 # 1 MiB\n\n CHUNKS_MIN = 1000 # todo use those limits as advised\n CHUNKS_MAX = 2200\n\n size_piece = SIZE_MIN\n if size_data > SIZE_MIN:\n size_piece = SIZE_DEFAULT\n\n if size_piece > SIZE_MAX:\n size_piece = SIZE_MAX\n\n def read(filepath):\n with open(filepath, 'rb') as f:\n while True:\n chunk = f.read(size_piece - len(pieces_buffer))\n chunk_size = len(chunk)\n if chunk_size == 0:\n break\n yield chunk\n\n pieces = bytearray()\n pieces_buffer = bytearray()\n\n for fpath, _, _ in target_files:\n for chunk in read(fpath):\n pieces_buffer += chunk\n\n if len(pieces_buffer) == size_piece:\n pieces += sha1(pieces_buffer).digest()[:20]\n pieces_buffer = bytearray()\n\n if len(pieces_buffer):\n pieces += sha1(pieces_buffer).digest()[:20]\n pieces_buffer = bytearray()\n\n info = {\n 'name': basename(src_path),\n 'pieces': bytes(pieces),\n 'piece length': size_piece,\n }\n\n if is_dir:\n files = []\n\n for _, length, path in target_files:\n files.append({'length': length, 'path': path})\n\n info['files'] = files\n\n else:\n info['length'] = target_files[0][1]\n\n torrent = cls({'info': info})\n torrent.created_by = get_app_version()\n torrent.creation_date = datetime.utcnow()\n\n return torrent\n"
] |
from __future__ import division
import click
from os import path, getcwd
from . import VERSION
from .api import Torrent
from .utils import humanize_filesize, upload_to_cache_server, get_open_trackers_from_remote, \
get_open_trackers_from_local
from .exceptions import RemoteUploadError, RemoteDownloadError
# Root CLI entry group. ``version_option`` renders the package-level VERSION
# tuple (e.g. (1, 1, 0)) as a dotted string for ``--version`` output.
@click.group()
@click.version_option(version='.'.join(map(str, VERSION)))
def start():
    """Torrentool command line utilities."""
# Sub-group under ``start``; all torrent-file commands (e.g. ``info``) hang off it.
@start.group()
def torrent():
    """Torrent-related commands."""
@torrent.command()
@click.argument('torrent_path', type=click.Path(exists=True, writable=False, dir_okay=False))
def info(torrent_path):
    """Print out information from .torrent file."""
    parsed = Torrent.from_file(torrent_path)
    total = parsed.total_size
    click.secho('Name: %s' % parsed.name, fg='blue')
    click.secho('Files:')
    # NOTE(review): assumes each entry exposes a ``.name`` attribute —
    # confirm against the TorrentFile namedtuple field names.
    for entry in parsed.files:
        click.secho(entry.name)
    click.secho('Hash: %s' % parsed.info_hash, fg='blue')
    click.secho('Size: %s (%s)' % (humanize_filesize(total), total), fg='blue')
    click.secho('Magnet: %s' % parsed.get_magnet(), fg='yellow')
# NOTE(review): this span looks truncated by extraction — the argument/option
# decorators below clearly describe a ``create`` command (source path, --dest,
# --tracker, --open_trackers, --comment, --cache) whose function body is
# missing, so as written they attach to ``main``. Kept byte-identical;
# restore the original ``create`` command from upstream before relying on it.
@torrent.command()
@click.argument('source', type=click.Path(exists=True, writable=False))
@click.option('--dest', default=getcwd, type=click.Path(file_okay=False), help='Destination path to put .torrent file into. Default: current directory.')
@click.option('--tracker', default=None, help='Tracker announce URL (multiple comma-separated values supported).')
@click.option('--open_trackers', default=False, is_flag=True, help='Add open trackers announce URLs.')
@click.option('--comment', default=None, help='Arbitrary comment.')
@click.option('--cache', default=False, is_flag=True, help='Upload file to torrent cache services.')
def main():
    # Script entry point: dispatch into the click group with an empty context object.
    start(obj={})
|
idlesign/torrentool
|
torrentool/torrent.py
|
Torrent.files
|
python
|
def files(self):
files = []
info = self._struct.get('info')
if not info:
return files
if 'files' in info:
base = info['name']
for f in info['files']:
files.append(TorrentFile(join(base, *f['path']), f['length']))
else:
files.append(TorrentFile(info['name'], info['length']))
return files
|
Files in torrent.
List of namedtuples (filepath, size).
:rtype: list[TorrentFile]
|
train
|
https://github.com/idlesign/torrentool/blob/78c474c2ecddbad2e3287b390ac8a043957f3563/torrentool/torrent.py#L108-L130
| null |
class Torrent(object):
"""Represents a torrent file, and exposes utilities to work with it."""
_filepath = None
def __init__(self, dict_struct=None):
dict_struct = dict_struct or {'info': {}}
self._struct = dict_struct
def __str__(self):
return 'Torrent: %s' % self.name
announce_urls = property()
"""List of lists of tracker announce URLs."""
comment = property()
"""Optional. Free-form textual comments of the author."""
creation_date = property()
"""Optional. The creation time of the torrent, in standard UNIX epoch format. UTC."""
created_by = property()
"""Optional. Name and version of the program used to create the .torrent"""
private = property()
"""Optional. If True the client MUST publish its presence to get other peers
ONLY via the trackers explicitly described in the metainfo file. If False or is not present,
the client may obtain peer from other means, e.g. PEX peer exchange, dht.
"""
name = property()
"""Torrent name (title)."""
webseeds = property()
"""A list of URLs where torrent data can be retrieved.
See also: Torrent.httpseeds
http://bittorrent.org/beps/bep_0019.html
"""
httpseeds = property()
"""A list of URLs where torrent data can be retrieved.
See also and prefer Torrent.webseeds
http://bittorrent.org/beps/bep_0017.html
"""
def _list_getter(self, key):
return self._struct.get(key, [])
def _list_setter(self, key, val):
if val is None:
try:
del self._struct[key]
return
except KeyError:
return
if not isinstance(val, _ITERABLE_TYPES):
val = [val]
self._struct[key] = val
@webseeds.getter
def webseeds(self):
return self._list_getter('url-list')
@webseeds.setter
def webseeds(self, val):
self._list_setter('url-list', val)
@httpseeds.getter
def httpseeds(self):
return self._list_getter('httpseeds')
@httpseeds.setter
def httpseeds(self, val):
self._list_setter('httpseeds', val)
@property
@property
def total_size(self):
"""Total size of all files in torrent."""
return reduce(lambda prev, curr: prev + curr[1], self.files, 0)
@property
def info_hash(self):
"""Hash of torrent file info section. Also known as torrent hash."""
info = self._struct.get('info')
if not info:
return None
return sha1(Bencode.encode(info)).hexdigest()
@property
def magnet_link(self):
"""Magnet link using BTIH (BitTorrent Info Hash) URN."""
return self.get_magnet(detailed=False)
@announce_urls.getter
def announce_urls(self):
"""List of lists of announce (tracker) URLs.
First inner list is considered as primary announcers list,
the following lists as back-ups.
http://bittorrent.org/beps/bep_0012.html
"""
urls = self._struct.get('announce-list')
if not urls:
urls = self._struct.get('announce')
if not urls:
return []
urls = [[urls]]
return urls
@announce_urls.setter
def announce_urls(self, val):
self._struct['announce'] = ''
self._struct['announce-list'] = []
def set_single(val):
del self._struct['announce-list']
self._struct['announce'] = val
if isinstance(val, _ITERABLE_TYPES):
length = len(val)
if length:
if length == 1:
set_single(val[0])
else:
for item in val:
if not isinstance(item, _ITERABLE_TYPES):
item = [item]
self._struct['announce-list'].append(item)
self._struct['announce'] = val[0]
else:
set_single(val)
@comment.getter
def comment(self):
return self._struct.get('comment')
@comment.setter
def comment(self, val):
self._struct['comment'] = val
@creation_date.getter
def creation_date(self):
date = self._struct.get('creation date')
if date is not None:
date = datetime.utcfromtimestamp(int(date))
return date
@creation_date.setter
def creation_date(self, val):
self._struct['creation date'] = timegm(val.timetuple())
@created_by.getter
def created_by(self):
return self._struct.get('created by')
@created_by.setter
def created_by(self, val):
self._struct['created by'] = val
@private.getter
def private(self):
return self._struct.get('info', {}).get('private', False)
@private.setter
def private(self, val):
if not val:
try:
del self._struct['info']['private']
except KeyError:
pass
else:
self._struct['info']['private'] = 1
@name.getter
def name(self):
return self._struct.get('info', {}).get('name', None)
@name.setter
def name(self, val):
self._struct['info']['name'] = val
def get_magnet(self, detailed=True):
"""Returns torrent magnet link, consisting of BTIH (BitTorrent Info Hash) URN
and optional other information.
:param bool|list|tuple|set detailed:
For boolean - whether additional info (such as trackers) should be included.
For iterable - expected allowed parameter names:
tr - trackers
ws - webseeds
"""
result = 'magnet:?xt=urn:btih:' + self.info_hash
def add_tr():
urls = self.announce_urls
if not urls:
return
trackers = []
urls = urls[0] # Only primary announcers are enough.
for url in urls:
trackers.append(('tr', url))
if trackers:
return urlencode(trackers)
def add_ws():
webseeds = [('ws', url) for url in self.webseeds]
if webseeds:
return urlencode(webseeds)
params_map = {
'tr': add_tr,
'ws': add_ws,
}
if detailed:
details = []
if isinstance(detailed, _ITERABLE_TYPES):
requested_params = detailed
else:
requested_params = params_map.keys()
for param in requested_params:
param_val = params_map[param]()
param_val and details.append(param_val)
if details:
result += '&%s' % '&'.join(details)
return result
def to_file(self, filepath=None):
"""Writes Torrent object into file, either
:param filepath:
"""
if filepath is None and self._filepath is None:
raise TorrentError('Unable to save torrent to file: no filepath supplied.')
if filepath is not None:
self._filepath = filepath
with open(self._filepath, mode='wb') as f:
f.write(self.to_string())
def to_string(self):
"""Returns bytes representing torrent file.
:param str encoding: Encoding used by strings in Torrent object.
:rtype: bytearray
"""
return Bencode.encode(self._struct)
@classmethod
def _get_target_files_info(cls, src_path):
src_path = u'%s' % src_path # Force walk() to return unicode names.
is_dir = isdir(src_path)
target_files = []
if is_dir:
for base, _, files in walk(src_path):
target_files.extend([join(base, fname) for fname in sorted(files)])
else:
target_files.append(src_path)
target_files_ = []
total_size = 0
for fpath in target_files:
file_size = getsize(fpath)
if not file_size:
continue
target_files_.append((fpath, file_size, normpath(fpath.replace(src_path, '')).strip(sep).split(sep)))
total_size += file_size
return target_files_, total_size
@classmethod
def create_from(cls, src_path):
"""Returns Torrent object created from a file or a directory.
:param str src_path:
:rtype: Torrent
"""
is_dir = isdir(src_path)
target_files, size_data = cls._get_target_files_info(src_path)
SIZE_MIN = 32768 # 32 KiB
SIZE_DEFAULT = 262144 # 256 KiB
SIZE_MAX = 1048576 # 1 MiB
CHUNKS_MIN = 1000 # todo use those limits as advised
CHUNKS_MAX = 2200
size_piece = SIZE_MIN
if size_data > SIZE_MIN:
size_piece = SIZE_DEFAULT
if size_piece > SIZE_MAX:
size_piece = SIZE_MAX
def read(filepath):
with open(filepath, 'rb') as f:
while True:
chunk = f.read(size_piece - len(pieces_buffer))
chunk_size = len(chunk)
if chunk_size == 0:
break
yield chunk
pieces = bytearray()
pieces_buffer = bytearray()
for fpath, _, _ in target_files:
for chunk in read(fpath):
pieces_buffer += chunk
if len(pieces_buffer) == size_piece:
pieces += sha1(pieces_buffer).digest()[:20]
pieces_buffer = bytearray()
if len(pieces_buffer):
pieces += sha1(pieces_buffer).digest()[:20]
pieces_buffer = bytearray()
info = {
'name': basename(src_path),
'pieces': bytes(pieces),
'piece length': size_piece,
}
if is_dir:
files = []
for _, length, path in target_files:
files.append({'length': length, 'path': path})
info['files'] = files
else:
info['length'] = target_files[0][1]
torrent = cls({'info': info})
torrent.created_by = get_app_version()
torrent.creation_date = datetime.utcnow()
return torrent
@classmethod
def from_string(cls, string):
"""Alternative constructor to get Torrent object from string.
:param str string:
:rtype: Torrent
"""
return cls(Bencode.read_string(string))
@classmethod
def from_file(cls, filepath):
"""Alternative constructor to get Torrent object from file.
:param str filepath:
:rtype: Torrent
"""
torrent = cls(Bencode.read_file(filepath))
torrent._filepath = filepath
return torrent
|
idlesign/torrentool
|
torrentool/torrent.py
|
Torrent.info_hash
|
python
|
def info_hash(self):
info = self._struct.get('info')
if not info:
return None
return sha1(Bencode.encode(info)).hexdigest()
|
Hash of torrent file info section. Also known as torrent hash.
|
train
|
https://github.com/idlesign/torrentool/blob/78c474c2ecddbad2e3287b390ac8a043957f3563/torrentool/torrent.py#L138-L145
|
[
"def encode(cls, value):\n \"\"\"Encodes a value into bencoded bytes.\n\n :param value: Python object to be encoded (str, int, list, dict).\n :param str val_encoding: Encoding used by strings in a given object.\n :rtype: bytes\n \"\"\"\n val_encoding = 'utf-8'\n\n def encode_str(v):\n try:\n v_enc = encode(v, val_encoding)\n\n except UnicodeDecodeError:\n if PY3:\n raise\n else:\n # Suppose bytestring\n v_enc = v\n\n prefix = encode('%s:' % len(v_enc), val_encoding)\n return prefix + v_enc\n\n def encode_(val):\n if isinstance(val, str_type):\n result = encode_str(val)\n\n elif isinstance(val, int_types):\n result = encode(('i%se' % val), val_encoding)\n\n elif isinstance(val, (list, set, tuple)):\n result = encode('l', val_encoding)\n for item in val:\n result += encode_(item)\n result += encode('e', val_encoding)\n\n elif isinstance(val, dict):\n result = encode('d', val_encoding)\n\n # Dictionaries are expected to be sorted by key.\n for k, v in OrderedDict(sorted(val.items(), key=itemgetter(0))).items():\n result += (encode_str(k) + encode_(v))\n\n result += encode('e', val_encoding)\n\n elif isinstance(val, byte_types):\n result = encode('%s:' % len(val), val_encoding)\n result += val\n\n else:\n raise BencodeEncodingError('Unable to encode `%s` %s' % (type(val), val))\n\n return result\n\n return encode_(value)\n"
] |
class Torrent(object):
"""Represents a torrent file, and exposes utilities to work with it."""
_filepath = None
def __init__(self, dict_struct=None):
dict_struct = dict_struct or {'info': {}}
self._struct = dict_struct
def __str__(self):
return 'Torrent: %s' % self.name
announce_urls = property()
"""List of lists of tracker announce URLs."""
comment = property()
"""Optional. Free-form textual comments of the author."""
creation_date = property()
"""Optional. The creation time of the torrent, in standard UNIX epoch format. UTC."""
created_by = property()
"""Optional. Name and version of the program used to create the .torrent"""
private = property()
"""Optional. If True the client MUST publish its presence to get other peers
ONLY via the trackers explicitly described in the metainfo file. If False or is not present,
the client may obtain peer from other means, e.g. PEX peer exchange, dht.
"""
name = property()
"""Torrent name (title)."""
webseeds = property()
"""A list of URLs where torrent data can be retrieved.
See also: Torrent.httpseeds
http://bittorrent.org/beps/bep_0019.html
"""
httpseeds = property()
"""A list of URLs where torrent data can be retrieved.
See also and prefer Torrent.webseeds
http://bittorrent.org/beps/bep_0017.html
"""
def _list_getter(self, key):
return self._struct.get(key, [])
def _list_setter(self, key, val):
if val is None:
try:
del self._struct[key]
return
except KeyError:
return
if not isinstance(val, _ITERABLE_TYPES):
val = [val]
self._struct[key] = val
@webseeds.getter
def webseeds(self):
return self._list_getter('url-list')
@webseeds.setter
def webseeds(self, val):
self._list_setter('url-list', val)
@httpseeds.getter
def httpseeds(self):
return self._list_getter('httpseeds')
@httpseeds.setter
def httpseeds(self, val):
self._list_setter('httpseeds', val)
@property
def files(self):
"""Files in torrent.
List of namedtuples (filepath, size).
:rtype: list[TorrentFile]
"""
files = []
info = self._struct.get('info')
if not info:
return files
if 'files' in info:
base = info['name']
for f in info['files']:
files.append(TorrentFile(join(base, *f['path']), f['length']))
else:
files.append(TorrentFile(info['name'], info['length']))
return files
@property
def total_size(self):
"""Total size of all files in torrent."""
return reduce(lambda prev, curr: prev + curr[1], self.files, 0)
@property
@property
def magnet_link(self):
"""Magnet link using BTIH (BitTorrent Info Hash) URN."""
return self.get_magnet(detailed=False)
@announce_urls.getter
def announce_urls(self):
"""List of lists of announce (tracker) URLs.
First inner list is considered as primary announcers list,
the following lists as back-ups.
http://bittorrent.org/beps/bep_0012.html
"""
urls = self._struct.get('announce-list')
if not urls:
urls = self._struct.get('announce')
if not urls:
return []
urls = [[urls]]
return urls
@announce_urls.setter
def announce_urls(self, val):
self._struct['announce'] = ''
self._struct['announce-list'] = []
def set_single(val):
del self._struct['announce-list']
self._struct['announce'] = val
if isinstance(val, _ITERABLE_TYPES):
length = len(val)
if length:
if length == 1:
set_single(val[0])
else:
for item in val:
if not isinstance(item, _ITERABLE_TYPES):
item = [item]
self._struct['announce-list'].append(item)
self._struct['announce'] = val[0]
else:
set_single(val)
@comment.getter
def comment(self):
return self._struct.get('comment')
@comment.setter
def comment(self, val):
self._struct['comment'] = val
@creation_date.getter
def creation_date(self):
date = self._struct.get('creation date')
if date is not None:
date = datetime.utcfromtimestamp(int(date))
return date
@creation_date.setter
def creation_date(self, val):
self._struct['creation date'] = timegm(val.timetuple())
@created_by.getter
def created_by(self):
return self._struct.get('created by')
@created_by.setter
def created_by(self, val):
self._struct['created by'] = val
@private.getter
def private(self):
return self._struct.get('info', {}).get('private', False)
@private.setter
def private(self, val):
if not val:
try:
del self._struct['info']['private']
except KeyError:
pass
else:
self._struct['info']['private'] = 1
@name.getter
def name(self):
return self._struct.get('info', {}).get('name', None)
@name.setter
def name(self, val):
self._struct['info']['name'] = val
def get_magnet(self, detailed=True):
"""Returns torrent magnet link, consisting of BTIH (BitTorrent Info Hash) URN
and optional other information.
:param bool|list|tuple|set detailed:
For boolean - whether additional info (such as trackers) should be included.
For iterable - expected allowed parameter names:
tr - trackers
ws - webseeds
"""
result = 'magnet:?xt=urn:btih:' + self.info_hash
def add_tr():
urls = self.announce_urls
if not urls:
return
trackers = []
urls = urls[0] # Only primary announcers are enough.
for url in urls:
trackers.append(('tr', url))
if trackers:
return urlencode(trackers)
def add_ws():
webseeds = [('ws', url) for url in self.webseeds]
if webseeds:
return urlencode(webseeds)
params_map = {
'tr': add_tr,
'ws': add_ws,
}
if detailed:
details = []
if isinstance(detailed, _ITERABLE_TYPES):
requested_params = detailed
else:
requested_params = params_map.keys()
for param in requested_params:
param_val = params_map[param]()
param_val and details.append(param_val)
if details:
result += '&%s' % '&'.join(details)
return result
def to_file(self, filepath=None):
"""Writes Torrent object into file, either
:param filepath:
"""
if filepath is None and self._filepath is None:
raise TorrentError('Unable to save torrent to file: no filepath supplied.')
if filepath is not None:
self._filepath = filepath
with open(self._filepath, mode='wb') as f:
f.write(self.to_string())
def to_string(self):
"""Returns bytes representing torrent file.
:param str encoding: Encoding used by strings in Torrent object.
:rtype: bytearray
"""
return Bencode.encode(self._struct)
@classmethod
def _get_target_files_info(cls, src_path):
src_path = u'%s' % src_path # Force walk() to return unicode names.
is_dir = isdir(src_path)
target_files = []
if is_dir:
for base, _, files in walk(src_path):
target_files.extend([join(base, fname) for fname in sorted(files)])
else:
target_files.append(src_path)
target_files_ = []
total_size = 0
for fpath in target_files:
file_size = getsize(fpath)
if not file_size:
continue
target_files_.append((fpath, file_size, normpath(fpath.replace(src_path, '')).strip(sep).split(sep)))
total_size += file_size
return target_files_, total_size
@classmethod
def create_from(cls, src_path):
"""Returns Torrent object created from a file or a directory.
:param str src_path:
:rtype: Torrent
"""
is_dir = isdir(src_path)
target_files, size_data = cls._get_target_files_info(src_path)
SIZE_MIN = 32768 # 32 KiB
SIZE_DEFAULT = 262144 # 256 KiB
SIZE_MAX = 1048576 # 1 MiB
CHUNKS_MIN = 1000 # todo use those limits as advised
CHUNKS_MAX = 2200
size_piece = SIZE_MIN
if size_data > SIZE_MIN:
size_piece = SIZE_DEFAULT
if size_piece > SIZE_MAX:
size_piece = SIZE_MAX
def read(filepath):
with open(filepath, 'rb') as f:
while True:
chunk = f.read(size_piece - len(pieces_buffer))
chunk_size = len(chunk)
if chunk_size == 0:
break
yield chunk
pieces = bytearray()
pieces_buffer = bytearray()
for fpath, _, _ in target_files:
for chunk in read(fpath):
pieces_buffer += chunk
if len(pieces_buffer) == size_piece:
pieces += sha1(pieces_buffer).digest()[:20]
pieces_buffer = bytearray()
if len(pieces_buffer):
pieces += sha1(pieces_buffer).digest()[:20]
pieces_buffer = bytearray()
info = {
'name': basename(src_path),
'pieces': bytes(pieces),
'piece length': size_piece,
}
if is_dir:
files = []
for _, length, path in target_files:
files.append({'length': length, 'path': path})
info['files'] = files
else:
info['length'] = target_files[0][1]
torrent = cls({'info': info})
torrent.created_by = get_app_version()
torrent.creation_date = datetime.utcnow()
return torrent
@classmethod
def from_string(cls, string):
"""Alternative constructor to get Torrent object from string.
:param str string:
:rtype: Torrent
"""
return cls(Bencode.read_string(string))
@classmethod
def from_file(cls, filepath):
"""Alternative constructor to get Torrent object from file.
:param str filepath:
:rtype: Torrent
"""
torrent = cls(Bencode.read_file(filepath))
torrent._filepath = filepath
return torrent
|
idlesign/torrentool
|
torrentool/torrent.py
|
Torrent.announce_urls
|
python
|
def announce_urls(self):
urls = self._struct.get('announce-list')
if not urls:
urls = self._struct.get('announce')
if not urls:
return []
urls = [[urls]]
return urls
|
List of lists of announce (tracker) URLs.
First inner list is considered as primary announcers list,
the following lists as back-ups.
http://bittorrent.org/beps/bep_0012.html
|
train
|
https://github.com/idlesign/torrentool/blob/78c474c2ecddbad2e3287b390ac8a043957f3563/torrentool/torrent.py#L153-L170
|
[
"def set_single(val):\n del self._struct['announce-list']\n self._struct['announce'] = val\n"
] |
class Torrent(object):
"""Represents a torrent file, and exposes utilities to work with it."""
_filepath = None
def __init__(self, dict_struct=None):
dict_struct = dict_struct or {'info': {}}
self._struct = dict_struct
def __str__(self):
return 'Torrent: %s' % self.name
announce_urls = property()
"""List of lists of tracker announce URLs."""
comment = property()
"""Optional. Free-form textual comments of the author."""
creation_date = property()
"""Optional. The creation time of the torrent, in standard UNIX epoch format. UTC."""
created_by = property()
"""Optional. Name and version of the program used to create the .torrent"""
private = property()
"""Optional. If True the client MUST publish its presence to get other peers
ONLY via the trackers explicitly described in the metainfo file. If False or is not present,
the client may obtain peer from other means, e.g. PEX peer exchange, dht.
"""
name = property()
"""Torrent name (title)."""
webseeds = property()
"""A list of URLs where torrent data can be retrieved.
See also: Torrent.httpseeds
http://bittorrent.org/beps/bep_0019.html
"""
httpseeds = property()
"""A list of URLs where torrent data can be retrieved.
See also and prefer Torrent.webseeds
http://bittorrent.org/beps/bep_0017.html
"""
def _list_getter(self, key):
return self._struct.get(key, [])
def _list_setter(self, key, val):
if val is None:
try:
del self._struct[key]
return
except KeyError:
return
if not isinstance(val, _ITERABLE_TYPES):
val = [val]
self._struct[key] = val
@webseeds.getter
def webseeds(self):
return self._list_getter('url-list')
@webseeds.setter
def webseeds(self, val):
self._list_setter('url-list', val)
@httpseeds.getter
def httpseeds(self):
return self._list_getter('httpseeds')
@httpseeds.setter
def httpseeds(self, val):
self._list_setter('httpseeds', val)
@property
def files(self):
"""Files in torrent.
List of namedtuples (filepath, size).
:rtype: list[TorrentFile]
"""
files = []
info = self._struct.get('info')
if not info:
return files
if 'files' in info:
base = info['name']
for f in info['files']:
files.append(TorrentFile(join(base, *f['path']), f['length']))
else:
files.append(TorrentFile(info['name'], info['length']))
return files
@property
def total_size(self):
"""Total size of all files in torrent."""
return reduce(lambda prev, curr: prev + curr[1], self.files, 0)
@property
def info_hash(self):
"""Hash of torrent file info section. Also known as torrent hash."""
info = self._struct.get('info')
if not info:
return None
return sha1(Bencode.encode(info)).hexdigest()
@property
def magnet_link(self):
"""Magnet link using BTIH (BitTorrent Info Hash) URN."""
return self.get_magnet(detailed=False)
@announce_urls.getter
@announce_urls.setter
def announce_urls(self, val):
self._struct['announce'] = ''
self._struct['announce-list'] = []
def set_single(val):
del self._struct['announce-list']
self._struct['announce'] = val
if isinstance(val, _ITERABLE_TYPES):
length = len(val)
if length:
if length == 1:
set_single(val[0])
else:
for item in val:
if not isinstance(item, _ITERABLE_TYPES):
item = [item]
self._struct['announce-list'].append(item)
self._struct['announce'] = val[0]
else:
set_single(val)
@comment.getter
def comment(self):
return self._struct.get('comment')
@comment.setter
def comment(self, val):
self._struct['comment'] = val
@creation_date.getter
def creation_date(self):
date = self._struct.get('creation date')
if date is not None:
date = datetime.utcfromtimestamp(int(date))
return date
@creation_date.setter
def creation_date(self, val):
self._struct['creation date'] = timegm(val.timetuple())
@created_by.getter
def created_by(self):
return self._struct.get('created by')
@created_by.setter
def created_by(self, val):
self._struct['created by'] = val
@private.getter
def private(self):
return self._struct.get('info', {}).get('private', False)
@private.setter
def private(self, val):
if not val:
try:
del self._struct['info']['private']
except KeyError:
pass
else:
self._struct['info']['private'] = 1
@name.getter
def name(self):
return self._struct.get('info', {}).get('name', None)
@name.setter
def name(self, val):
self._struct['info']['name'] = val
def get_magnet(self, detailed=True):
"""Returns torrent magnet link, consisting of BTIH (BitTorrent Info Hash) URN
and optional other information.
:param bool|list|tuple|set detailed:
For boolean - whether additional info (such as trackers) should be included.
For iterable - expected allowed parameter names:
tr - trackers
ws - webseeds
"""
result = 'magnet:?xt=urn:btih:' + self.info_hash
def add_tr():
urls = self.announce_urls
if not urls:
return
trackers = []
urls = urls[0] # Only primary announcers are enough.
for url in urls:
trackers.append(('tr', url))
if trackers:
return urlencode(trackers)
def add_ws():
webseeds = [('ws', url) for url in self.webseeds]
if webseeds:
return urlencode(webseeds)
params_map = {
'tr': add_tr,
'ws': add_ws,
}
if detailed:
details = []
if isinstance(detailed, _ITERABLE_TYPES):
requested_params = detailed
else:
requested_params = params_map.keys()
for param in requested_params:
param_val = params_map[param]()
param_val and details.append(param_val)
if details:
result += '&%s' % '&'.join(details)
return result
def to_file(self, filepath=None):
"""Writes Torrent object into file, either
:param filepath:
"""
if filepath is None and self._filepath is None:
raise TorrentError('Unable to save torrent to file: no filepath supplied.')
if filepath is not None:
self._filepath = filepath
with open(self._filepath, mode='wb') as f:
f.write(self.to_string())
def to_string(self):
"""Returns bytes representing torrent file.
:param str encoding: Encoding used by strings in Torrent object.
:rtype: bytearray
"""
return Bencode.encode(self._struct)
@classmethod
def _get_target_files_info(cls, src_path):
src_path = u'%s' % src_path # Force walk() to return unicode names.
is_dir = isdir(src_path)
target_files = []
if is_dir:
for base, _, files in walk(src_path):
target_files.extend([join(base, fname) for fname in sorted(files)])
else:
target_files.append(src_path)
target_files_ = []
total_size = 0
for fpath in target_files:
file_size = getsize(fpath)
if not file_size:
continue
target_files_.append((fpath, file_size, normpath(fpath.replace(src_path, '')).strip(sep).split(sep)))
total_size += file_size
return target_files_, total_size
@classmethod
def create_from(cls, src_path):
"""Returns Torrent object created from a file or a directory.
:param str src_path:
:rtype: Torrent
"""
is_dir = isdir(src_path)
target_files, size_data = cls._get_target_files_info(src_path)
SIZE_MIN = 32768 # 32 KiB
SIZE_DEFAULT = 262144 # 256 KiB
SIZE_MAX = 1048576 # 1 MiB
CHUNKS_MIN = 1000 # todo use those limits as advised
CHUNKS_MAX = 2200
size_piece = SIZE_MIN
if size_data > SIZE_MIN:
size_piece = SIZE_DEFAULT
if size_piece > SIZE_MAX:
size_piece = SIZE_MAX
def read(filepath):
with open(filepath, 'rb') as f:
while True:
chunk = f.read(size_piece - len(pieces_buffer))
chunk_size = len(chunk)
if chunk_size == 0:
break
yield chunk
pieces = bytearray()
pieces_buffer = bytearray()
for fpath, _, _ in target_files:
for chunk in read(fpath):
pieces_buffer += chunk
if len(pieces_buffer) == size_piece:
pieces += sha1(pieces_buffer).digest()[:20]
pieces_buffer = bytearray()
if len(pieces_buffer):
pieces += sha1(pieces_buffer).digest()[:20]
pieces_buffer = bytearray()
info = {
'name': basename(src_path),
'pieces': bytes(pieces),
'piece length': size_piece,
}
if is_dir:
files = []
for _, length, path in target_files:
files.append({'length': length, 'path': path})
info['files'] = files
else:
info['length'] = target_files[0][1]
torrent = cls({'info': info})
torrent.created_by = get_app_version()
torrent.creation_date = datetime.utcnow()
return torrent
@classmethod
def from_string(cls, string):
"""Alternative constructor to get Torrent object from string.
:param str string:
:rtype: Torrent
"""
return cls(Bencode.read_string(string))
@classmethod
def from_file(cls, filepath):
"""Alternative constructor to get Torrent object from file.
:param str filepath:
:rtype: Torrent
"""
torrent = cls(Bencode.read_file(filepath))
torrent._filepath = filepath
return torrent
|
idlesign/torrentool
|
torrentool/torrent.py
|
Torrent.get_magnet
|
python
|
def get_magnet(self, detailed=True):
result = 'magnet:?xt=urn:btih:' + self.info_hash
def add_tr():
urls = self.announce_urls
if not urls:
return
trackers = []
urls = urls[0] # Only primary announcers are enough.
for url in urls:
trackers.append(('tr', url))
if trackers:
return urlencode(trackers)
def add_ws():
webseeds = [('ws', url) for url in self.webseeds]
if webseeds:
return urlencode(webseeds)
params_map = {
'tr': add_tr,
'ws': add_ws,
}
if detailed:
details = []
if isinstance(detailed, _ITERABLE_TYPES):
requested_params = detailed
else:
requested_params = params_map.keys()
for param in requested_params:
param_val = params_map[param]()
param_val and details.append(param_val)
if details:
result += '&%s' % '&'.join(details)
return result
|
Returns torrent magnet link, consisting of BTIH (BitTorrent Info Hash) URN
and optional other information.
:param bool|list|tuple|set detailed:
For boolean - whether additional info (such as trackers) should be included.
For iterable - expected allowed parameter names:
tr - trackers
ws - webseeds
|
train
|
https://github.com/idlesign/torrentool/blob/78c474c2ecddbad2e3287b390ac8a043957f3563/torrentool/torrent.py#L246-L298
| null |
class Torrent(object):
"""Represents a torrent file, and exposes utilities to work with it."""
_filepath = None
def __init__(self, dict_struct=None):
dict_struct = dict_struct or {'info': {}}
self._struct = dict_struct
def __str__(self):
return 'Torrent: %s' % self.name
announce_urls = property()
"""List of lists of tracker announce URLs."""
comment = property()
"""Optional. Free-form textual comments of the author."""
creation_date = property()
"""Optional. The creation time of the torrent, in standard UNIX epoch format. UTC."""
created_by = property()
"""Optional. Name and version of the program used to create the .torrent"""
private = property()
"""Optional. If True the client MUST publish its presence to get other peers
ONLY via the trackers explicitly described in the metainfo file. If False or is not present,
the client may obtain peer from other means, e.g. PEX peer exchange, dht.
"""
name = property()
"""Torrent name (title)."""
webseeds = property()
"""A list of URLs where torrent data can be retrieved.
See also: Torrent.httpseeds
http://bittorrent.org/beps/bep_0019.html
"""
httpseeds = property()
"""A list of URLs where torrent data can be retrieved.
See also and prefer Torrent.webseeds
http://bittorrent.org/beps/bep_0017.html
"""
def _list_getter(self, key):
return self._struct.get(key, [])
def _list_setter(self, key, val):
if val is None:
try:
del self._struct[key]
return
except KeyError:
return
if not isinstance(val, _ITERABLE_TYPES):
val = [val]
self._struct[key] = val
@webseeds.getter
def webseeds(self):
return self._list_getter('url-list')
@webseeds.setter
def webseeds(self, val):
self._list_setter('url-list', val)
@httpseeds.getter
def httpseeds(self):
return self._list_getter('httpseeds')
@httpseeds.setter
def httpseeds(self, val):
self._list_setter('httpseeds', val)
@property
def files(self):
"""Files in torrent.
List of namedtuples (filepath, size).
:rtype: list[TorrentFile]
"""
files = []
info = self._struct.get('info')
if not info:
return files
if 'files' in info:
base = info['name']
for f in info['files']:
files.append(TorrentFile(join(base, *f['path']), f['length']))
else:
files.append(TorrentFile(info['name'], info['length']))
return files
@property
def total_size(self):
"""Total size of all files in torrent."""
return reduce(lambda prev, curr: prev + curr[1], self.files, 0)
@property
def info_hash(self):
"""Hash of torrent file info section. Also known as torrent hash."""
info = self._struct.get('info')
if not info:
return None
return sha1(Bencode.encode(info)).hexdigest()
@property
def magnet_link(self):
"""Magnet link using BTIH (BitTorrent Info Hash) URN."""
return self.get_magnet(detailed=False)
@announce_urls.getter
def announce_urls(self):
"""List of lists of announce (tracker) URLs.
First inner list is considered as primary announcers list,
the following lists as back-ups.
http://bittorrent.org/beps/bep_0012.html
"""
urls = self._struct.get('announce-list')
if not urls:
urls = self._struct.get('announce')
if not urls:
return []
urls = [[urls]]
return urls
@announce_urls.setter
def announce_urls(self, val):
self._struct['announce'] = ''
self._struct['announce-list'] = []
def set_single(val):
del self._struct['announce-list']
self._struct['announce'] = val
if isinstance(val, _ITERABLE_TYPES):
length = len(val)
if length:
if length == 1:
set_single(val[0])
else:
for item in val:
if not isinstance(item, _ITERABLE_TYPES):
item = [item]
self._struct['announce-list'].append(item)
self._struct['announce'] = val[0]
else:
set_single(val)
@comment.getter
def comment(self):
return self._struct.get('comment')
@comment.setter
def comment(self, val):
self._struct['comment'] = val
@creation_date.getter
def creation_date(self):
date = self._struct.get('creation date')
if date is not None:
date = datetime.utcfromtimestamp(int(date))
return date
@creation_date.setter
def creation_date(self, val):
self._struct['creation date'] = timegm(val.timetuple())
@created_by.getter
def created_by(self):
return self._struct.get('created by')
@created_by.setter
def created_by(self, val):
self._struct['created by'] = val
@private.getter
def private(self):
return self._struct.get('info', {}).get('private', False)
@private.setter
def private(self, val):
if not val:
try:
del self._struct['info']['private']
except KeyError:
pass
else:
self._struct['info']['private'] = 1
@name.getter
def name(self):
return self._struct.get('info', {}).get('name', None)
@name.setter
def name(self, val):
self._struct['info']['name'] = val
def to_file(self, filepath=None):
"""Writes Torrent object into file, either
:param filepath:
"""
if filepath is None and self._filepath is None:
raise TorrentError('Unable to save torrent to file: no filepath supplied.')
if filepath is not None:
self._filepath = filepath
with open(self._filepath, mode='wb') as f:
f.write(self.to_string())
def to_string(self):
"""Returns bytes representing torrent file.
:param str encoding: Encoding used by strings in Torrent object.
:rtype: bytearray
"""
return Bencode.encode(self._struct)
@classmethod
def _get_target_files_info(cls, src_path):
src_path = u'%s' % src_path # Force walk() to return unicode names.
is_dir = isdir(src_path)
target_files = []
if is_dir:
for base, _, files in walk(src_path):
target_files.extend([join(base, fname) for fname in sorted(files)])
else:
target_files.append(src_path)
target_files_ = []
total_size = 0
for fpath in target_files:
file_size = getsize(fpath)
if not file_size:
continue
target_files_.append((fpath, file_size, normpath(fpath.replace(src_path, '')).strip(sep).split(sep)))
total_size += file_size
return target_files_, total_size
@classmethod
def create_from(cls, src_path):
"""Returns Torrent object created from a file or a directory.
:param str src_path:
:rtype: Torrent
"""
is_dir = isdir(src_path)
target_files, size_data = cls._get_target_files_info(src_path)
SIZE_MIN = 32768 # 32 KiB
SIZE_DEFAULT = 262144 # 256 KiB
SIZE_MAX = 1048576 # 1 MiB
CHUNKS_MIN = 1000 # todo use those limits as advised
CHUNKS_MAX = 2200
size_piece = SIZE_MIN
if size_data > SIZE_MIN:
size_piece = SIZE_DEFAULT
if size_piece > SIZE_MAX:
size_piece = SIZE_MAX
def read(filepath):
with open(filepath, 'rb') as f:
while True:
chunk = f.read(size_piece - len(pieces_buffer))
chunk_size = len(chunk)
if chunk_size == 0:
break
yield chunk
pieces = bytearray()
pieces_buffer = bytearray()
for fpath, _, _ in target_files:
for chunk in read(fpath):
pieces_buffer += chunk
if len(pieces_buffer) == size_piece:
pieces += sha1(pieces_buffer).digest()[:20]
pieces_buffer = bytearray()
if len(pieces_buffer):
pieces += sha1(pieces_buffer).digest()[:20]
pieces_buffer = bytearray()
info = {
'name': basename(src_path),
'pieces': bytes(pieces),
'piece length': size_piece,
}
if is_dir:
files = []
for _, length, path in target_files:
files.append({'length': length, 'path': path})
info['files'] = files
else:
info['length'] = target_files[0][1]
torrent = cls({'info': info})
torrent.created_by = get_app_version()
torrent.creation_date = datetime.utcnow()
return torrent
@classmethod
def from_string(cls, string):
"""Alternative constructor to get Torrent object from string.
:param str string:
:rtype: Torrent
"""
return cls(Bencode.read_string(string))
@classmethod
def from_file(cls, filepath):
"""Alternative constructor to get Torrent object from file.
:param str filepath:
:rtype: Torrent
"""
torrent = cls(Bencode.read_file(filepath))
torrent._filepath = filepath
return torrent
|
idlesign/torrentool
|
torrentool/torrent.py
|
Torrent.to_file
|
python
|
def to_file(self, filepath=None):
if filepath is None and self._filepath is None:
raise TorrentError('Unable to save torrent to file: no filepath supplied.')
if filepath is not None:
self._filepath = filepath
with open(self._filepath, mode='wb') as f:
f.write(self.to_string())
|
Writes Torrent object into a file: either the given filepath, or the one
the object was previously loaded from or saved to.
:param filepath:
|
train
|
https://github.com/idlesign/torrentool/blob/78c474c2ecddbad2e3287b390ac8a043957f3563/torrentool/torrent.py#L300-L312
|
[
"def to_string(self):\n \"\"\"Returns bytes representing torrent file.\n\n :param str encoding: Encoding used by strings in Torrent object.\n :rtype: bytearray\n \"\"\"\n return Bencode.encode(self._struct)\n"
] |
class Torrent(object):
"""Represents a torrent file, and exposes utilities to work with it."""
# Path of the .torrent file this object was read from or last saved to;
# set by from_file() / to_file().
_filepath = None
def __init__(self, dict_struct=None):
    """Initializes the torrent from a bencode-decoded dictionary.

    Falls back to a minimal structure holding an empty ``info`` section.

    :param dict dict_struct: decoded .torrent contents
    """
    if not dict_struct:
        dict_struct = {'info': {}}
    self._struct = dict_struct
def __str__(self):
    """Human-readable representation: the torrent title prefixed with ``Torrent:``."""
    return 'Torrent: {}'.format(self.name)
# NOTE: these are placeholder property objects; the concrete getter/setter
# implementations are attached further below via @<name>.getter / @<name>.setter.
announce_urls = property()
"""List of lists of tracker announce URLs."""
comment = property()
"""Optional. Free-form textual comments of the author."""
creation_date = property()
"""Optional. The creation time of the torrent, in standard UNIX epoch format. UTC."""
created_by = property()
"""Optional. Name and version of the program used to create the .torrent"""
private = property()
"""Optional. If True the client MUST publish its presence to get other peers
ONLY via the trackers explicitly described in the metainfo file. If False or is not present,
the client may obtain peer from other means, e.g. PEX peer exchange, dht.
"""
name = property()
"""Torrent name (title)."""
webseeds = property()
"""A list of URLs where torrent data can be retrieved.
See also: Torrent.httpseeds
http://bittorrent.org/beps/bep_0019.html
"""
httpseeds = property()
"""A list of URLs where torrent data can be retrieved.
See also and prefer Torrent.webseeds
http://bittorrent.org/beps/bep_0017.html
"""
def _list_getter(self, key):
    """Returns the list stored under ``key``, or an empty list if absent."""
    return self._struct.get(key, [])

def _list_setter(self, key, val):
    """Stores ``val`` under ``key`` as a list.

    ``None`` removes the key entirely; a bare (non-iterable) value is
    wrapped into a single-item list.
    """
    if val is None:
        self._struct.pop(key, None)
        return

    if isinstance(val, _ITERABLE_TYPES):
        self._struct[key] = val
    else:
        self._struct[key] = [val]
@webseeds.getter
def webseeds(self):
    # Webseed URLs live under the BEP 19 ``url-list`` key.
    return self._list_getter('url-list')

@webseeds.setter
def webseeds(self, val):
    self._list_setter('url-list', val)

@httpseeds.getter
def httpseeds(self):
    # HTTP seed URLs live under the BEP 17 ``httpseeds`` key.
    return self._list_getter('httpseeds')

@httpseeds.setter
def httpseeds(self, val):
    self._list_setter('httpseeds', val)
@property
def files(self):
    """Files in torrent.

    List of namedtuples (filepath, size).

    :rtype: list[TorrentFile]
    """
    info = self._struct.get('info')

    if not info:
        return []

    if 'files' not in info:
        # Single-file mode: name and length live directly in ``info``.
        return [TorrentFile(info['name'], info['length'])]

    base_dir = info['name']
    return [
        TorrentFile(join(base_dir, *entry['path']), entry['length'])
        for entry in info['files']
    ]
@property
def total_size(self):
    """Total size (in bytes) of all files in torrent.

    :rtype: int
    """
    # sum() over the per-file sizes replaces the previous
    # functools.reduce() + lambda construct: same result, clearer,
    # and no functools dependency.
    return sum(entry[1] for entry in self.files)
@property
def info_hash(self):
    """Hash of torrent file info section. Also known as torrent hash.

    40-character hex SHA1 digest of the bencoded ``info`` dictionary,
    or ``None`` when there is no info section yet.
    """
    info = self._struct.get('info')

    if not info:
        return None

    return sha1(Bencode.encode(info)).hexdigest()
@property
def magnet_link(self):
    """Magnet link using BTIH (BitTorrent Info Hash) URN.

    Bare link only -- no trackers/webseeds; use ``get_magnet()`` for those.
    """
    return self.get_magnet(detailed=False)
@announce_urls.getter
def announce_urls(self):
    """List of lists of announce (tracker) URLs.

    First inner list is considered as primary announcers list,
    the following lists as back-ups.

    http://bittorrent.org/beps/bep_0012.html
    """
    tiered = self._struct.get('announce-list')
    if tiered:
        return tiered

    single = self._struct.get('announce')
    if single:
        # A lone announce URL is presented as a one-tier list of lists.
        return [[single]]

    return []
@announce_urls.setter
def announce_urls(self, val):
    # Reset both storage keys before (re)populating.
    self._struct['announce'] = ''
    self._struct['announce-list'] = []

    def set_single(val):
        # A lone URL is stored under ``announce`` only; the tiered
        # ``announce-list`` key is dropped entirely.
        del self._struct['announce-list']
        self._struct['announce'] = val

    if isinstance(val, _ITERABLE_TYPES):
        length = len(val)
        if length:
            if length == 1:
                set_single(val[0])
            else:
                # Multiple entries: build the tiered announce-list (wrapping
                # bare URLs into single-item tiers) and mirror the first
                # entry into ``announce`` for clients that ignore BEP 12.
                for item in val:
                    if not isinstance(item, _ITERABLE_TYPES):
                        item = [item]
                    self._struct['announce-list'].append(item)
                self._struct['announce'] = val[0]
        # NOTE(review): an empty iterable leaves ``announce`` == '' and an
        # empty announce-list behind -- presumably intentional; confirm.
    else:
        set_single(val)
@comment.getter
def comment(self):
    return self._struct.get('comment')

@comment.setter
def comment(self, val):
    self._struct['comment'] = val

@creation_date.getter
def creation_date(self):
    # Stored as UNIX epoch seconds; exposed as a naive UTC datetime.
    date = self._struct.get('creation date')
    if date is not None:
        date = datetime.utcfromtimestamp(int(date))
    return date

@creation_date.setter
def creation_date(self, val):
    # timegm() interprets the tuple as UTC (mktime() would apply local zone).
    self._struct['creation date'] = timegm(val.timetuple())

@created_by.getter
def created_by(self):
    return self._struct.get('created by')

@created_by.setter
def created_by(self, val):
    self._struct['created by'] = val

@private.getter
def private(self):
    # Defaults to False when the flag is absent from the info section.
    return self._struct.get('info', {}).get('private', False)

@private.setter
def private(self, val):
    if not val:
        # An absent key means "not private", so simply drop it.
        try:
            del self._struct['info']['private']
        except KeyError:
            pass
    else:
        self._struct['info']['private'] = 1

@name.getter
def name(self):
    return self._struct.get('info', {}).get('name', None)

@name.setter
def name(self, val):
    self._struct['info']['name'] = val
def get_magnet(self, detailed=True):
    """Returns torrent magnet link, consisting of BTIH (BitTorrent Info Hash) URN
    and optional other information.

    :param bool|list|tuple|set detailed:
        For boolean - whether additional info (such as trackers) should be included.
        For iterable - expected allowed parameter names:
            tr - trackers
            ws - webseeds
    """
    def trackers_param():
        # Only primary (first tier) announcers are included.
        tiers = self.announce_urls
        if not tiers:
            return None
        pairs = [('tr', url) for url in tiers[0]]
        if pairs:
            return urlencode(pairs)
        return None

    def webseeds_param():
        pairs = [('ws', url) for url in self.webseeds]
        if pairs:
            return urlencode(pairs)
        return None

    producers = {
        'tr': trackers_param,
        'ws': webseeds_param,
    }

    link = 'magnet:?xt=urn:btih:%s' % self.info_hash

    if not detailed:
        return link

    if isinstance(detailed, _ITERABLE_TYPES):
        wanted = detailed
    else:
        wanted = producers.keys()

    chunks = []
    for param_name in wanted:
        produced = producers[param_name]()
        if produced:
            chunks.append(produced)

    if chunks:
        link = '%s&%s' % (link, '&'.join(chunks))

    return link
def to_string(self):
    """Returns bytes representing torrent file.

    :rtype: bytearray
    """
    return Bencode.encode(self._struct)
@classmethod
def _get_target_files_info(cls, src_path):
    """Gathers the files to be put into a torrent.

    :param str src_path: file or directory path
    :return: tuple ``(target_files, total_size)`` where ``target_files`` is
        a list of ``(filepath, size, relative_path_components)`` tuples for
        every non-empty file, and ``total_size`` is their cumulative size
        in bytes.
    :rtype: tuple
    """
    src_path = u'%s' % src_path  # Force walk() to return unicode names.

    is_dir = isdir(src_path)
    target_files = []

    if is_dir:
        for base, _, files in walk(src_path):
            target_files.extend([join(base, fname) for fname in sorted(files)])
    else:
        target_files.append(src_path)

    target_files_ = []
    total_size = 0
    for fpath in target_files:
        file_size = getsize(fpath)
        if not file_size:
            continue  # Zero-length files are not included.
        # Strip only the leading ``src_path`` prefix. The previous
        # ``fpath.replace(src_path, '')`` removed *every* occurrence of the
        # substring, corrupting paths such as ``data/data/file`` whenever
        # the source directory name repeats inside the tree.
        if fpath.startswith(src_path):
            rel_path = fpath[len(src_path):]
        else:
            rel_path = fpath
        target_files_.append((fpath, file_size, normpath(rel_path).strip(sep).split(sep)))
        total_size += file_size

    return target_files_, total_size
@classmethod
def create_from(cls, src_path):
    """Returns Torrent object created from a file or a directory.

    :param str src_path:
    :rtype: Torrent
    """
    is_dir = isdir(src_path)
    target_files, size_data = cls._get_target_files_info(src_path)

    SIZE_MIN = 32768  # 32 KiB
    SIZE_DEFAULT = 262144  # 256 KiB
    SIZE_MAX = 1048576  # 1 MiB
    CHUNKS_MIN = 1000  # todo use those limits as advised
    CHUNKS_MAX = 2200

    # Pick a piece size: minimum for tiny payloads, default otherwise,
    # clamped at the maximum.
    size_piece = SIZE_MIN
    if size_data > SIZE_MIN:
        size_piece = SIZE_DEFAULT

    if size_piece > SIZE_MAX:
        size_piece = SIZE_MAX

    def read(filepath):
        # Yields chunks sized to top up ``pieces_buffer`` (read from the
        # enclosing scope) to exactly one piece, so pieces may span file
        # boundaries, as the spec requires.
        with open(filepath, 'rb') as f:
            while True:
                chunk = f.read(size_piece - len(pieces_buffer))
                chunk_size = len(chunk)
                if chunk_size == 0:
                    break
                yield chunk

    pieces = bytearray()
    pieces_buffer = bytearray()
    for fpath, _, _ in target_files:
        for chunk in read(fpath):
            pieces_buffer += chunk
            if len(pieces_buffer) == size_piece:
                # SHA1 digest is 20 bytes already; the slice is a no-op guard.
                pieces += sha1(pieces_buffer).digest()[:20]
                pieces_buffer = bytearray()

    if len(pieces_buffer):
        # Hash the final, possibly short, piece.
        pieces += sha1(pieces_buffer).digest()[:20]
        pieces_buffer = bytearray()

    info = {
        'name': basename(src_path),
        'pieces': bytes(pieces),
        'piece length': size_piece,
    }

    if is_dir:
        files = []
        for _, length, path in target_files:
            files.append({'length': length, 'path': path})
        info['files'] = files
    else:
        info['length'] = target_files[0][1]

    torrent = cls({'info': info})
    torrent.created_by = get_app_version()
    torrent.creation_date = datetime.utcnow()

    return torrent
@classmethod
def from_string(cls, string):
    """Alternative constructor to get Torrent object from string.

    :param str string: bencoded torrent contents
    :rtype: Torrent
    """
    decoded_struct = Bencode.read_string(string)
    return cls(decoded_struct)
@classmethod
def from_file(cls, filepath):
    """Alternative constructor to get Torrent object from file.

    :param str filepath: path to a .torrent file
    :rtype: Torrent
    """
    decoded_struct = Bencode.read_file(filepath)
    torrent = cls(decoded_struct)
    # Remember the source path so to_file() can overwrite in place.
    torrent._filepath = filepath
    return torrent
|
idlesign/torrentool
|
torrentool/torrent.py
|
Torrent.create_from
|
python
|
def create_from(cls, src_path):
is_dir = isdir(src_path)
target_files, size_data = cls._get_target_files_info(src_path)
SIZE_MIN = 32768 # 32 KiB
SIZE_DEFAULT = 262144 # 256 KiB
SIZE_MAX = 1048576 # 1 MiB
CHUNKS_MIN = 1000 # todo use those limits as advised
CHUNKS_MAX = 2200
size_piece = SIZE_MIN
if size_data > SIZE_MIN:
size_piece = SIZE_DEFAULT
if size_piece > SIZE_MAX:
size_piece = SIZE_MAX
def read(filepath):
with open(filepath, 'rb') as f:
while True:
chunk = f.read(size_piece - len(pieces_buffer))
chunk_size = len(chunk)
if chunk_size == 0:
break
yield chunk
pieces = bytearray()
pieces_buffer = bytearray()
for fpath, _, _ in target_files:
for chunk in read(fpath):
pieces_buffer += chunk
if len(pieces_buffer) == size_piece:
pieces += sha1(pieces_buffer).digest()[:20]
pieces_buffer = bytearray()
if len(pieces_buffer):
pieces += sha1(pieces_buffer).digest()[:20]
pieces_buffer = bytearray()
info = {
'name': basename(src_path),
'pieces': bytes(pieces),
'piece length': size_piece,
}
if is_dir:
files = []
for _, length, path in target_files:
files.append({'length': length, 'path': path})
info['files'] = files
else:
info['length'] = target_files[0][1]
torrent = cls({'info': info})
torrent.created_by = get_app_version()
torrent.creation_date = datetime.utcnow()
return torrent
|
Returns Torrent object created from a file or a directory.
:param str src_path:
:rtype: Torrent
|
train
|
https://github.com/idlesign/torrentool/blob/78c474c2ecddbad2e3287b390ac8a043957f3563/torrentool/torrent.py#L348-L416
|
[
"def get_app_version():\n \"\"\"Returns full version string including application name\n suitable for putting into Torrent.created_by.\n\n \"\"\"\n from torrentool import VERSION\n return 'torrentool/%s' % '.'.join(map(str, VERSION))\n",
"def _get_target_files_info(cls, src_path):\n src_path = u'%s' % src_path # Force walk() to return unicode names.\n\n is_dir = isdir(src_path)\n target_files = []\n\n if is_dir:\n for base, _, files in walk(src_path):\n target_files.extend([join(base, fname) for fname in sorted(files)])\n\n else:\n target_files.append(src_path)\n\n target_files_ = []\n total_size = 0\n for fpath in target_files:\n file_size = getsize(fpath)\n if not file_size:\n continue\n target_files_.append((fpath, file_size, normpath(fpath.replace(src_path, '')).strip(sep).split(sep)))\n total_size += file_size\n\n return target_files_, total_size\n",
"def read(filepath):\n with open(filepath, 'rb') as f:\n while True:\n chunk = f.read(size_piece - len(pieces_buffer))\n chunk_size = len(chunk)\n if chunk_size == 0:\n break\n yield chunk\n"
] |
class Torrent(object):
"""Represents a torrent file, and exposes utilities to work with it."""
_filepath = None
def __init__(self, dict_struct=None):
dict_struct = dict_struct or {'info': {}}
self._struct = dict_struct
def __str__(self):
return 'Torrent: %s' % self.name
announce_urls = property()
"""List of lists of tracker announce URLs."""
comment = property()
"""Optional. Free-form textual comments of the author."""
creation_date = property()
"""Optional. The creation time of the torrent, in standard UNIX epoch format. UTC."""
created_by = property()
"""Optional. Name and version of the program used to create the .torrent"""
private = property()
"""Optional. If True the client MUST publish its presence to get other peers
ONLY via the trackers explicitly described in the metainfo file. If False or is not present,
the client may obtain peer from other means, e.g. PEX peer exchange, dht.
"""
name = property()
"""Torrent name (title)."""
webseeds = property()
"""A list of URLs where torrent data can be retrieved.
See also: Torrent.httpseeds
http://bittorrent.org/beps/bep_0019.html
"""
httpseeds = property()
"""A list of URLs where torrent data can be retrieved.
See also and prefer Torrent.webseeds
http://bittorrent.org/beps/bep_0017.html
"""
def _list_getter(self, key):
return self._struct.get(key, [])
def _list_setter(self, key, val):
if val is None:
try:
del self._struct[key]
return
except KeyError:
return
if not isinstance(val, _ITERABLE_TYPES):
val = [val]
self._struct[key] = val
@webseeds.getter
def webseeds(self):
return self._list_getter('url-list')
@webseeds.setter
def webseeds(self, val):
self._list_setter('url-list', val)
@httpseeds.getter
def httpseeds(self):
return self._list_getter('httpseeds')
@httpseeds.setter
def httpseeds(self, val):
self._list_setter('httpseeds', val)
@property
def files(self):
"""Files in torrent.
List of namedtuples (filepath, size).
:rtype: list[TorrentFile]
"""
files = []
info = self._struct.get('info')
if not info:
return files
if 'files' in info:
base = info['name']
for f in info['files']:
files.append(TorrentFile(join(base, *f['path']), f['length']))
else:
files.append(TorrentFile(info['name'], info['length']))
return files
@property
def total_size(self):
"""Total size of all files in torrent."""
return reduce(lambda prev, curr: prev + curr[1], self.files, 0)
@property
def info_hash(self):
"""Hash of torrent file info section. Also known as torrent hash."""
info = self._struct.get('info')
if not info:
return None
return sha1(Bencode.encode(info)).hexdigest()
@property
def magnet_link(self):
"""Magnet link using BTIH (BitTorrent Info Hash) URN."""
return self.get_magnet(detailed=False)
@announce_urls.getter
def announce_urls(self):
"""List of lists of announce (tracker) URLs.
First inner list is considered as primary announcers list,
the following lists as back-ups.
http://bittorrent.org/beps/bep_0012.html
"""
urls = self._struct.get('announce-list')
if not urls:
urls = self._struct.get('announce')
if not urls:
return []
urls = [[urls]]
return urls
@announce_urls.setter
def announce_urls(self, val):
self._struct['announce'] = ''
self._struct['announce-list'] = []
def set_single(val):
del self._struct['announce-list']
self._struct['announce'] = val
if isinstance(val, _ITERABLE_TYPES):
length = len(val)
if length:
if length == 1:
set_single(val[0])
else:
for item in val:
if not isinstance(item, _ITERABLE_TYPES):
item = [item]
self._struct['announce-list'].append(item)
self._struct['announce'] = val[0]
else:
set_single(val)
@comment.getter
def comment(self):
return self._struct.get('comment')
@comment.setter
def comment(self, val):
self._struct['comment'] = val
@creation_date.getter
def creation_date(self):
date = self._struct.get('creation date')
if date is not None:
date = datetime.utcfromtimestamp(int(date))
return date
@creation_date.setter
def creation_date(self, val):
self._struct['creation date'] = timegm(val.timetuple())
@created_by.getter
def created_by(self):
return self._struct.get('created by')
@created_by.setter
def created_by(self, val):
self._struct['created by'] = val
@private.getter
def private(self):
return self._struct.get('info', {}).get('private', False)
@private.setter
def private(self, val):
if not val:
try:
del self._struct['info']['private']
except KeyError:
pass
else:
self._struct['info']['private'] = 1
@name.getter
def name(self):
return self._struct.get('info', {}).get('name', None)
@name.setter
def name(self, val):
self._struct['info']['name'] = val
def get_magnet(self, detailed=True):
"""Returns torrent magnet link, consisting of BTIH (BitTorrent Info Hash) URN
anr optional other information.
:param bool|list|tuple|set detailed:
For boolean - whether additional info (such as trackers) should be included.
For iterable - expected allowed parameter names:
tr - trackers
ws - webseeds
"""
result = 'magnet:?xt=urn:btih:' + self.info_hash
def add_tr():
urls = self.announce_urls
if not urls:
return
trackers = []
urls = urls[0] # Only primary announcers are enough.
for url in urls:
trackers.append(('tr', url))
if trackers:
return urlencode(trackers)
def add_ws():
webseeds = [('ws', url) for url in self.webseeds]
if webseeds:
return urlencode(webseeds)
params_map = {
'tr': add_tr,
'ws': add_ws,
}
if detailed:
details = []
if isinstance(detailed, _ITERABLE_TYPES):
requested_params = detailed
else:
requested_params = params_map.keys()
for param in requested_params:
param_val = params_map[param]()
param_val and details.append(param_val)
if details:
result += '&%s' % '&'.join(details)
return result
def to_file(self, filepath=None):
    """Writes Torrent object into a file: either the given ``filepath``,
    or the one the object was previously loaded from / saved to.

    :param filepath: target path; optional if the object already has one
    :raises TorrentError: if no filepath is known
    """
    if filepath is None and self._filepath is None:
        raise TorrentError('Unable to save torrent to file: no filepath supplied.')

    if filepath is not None:
        self._filepath = filepath

    with open(self._filepath, mode='wb') as f:
        f.write(self.to_string())
def to_string(self):
"""Returns bytes representing torrent file.
:param str encoding: Encoding used by strings in Torrent object.
:rtype: bytearray
"""
return Bencode.encode(self._struct)
@classmethod
def _get_target_files_info(cls, src_path):
src_path = u'%s' % src_path # Force walk() to return unicode names.
is_dir = isdir(src_path)
target_files = []
if is_dir:
for base, _, files in walk(src_path):
target_files.extend([join(base, fname) for fname in sorted(files)])
else:
target_files.append(src_path)
target_files_ = []
total_size = 0
for fpath in target_files:
file_size = getsize(fpath)
if not file_size:
continue
target_files_.append((fpath, file_size, normpath(fpath.replace(src_path, '')).strip(sep).split(sep)))
total_size += file_size
return target_files_, total_size
@classmethod
@classmethod
def from_string(cls, string):
"""Alternative constructor to get Torrent object from string.
:param str string:
:rtype: Torrent
"""
return cls(Bencode.read_string(string))
@classmethod
def from_file(cls, filepath):
"""Alternative constructor to get Torrent object from file.
:param str filepath:
:rtype: Torrent
"""
torrent = cls(Bencode.read_file(filepath))
torrent._filepath = filepath
return torrent
|
idlesign/torrentool
|
torrentool/torrent.py
|
Torrent.from_file
|
python
|
def from_file(cls, filepath):
torrent = cls(Bencode.read_file(filepath))
torrent._filepath = filepath
return torrent
|
Alternative constructor to get Torrent object from file.
:param str filepath:
:rtype: Torrent
|
train
|
https://github.com/idlesign/torrentool/blob/78c474c2ecddbad2e3287b390ac8a043957f3563/torrentool/torrent.py#L428-L436
|
[
"def read_file(cls, filepath):\n \"\"\"Decodes bencoded data of a given file.\n\n Returns decoded structure(s).\n\n :param str filepath:\n :rtype: list\n \"\"\"\n with open(filepath, mode='rb') as f:\n contents = f.read()\n return cls.decode(contents)\n"
] |
class Torrent(object):
"""Represents a torrent file, and exposes utilities to work with it."""
_filepath = None
def __init__(self, dict_struct=None):
dict_struct = dict_struct or {'info': {}}
self._struct = dict_struct
def __str__(self):
return 'Torrent: %s' % self.name
announce_urls = property()
"""List of lists of tracker announce URLs."""
comment = property()
"""Optional. Free-form textual comments of the author."""
creation_date = property()
"""Optional. The creation time of the torrent, in standard UNIX epoch format. UTC."""
created_by = property()
"""Optional. Name and version of the program used to create the .torrent"""
private = property()
"""Optional. If True the client MUST publish its presence to get other peers
ONLY via the trackers explicitly described in the metainfo file. If False or is not present,
the client may obtain peer from other means, e.g. PEX peer exchange, dht.
"""
name = property()
"""Torrent name (title)."""
webseeds = property()
"""A list of URLs where torrent data can be retrieved.
See also: Torrent.httpseeds
http://bittorrent.org/beps/bep_0019.html
"""
httpseeds = property()
"""A list of URLs where torrent data can be retrieved.
See also and prefer Torrent.webseeds
http://bittorrent.org/beps/bep_0017.html
"""
def _list_getter(self, key):
return self._struct.get(key, [])
def _list_setter(self, key, val):
if val is None:
try:
del self._struct[key]
return
except KeyError:
return
if not isinstance(val, _ITERABLE_TYPES):
val = [val]
self._struct[key] = val
@webseeds.getter
def webseeds(self):
return self._list_getter('url-list')
@webseeds.setter
def webseeds(self, val):
self._list_setter('url-list', val)
@httpseeds.getter
def httpseeds(self):
return self._list_getter('httpseeds')
@httpseeds.setter
def httpseeds(self, val):
self._list_setter('httpseeds', val)
@property
def files(self):
"""Files in torrent.
List of namedtuples (filepath, size).
:rtype: list[TorrentFile]
"""
files = []
info = self._struct.get('info')
if not info:
return files
if 'files' in info:
base = info['name']
for f in info['files']:
files.append(TorrentFile(join(base, *f['path']), f['length']))
else:
files.append(TorrentFile(info['name'], info['length']))
return files
@property
def total_size(self):
"""Total size of all files in torrent."""
return reduce(lambda prev, curr: prev + curr[1], self.files, 0)
@property
def info_hash(self):
"""Hash of torrent file info section. Also known as torrent hash."""
info = self._struct.get('info')
if not info:
return None
return sha1(Bencode.encode(info)).hexdigest()
@property
def magnet_link(self):
"""Magnet link using BTIH (BitTorrent Info Hash) URN."""
return self.get_magnet(detailed=False)
@announce_urls.getter
def announce_urls(self):
"""List of lists of announce (tracker) URLs.
First inner list is considered as primary announcers list,
the following lists as back-ups.
http://bittorrent.org/beps/bep_0012.html
"""
urls = self._struct.get('announce-list')
if not urls:
urls = self._struct.get('announce')
if not urls:
return []
urls = [[urls]]
return urls
@announce_urls.setter
def announce_urls(self, val):
self._struct['announce'] = ''
self._struct['announce-list'] = []
def set_single(val):
del self._struct['announce-list']
self._struct['announce'] = val
if isinstance(val, _ITERABLE_TYPES):
length = len(val)
if length:
if length == 1:
set_single(val[0])
else:
for item in val:
if not isinstance(item, _ITERABLE_TYPES):
item = [item]
self._struct['announce-list'].append(item)
self._struct['announce'] = val[0]
else:
set_single(val)
@comment.getter
def comment(self):
return self._struct.get('comment')
@comment.setter
def comment(self, val):
self._struct['comment'] = val
@creation_date.getter
def creation_date(self):
date = self._struct.get('creation date')
if date is not None:
date = datetime.utcfromtimestamp(int(date))
return date
@creation_date.setter
def creation_date(self, val):
self._struct['creation date'] = timegm(val.timetuple())
@created_by.getter
def created_by(self):
return self._struct.get('created by')
@created_by.setter
def created_by(self, val):
self._struct['created by'] = val
@private.getter
def private(self):
return self._struct.get('info', {}).get('private', False)
@private.setter
def private(self, val):
if not val:
try:
del self._struct['info']['private']
except KeyError:
pass
else:
self._struct['info']['private'] = 1
@name.getter
def name(self):
return self._struct.get('info', {}).get('name', None)
@name.setter
def name(self, val):
self._struct['info']['name'] = val
def get_magnet(self, detailed=True):
"""Returns torrent magnet link, consisting of BTIH (BitTorrent Info Hash) URN
anr optional other information.
:param bool|list|tuple|set detailed:
For boolean - whether additional info (such as trackers) should be included.
For iterable - expected allowed parameter names:
tr - trackers
ws - webseeds
"""
result = 'magnet:?xt=urn:btih:' + self.info_hash
def add_tr():
urls = self.announce_urls
if not urls:
return
trackers = []
urls = urls[0] # Only primary announcers are enough.
for url in urls:
trackers.append(('tr', url))
if trackers:
return urlencode(trackers)
def add_ws():
webseeds = [('ws', url) for url in self.webseeds]
if webseeds:
return urlencode(webseeds)
params_map = {
'tr': add_tr,
'ws': add_ws,
}
if detailed:
details = []
if isinstance(detailed, _ITERABLE_TYPES):
requested_params = detailed
else:
requested_params = params_map.keys()
for param in requested_params:
param_val = params_map[param]()
param_val and details.append(param_val)
if details:
result += '&%s' % '&'.join(details)
return result
def to_file(self, filepath=None):
"""Writes Torrent object into file, either
:param filepath:
"""
if filepath is None and self._filepath is None:
raise TorrentError('Unable to save torrent to file: no filepath supplied.')
if filepath is not None:
self._filepath = filepath
with open(self._filepath, mode='wb') as f:
f.write(self.to_string())
def to_string(self):
"""Returns bytes representing torrent file.
:param str encoding: Encoding used by strings in Torrent object.
:rtype: bytearray
"""
return Bencode.encode(self._struct)
@classmethod
def _get_target_files_info(cls, src_path):
src_path = u'%s' % src_path # Force walk() to return unicode names.
is_dir = isdir(src_path)
target_files = []
if is_dir:
for base, _, files in walk(src_path):
target_files.extend([join(base, fname) for fname in sorted(files)])
else:
target_files.append(src_path)
target_files_ = []
total_size = 0
for fpath in target_files:
file_size = getsize(fpath)
if not file_size:
continue
target_files_.append((fpath, file_size, normpath(fpath.replace(src_path, '')).strip(sep).split(sep)))
total_size += file_size
return target_files_, total_size
@classmethod
def create_from(cls, src_path):
    """Returns Torrent object created from a file or a directory.

    :param str src_path: Path to the source file or directory.
    :rtype: Torrent
    """
    is_dir = isdir(src_path)
    # Triples of (absolute path, size, relative path components); empty files excluded.
    target_files, size_data = cls._get_target_files_info(src_path)

    SIZE_MIN = 32768  # 32 KiB
    SIZE_DEFAULT = 262144  # 256 KiB
    SIZE_MAX = 1048576  # 1 MiB

    CHUNKS_MIN = 1000  # todo use those limits as advised
    CHUNKS_MAX = 2200

    # Piece size selection: tiny payloads get the minimum, everything else
    # the default. NOTE(review): the SIZE_MAX clamp below is unreachable as
    # written, since size_piece never exceeds SIZE_DEFAULT here.
    size_piece = SIZE_MIN
    if size_data > SIZE_MIN:
        size_piece = SIZE_DEFAULT

    if size_piece > SIZE_MAX:
        size_piece = SIZE_MAX

    def read(filepath):
        # Yields chunks sized to top `pieces_buffer` up to exactly one piece;
        # the closure reads the *current* buffer length, so pieces may span
        # file boundaries.
        with open(filepath, 'rb') as f:
            while True:
                chunk = f.read(size_piece - len(pieces_buffer))
                chunk_size = len(chunk)
                if chunk_size == 0:
                    break  # EOF for this file.
                yield chunk

    pieces = bytearray()         # Concatenated 20-byte SHA-1 digests, one per piece.
    pieces_buffer = bytearray()  # Bytes accumulated toward the current piece.

    for fpath, _, _ in target_files:
        for chunk in read(fpath):
            pieces_buffer += chunk

            # A full piece is hashed and the buffer reset *before* the
            # generator resumes, so read() never sees a full buffer.
            if len(pieces_buffer) == size_piece:
                pieces += sha1(pieces_buffer).digest()[:20]
                pieces_buffer = bytearray()

    # Hash the trailing partial piece, if any.
    if len(pieces_buffer):
        pieces += sha1(pieces_buffer).digest()[:20]
        pieces_buffer = bytearray()

    info = {
        'name': basename(src_path),
        'pieces': bytes(pieces),
        'piece length': size_piece,
    }

    if is_dir:
        # Multi-file layout: one {length, path} entry per file.
        files = []

        for _, length, path in target_files:
            files.append({'length': length, 'path': path})

        info['files'] = files

    else:
        # Single-file layout: just the total length.
        info['length'] = target_files[0][1]

    torrent = cls({'info': info})
    torrent.created_by = get_app_version()
    torrent.creation_date = datetime.utcnow()

    return torrent
@classmethod
def from_string(cls, string):
    """Alternative constructor: build a Torrent from bencoded string data.

    :param str string: Bencoded torrent contents.
    :rtype: Torrent
    """
    decoded = Bencode.read_string(string)
    return cls(decoded)
@classmethod
|
idlesign/torrentool
|
torrentool/utils.py
|
humanize_filesize
|
python
|
def humanize_filesize(bytes_size):
    """Return ``bytes_size`` formatted as a human readable string.

    :param int bytes_size: Size in bytes; falsy input yields ``'0 B'``.
    :rtype: str
    """
    if not bytes_size:
        return '0 B'
    units = ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB')
    # Largest power of 1024 that keeps the scaled value >= 1.
    exponent = int(math.floor(math.log(bytes_size, 1024)))
    scaled = round(bytes_size / math.pow(1024, exponent), 2)
    unit = units[exponent]
    return '%s %s' % (scaled, unit)
|
Returns human readable filesize.
:param int bytes_size:
:rtype: str
|
train
|
https://github.com/idlesign/torrentool/blob/78c474c2ecddbad2e3287b390ac8a043957f3563/torrentool/utils.py#L20-L34
| null |
import math
from os import path
from .exceptions import RemoteUploadError, RemoteDownloadError
OPEN_TRACKERS_FILENAME = 'open_trackers.ini'
REMOTE_TIMEOUT = 4
def get_app_version():
    """Return the application-stamped version string (``torrentool/X.Y.Z``)
    suitable for putting into ``Torrent.created_by``.
    """
    # Imported lazily at call time, as in the file's convention.
    from torrentool import VERSION
    dotted = '.'.join(str(part) for part in VERSION)
    return 'torrentool/%s' % dotted
def humanize_filesize(bytes_size):
    """Returns human readable filesize.

    :param int bytes_size: Size in bytes; falsy input yields '0 B'.
    :rtype: str
    """
    if not bytes_size:
        return '0 B'
    names = ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB')
    # Largest 1024-based unit that keeps the scaled value >= 1.
    name_idx = int(math.floor(math.log(bytes_size, 1024)))
    # Scale into that unit, two decimal places.
    size = round(bytes_size / math.pow(1024, name_idx), 2)
    return '%s %s' % (size, names[name_idx])
def upload_to_cache_server(fpath):
    """Uploads .torrent file to a cache server.

    Returns upload file URL.

    :param str fpath: Path to the .torrent file to upload.
    :rtype: str
    :raises RemoteUploadError: If `requests` is unavailable or the upload fails.
    """
    url_base = 'http://torrage.info'
    url_upload = '%s/autoupload.php' % url_base
    url_download = '%s/torrent.php?h=' % url_base
    file_field = 'torrent'

    # Import separately: in the original code, a failed `import requests`
    # made the except clause `(ImportError, requests.RequestException)`
    # raise NameError (the `requests` name was never bound).
    try:
        import requests
    except ImportError as e:
        raise RemoteUploadError('Unable to upload to %s: %s' % (url_upload, e))

    try:
        # Close the file handle deterministically instead of leaking it.
        with open(fpath, 'rb') as f:
            response = requests.post(
                url_upload, files={file_field: f}, timeout=REMOTE_TIMEOUT)
        response.raise_for_status()
        info_cache = response.text
        return url_download + info_cache
    except requests.RequestException as e:
        # Now trace is lost. `raise from` to consider.
        raise RemoteUploadError('Unable to upload to %s: %s' % (url_upload, e))
def get_open_trackers_from_remote():
    """Returns open trackers announce URLs list from remote repo.

    :rtype: list
    :raises RemoteDownloadError: If `requests` is unavailable or the download fails.
    """
    url_base = 'https://raw.githubusercontent.com/idlesign/torrentool/master/torrentool/repo'
    url = '%s/%s' % (url_base, OPEN_TRACKERS_FILENAME)

    # Import separately: in the original code, a failed `import requests`
    # made the except clause `(ImportError, requests.RequestException)`
    # raise NameError (the `requests` name was never bound).
    try:
        import requests
    except ImportError as e:
        raise RemoteDownloadError('Unable to download from %s: %s' % (url, e))

    try:
        response = requests.get(url, timeout=REMOTE_TIMEOUT)
        response.raise_for_status()
        open_trackers = response.text.splitlines()
    except requests.RequestException as e:
        # Now trace is lost. `raise from` to consider.
        raise RemoteDownloadError('Unable to download from %s: %s' % (url, e))

    return open_trackers
def get_open_trackers_from_local():
    """Returns open trackers announce URLs list from local backup."""
    backup_path = path.join(path.dirname(__file__), 'repo', OPEN_TRACKERS_FILENAME)
    with open(backup_path) as f:
        # One announce URL per line; strip surrounding whitespace/newlines.
        return [line.strip() for line in f.readlines()]
|
idlesign/torrentool
|
torrentool/utils.py
|
upload_to_cache_server
|
python
|
def upload_to_cache_server(fpath):
    """Uploads .torrent file to a cache server and returns its URL.

    :param str fpath: Path to the .torrent file to upload.
    :rtype: str
    :raises RemoteUploadError: If `requests` is unavailable or the upload fails.
    """
    url_base = 'http://torrage.info'
    url_upload = '%s/autoupload.php' % url_base
    url_download = '%s/torrent.php?h=' % url_base
    file_field = 'torrent'

    # Import separately: in the original code, a failed `import requests`
    # made the except clause `(ImportError, requests.RequestException)`
    # raise NameError (the `requests` name was never bound).
    try:
        import requests
    except ImportError as e:
        raise RemoteUploadError('Unable to upload to %s: %s' % (url_upload, e))

    try:
        # Close the file handle deterministically instead of leaking it.
        with open(fpath, 'rb') as f:
            response = requests.post(
                url_upload, files={file_field: f}, timeout=REMOTE_TIMEOUT)
        response.raise_for_status()
        info_cache = response.text
        return url_download + info_cache
    except requests.RequestException as e:
        # Now trace is lost. `raise from` to consider.
        raise RemoteUploadError('Unable to upload to %s: %s' % (url_upload, e))
|
Uploads .torrent file to a cache server.
Returns upload file URL.
:rtype: str
|
train
|
https://github.com/idlesign/torrentool/blob/78c474c2ecddbad2e3287b390ac8a043957f3563/torrentool/utils.py#L37-L61
| null |
import math
from os import path
from .exceptions import RemoteUploadError, RemoteDownloadError
OPEN_TRACKERS_FILENAME = 'open_trackers.ini'
REMOTE_TIMEOUT = 4
# NOTE(review): duplicate of get_app_version earlier in this dump (repeated dataset row).
def get_app_version():
    """Returns full version string including application name
    suitable for putting into Torrent.created_by.
    """
    # Imported lazily at call time -- presumably to avoid an import cycle
    # with the package __init__; TODO confirm.
    from torrentool import VERSION
    # VERSION is an iterable of version components joined as X.Y.Z.
    return 'torrentool/%s' % '.'.join(map(str, VERSION))
# NOTE(review): duplicate of humanize_filesize earlier in this dump (repeated dataset row).
def humanize_filesize(bytes_size):
    """Returns human readable filesize.

    :param int bytes_size: Size in bytes; falsy input yields '0 B'.
    :rtype: str
    """
    if not bytes_size:
        return '0 B'
    names = ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB')
    # Largest 1024-based unit that keeps the scaled value >= 1.
    name_idx = int(math.floor(math.log(bytes_size, 1024)))
    # Scale into that unit, two decimal places.
    size = round(bytes_size / math.pow(1024, name_idx), 2)
    return '%s %s' % (size, names[name_idx])
def get_open_trackers_from_remote():
    """Returns open trackers announce URLs list from remote repo.

    :rtype: list
    :raises RemoteDownloadError: If `requests` is unavailable or the download fails.
    """
    url_base = 'https://raw.githubusercontent.com/idlesign/torrentool/master/torrentool/repo'
    url = '%s/%s' % (url_base, OPEN_TRACKERS_FILENAME)

    # Import separately: in the original code, a failed `import requests`
    # made the except clause `(ImportError, requests.RequestException)`
    # raise NameError (the `requests` name was never bound).
    try:
        import requests
    except ImportError as e:
        raise RemoteDownloadError('Unable to download from %s: %s' % (url, e))

    try:
        response = requests.get(url, timeout=REMOTE_TIMEOUT)
        response.raise_for_status()
        open_trackers = response.text.splitlines()
    except requests.RequestException as e:
        # Now trace is lost. `raise from` to consider.
        raise RemoteDownloadError('Unable to download from %s: %s' % (url, e))

    return open_trackers
# NOTE(review): duplicate of get_open_trackers_from_local earlier in this dump (repeated dataset row).
def get_open_trackers_from_local():
    """Returns open trackers announce URLs list from local backup.

    Reads the ``repo/open_trackers.ini`` file shipped alongside this module.

    :rtype: list
    """
    with open(path.join(path.dirname(__file__), 'repo', OPEN_TRACKERS_FILENAME)) as f:
        # One announce URL per line; strip surrounding whitespace/newlines.
        open_trackers = map(str.strip, f.readlines())
    return list(open_trackers)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.