def get_highest_version(versions):
"""
Returns highest available version for a package in a list of versions
Uses pkg_resources to parse the versions
@param versions: List of PyPI package versions
@type versions: List of strings
@returns: string of a PyPI package version
"""
sorted_versions = []
for ver in versions:
sorted_versions.append((pkg_resources.parse_version(ver), ver))
sorted_versions = sorted(sorted_versions)
sorted_versions.reverse()
return sorted_versions[0][1]
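A quick usage sketch for get_highest_version (the version list is illustrative):

import pkg_resources

versions = ["0.10", "0.9", "0.10rc1"]
# parse_version() sorts release candidates below final releases,
# so "0.10" is the highest version here.
print(get_highest_version(versions))  # -> "0.10"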
def get_distributions(self, show, pkg_name="", version=""):
"""
Yield installed packages
@param show: Type of package(s) to show; active, nonactive or all
@type show: string: "active", "nonactive", "all"
@param pkg_name: PyPI project name
@type pkg_name: string
@param version: project's PyPI version
@type version: string
@returns: yields tuples of distribution and True or False depending
on active state. e.g. (dist, True)
"""
#pylint: disable-msg=W0612
#'name' is a placeholder for the sorted list
for name, dist in self.get_alpha(show, pkg_name, version):
ver = dist.version
for package in self.environment[dist.project_name]:
if ver == package.version:
if show == "nonactive" and dist not in self.working_set:
yield (dist, self.query_activated(dist))
elif show == "active" and dist in self.working_set:
yield (dist, self.query_activated(dist))
elif show == "all":
yield (dist, self.query_activated(dist))
def get_alpha(self, show, pkg_name="", version=""):
"""
Return list of alphabetized packages
@param show: Type of package(s) to show; active, nonactive or all
@type show: string: "active", "nonactive", "all"
@param pkg_name: PyPI project name
@type pkg_name: string
@param version: project's PyPI version
@type version: string
@returns: Alphabetized list of tuples. Each tuple contains
a string and a pkg_resources Distribution object.
The string is the project name + version.
"""
alpha_list = []
for dist in self.get_packages(show):
if pkg_name and dist.project_name != pkg_name:
#Only checking for a single package name
pass
elif version and dist.version != version:
#Only checking for a single version of a package
pass
else:
alpha_list.append((dist.project_name + dist.version, dist))
alpha_list.sort()
return alpha_list
def get_packages(self, show):
"""
Return list of Distributions filtered by active status or all
@param show: Type of package(s) to show; active, nonactive or all
@type show: string: "active", "nonactive", "all"
@returns: list of pkg_resources Distribution objects
"""
if show == 'nonactive' or show == "all":
all_packages = []
for package in self.environment:
#There may be multiple versions of the same package
for i in range(len(self.environment[package])):
if self.environment[package][i]:
all_packages.append(self.environment[package][i])
return all_packages
else:
# Only activated packages
return self.working_set
def case_sensitive_name(self, package_name):
"""
Return case-sensitive package name given any-case package name
@param package_name: PyPI project name
@type package_name: string
"""
if len(self.environment[package_name]):
return self.environment[package_name][0].project_name
def cache_incr(self, key):
"""
Non-atomic cache increment operation. Not optimal but
consistent across different cache backends.
"""
cache.set(key, cache.get(key, 0) + 1, self.expire_after())
def call_plugins(plugins, method, *arg, **kw):
"""Call all method on plugins in list, that define it, with provided
arguments. The first response that is not None is returned.
"""
for plug in plugins:
func = getattr(plug, method, None)
if func is None:
continue
#LOG.debug("call plugin %s: %s", plug.name, method)
result = func(*arg, **kw)
if result is not None:
return result
return None
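A small usage sketch for call_plugins (both plugin classes are hypothetical):

class SilentPlugin:
    name = "silent"
    # defines no greet() method, so call_plugins skips it

class GreeterPlugin:
    name = "greeter"
    def greet(self, who):
        return "hello %s" % who

# The first non-None result is returned: prints "hello world"
print(call_plugins([SilentPlugin(), GreeterPlugin()], "greet", "world"))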
def load_plugins(builtin=True, others=True):
"""Load plugins, either builtin, others, or both.
"""
for entry_point in pkg_resources.iter_entry_points('yolk.plugins'):
#LOG.debug("load plugin %s" % entry_point)
try:
plugin = entry_point.load()
except KeyboardInterrupt:
raise
except Exception as err_msg:
# never want a plugin load to exit yolk
# but we can't log here because the logger is not yet
# configured
warn("Unable to load plugin %s: %s" % \
(entry_point, err_msg), RuntimeWarning)
continue
if plugin.__module__.startswith('yolk.plugins'):
if builtin:
yield plugin
elif others:
yield plugin
def s3_connect(bucket_name, s3_access_key_id, s3_secret_key):
""" Returns a Boto connection to the provided S3 bucket. """
conn = connect_s3(s3_access_key_id, s3_secret_key)
try:
return conn.get_bucket(bucket_name)
except S3ResponseError as e:
if e.status == 403:
raise Exception("Bad Amazon S3 credentials.")
raise
def s3_list(s3_bucket, s3_access_key_id, s3_secret_key, prefix=None):
""" Lists the contents of the S3 bucket that end in .tbz and match
the passed prefix, if any. """
bucket = s3_connect(s3_bucket, s3_access_key_id, s3_secret_key)
return sorted([key.name for key in bucket.list()
if key.name.endswith(".tbz")
and (prefix is None or key.name.startswith(prefix))])
def s3_download(output_file_path, s3_bucket, s3_access_key_id, s3_secret_key,
s3_file_key=None, prefix=None):
""" Downloads the file matching the provided key, in the provided bucket,
from Amazon S3.
If s3_file_key is None, it downloads the last file
from the provided bucket with the .tbz extension, filtering by
prefix if it is provided. """
bucket = s3_connect(s3_bucket, s3_access_key_id, s3_secret_key)
if not s3_file_key:
keys = s3_list(s3_bucket, s3_access_key_id, s3_secret_key, prefix)
if not keys:
raise Exception("Target S3 bucket is empty")
s3_file_key = keys[-1]
key = Key(bucket, s3_file_key)
#Write in binary mode since the key holds a .tbz archive
with open(output_file_path, "wb") as f:
f.write(key.read())
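A hedged usage sketch for the S3 helpers above (bucket name, credentials and prefix are placeholders):

backups = s3_list("my-backups", "MY_ACCESS_KEY", "MY_SECRET_KEY", prefix="db-")
print(backups)  # e.g. ["db-2016-01-01.tbz", "db-2016-01-02.tbz"]

# Download the newest matching .tbz:
s3_download("latest.tbz", "my-backups", "MY_ACCESS_KEY", "MY_SECRET_KEY",
            prefix="db-")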
def s3_upload(source_file_path, bucket_name, s3_access_key_id, s3_secret_key):
""" Uploads the to Amazon S3 the contents of the provided file, keyed
with the name of the file. """
key = s3_key(bucket_name, s3_access_key_id, s3_secret_key)
file_name = source_file_path.split("/")[-1]
key.key = file_name
if key.exists():
raise Exception("s3 key %s already exists for current period."
% (file_name))
key.set_contents_from_filename(source_file_path)
def fix_pyqt5_QGraphicsItem_itemChange():
"""
Attempt to remedy:
https://www.riverbankcomputing.com/pipermail/pyqt/2016-February/037015.html
"""
from PyQt5.QtWidgets import QGraphicsObject, QGraphicsItem
class Obj(QGraphicsObject):
def itemChange(self, change, value):
return QGraphicsObject.itemChange(self, change, value)
obj = Obj()
parent = Obj()
obj.setParentItem(parent)
if obj.parentItem() is None:
# There was probably already some signal defined using QObject's
# subclass from QtWidgets.
# We will monkey patch the QGraphicsItem.itemChange and explicitly
# sip.cast all input and output QGraphicsItem instances
import sip
QGraphicsItem_itemChange_old = QGraphicsItem.itemChange
# All the QGraphicsItem.ItemChange flags which accept/return
# a QGraphicsItem
changeset = {
QGraphicsItem.ItemParentChange,
QGraphicsItem.ItemParentHasChanged,
QGraphicsItem.ItemChildAddedChange,
QGraphicsItem.ItemChildRemovedChange,
}
def QGraphicsItem_itemChange(self, change, value):
if change in changeset:
if isinstance(value, QGraphicsItem):
value = sip.cast(value, QGraphicsItem)
rval = QGraphicsItem_itemChange_old(self, change, value)
if isinstance(rval, QGraphicsItem):
rval = sip.cast(rval, QGraphicsItem)
return rval
else:
return QGraphicsItem_itemChange_old(self, change, value)
QGraphicsItem.itemChange = QGraphicsItem_itemChange
warnings.warn("Monkey patching QGraphicsItem.itemChange",
RuntimeWarning)
def setup_opt_parser():
"""
Setup the optparser
@returns: opt_parser.OptionParser
"""
#pylint: disable-msg=C0301
#line too long
usage = "usage: %prog [options]"
opt_parser = optparse.OptionParser(usage=usage)
opt_parser.add_option("--version", action='store_true', dest=
"yolk_version", default=False, help=
"Show yolk version and exit.")
opt_parser.add_option("--debug", action='store_true', dest=
"debug", default=False, help=
"Show debugging information.")
opt_parser.add_option("-q", "--quiet", action='store_true', dest=
"quiet", default=False, help=
"Show less output.")
group_local = optparse.OptionGroup(opt_parser,
"Query installed Python packages",
"The following options show information about installed Python packages. Activated packages are normal packages on sys.path that can be imported. Non-activated packages need 'pkg_resources.require()' before they can be imported, such as packages installed with 'easy_install --multi-version'. PKG_SPEC can be either a package name or package name and version e.g. Paste==0.9")
group_local.add_option("-l", "--list", action='store_true', dest=
"show_all", default=False, help=
"List all Python packages installed by distutils or setuptools. Use PKG_SPEC to narrow results.")
group_local.add_option("-a", "--activated", action='store_true',
dest="show_active", default=False, help=
'List activated packages installed by distutils or ' +
'setuptools. Use PKG_SPEC to narrow results.')
group_local.add_option("-n", "--non-activated", action='store_true',
dest="show_non_active", default=False, help=
'List non-activated packages installed by distutils or ' +
'setuptools. Use PKG_SPEC to narrow results.')
group_local.add_option("-m", "--metadata", action='store_true', dest=
"metadata", default=False, help=
'Show all metadata for packages installed by ' +
'setuptools (use with -l -a or -n)')
group_local.add_option("-f", "--fields", action="store", dest=
"fields", default=False, help=
'Show specific metadata fields. ' +
'(use with -m or -M)')
group_local.add_option("-d", "--depends", action='store', dest=
"show_deps", metavar='PKG_SPEC',
help= "Show dependencies for a package installed by " +
"setuptools if they are available.")
group_local.add_option("--entry-points", action='store',
dest="show_entry_points", default=False, help=
'List entry points for a module. e.g. --entry-points nose.plugins',
metavar="MODULE")
group_local.add_option("--entry-map", action='store',
dest="show_entry_map", default=False, help=
'List entry map for a package. e.g. --entry-map yolk',
metavar="PACKAGE_NAME")
group_pypi = optparse.OptionGroup(opt_parser,
"PyPI (Cheese Shop) options",
"The following options query the Python Package Index:")
group_pypi.add_option("-C", "--changelog", action='store',
dest="show_pypi_changelog", metavar='HOURS',
default=False, help=
"Show detailed ChangeLog for PyPI for last n hours. ")
group_pypi.add_option("-D", "--download-links", action='store',
metavar="PKG_SPEC", dest="show_download_links",
default=False, help=
"Show download URL's for package listed on PyPI. Use with -T to specify egg, source etc.")
group_pypi.add_option("-F", "--fetch-package", action='store',
metavar="PKG_SPEC", dest="fetch",
default=False, help=
"Download package source or egg. You can specify a file type with -T")
group_pypi.add_option("-H", "--browse-homepage", action='store',
metavar="PKG_SPEC", dest="browse_website",
default=False, help=
"Launch web browser at home page for package.")
group_pypi.add_option("-I", "--pypi-index", action='store',
dest="pypi_index",
default=False, help=
"Specify PyPI mirror for package index.")
group_pypi.add_option("-L", "--latest-releases", action='store',
dest="show_pypi_releases", metavar="HOURS",
default=False, help=
"Show PyPI releases for last n hours. ")
group_pypi.add_option("-M", "--query-metadata", action='store',
dest="query_metadata_pypi", default=False,
metavar="PKG_SPEC", help=
"Show metadata for a package listed on PyPI. Use -f to show particular fields.")
group_pypi.add_option("-S", "", action="store", dest="pypi_search",
default=False, help=
"Search PyPI by spec and optional AND/OR operator.",
metavar='SEARCH_SPEC <AND/OR SEARCH_SPEC>')
group_pypi.add_option("-T", "--file-type", action="store", dest=
"file_type", default="all", help=
"You may specify 'source', 'egg', 'svn' or 'all' when using -D.")
group_pypi.add_option("-U", "--show-updates", action='store_true',
dest="show_updates", metavar='<PKG_NAME>',
default=False, help=
"Check PyPI for updates on package(s).")
group_pypi.add_option("-V", "--versions-available", action=
'store', dest="versions_available",
default=False, metavar='PKG_SPEC',
help="Show available versions for given package " +
"listed on PyPI.")
opt_parser.add_option_group(group_local)
opt_parser.add_option_group(group_pypi)
# add opts from plugins
all_plugins = []
for plugcls in load_plugins(others=True):
plug = plugcls()
try:
plug.add_options(opt_parser)
except AttributeError:
pass
return opt_parser
def validate_pypi_opts(opt_parser):
"""
Check parsed options that require a pkg_spec
@returns: pkg_spec
"""
(options, remaining_args) = opt_parser.parse_args()
options_pkg_specs = [ options.versions_available,
options.query_metadata_pypi,
options.show_download_links,
options.browse_website,
options.fetch,
options.show_deps,
]
for pkg_spec in options_pkg_specs:
if pkg_spec:
return pkg_spec
def write(self, inline):
"""
Write a line to stdout if it isn't in a blacklist.
Try to get the name of the calling module to see if we want
to filter it. If there is no calling module, use the current
frame in case there's a traceback before there is any calling module.
"""
frame = inspect.currentframe().f_back
if frame:
mod = frame.f_globals.get('__name__')
else:
mod = sys._getframe(0).f_globals.get('__name__')
if mod not in self.modulenames:
self.stdout.write(inline)
def get_plugin(self, method):
"""
Return plugin object if CLI option is activated and method exists
@param method: name of plugin's method we're calling
@type method: string
@returns: list of plugins with `method`
"""
all_plugins = []
for entry_point in pkg_resources.iter_entry_points('yolk.plugins'):
plugin_obj = entry_point.load()
plugin = plugin_obj()
plugin.configure(self.options, None)
if plugin.enabled:
if not hasattr(plugin, method):
self.logger.warn("Error: plugin has no method: %s" % method)
plugin = None
else:
all_plugins.append(plugin)
return all_plugins
def set_log_level(self):
"""
Set log level according to command-line options
@returns: logger object
"""
if self.options.debug:
self.logger.setLevel(logging.DEBUG)
elif self.options.quiet:
self.logger.setLevel(logging.ERROR)
else:
self.logger.setLevel(logging.INFO)
self.logger.addHandler(logging.StreamHandler())
return self.logger
def run(self):
"""
Perform actions based on CLI options
@returns: status code
"""
opt_parser = setup_opt_parser()
(self.options, remaining_args) = opt_parser.parse_args()
logger = self.set_log_level()
pkg_spec = validate_pypi_opts(opt_parser)
if not pkg_spec:
pkg_spec = remaining_args
self.pkg_spec = pkg_spec
if not self.options.pypi_search and (len(sys.argv) == 1 or\
len(remaining_args) > 2):
opt_parser.print_help()
return 2
#Options that depend on querying installed packages, not PyPI.
#We find the proper case for package names if they are installed,
#otherwise PyPI returns the correct case.
if self.options.show_deps or self.options.show_all or \
self.options.show_active or self.options.show_non_active or \
(self.options.show_updates and pkg_spec):
want_installed = True
else:
want_installed = False
#show_updates may or may not have a pkg_spec
if not want_installed or self.options.show_updates:
self.pypi = CheeseShop(self.options.debug)
#XXX: We should return 2 here if we couldn't create xmlrpc server
if pkg_spec:
(self.project_name, self.version, self.all_versions) = \
self.parse_pkg_ver(want_installed)
if want_installed and not self.project_name:
logger.error("%s is not installed." % pkg_spec[0])
return 1
#I could prefix all these with 'cmd_' and the methods also
#and then iterate over the `options` dictionary keys...
commands = ['show_deps', 'query_metadata_pypi', 'fetch',
'versions_available', 'show_updates', 'browse_website',
'show_download_links', 'pypi_search', 'show_pypi_changelog',
'show_pypi_releases', 'yolk_version', 'show_all',
'show_active', 'show_non_active', 'show_entry_map',
'show_entry_points']
#Run first command it finds, and only the first command, then return
#XXX: Check if more than one command was set in options and give error?
for action in commands:
if getattr(self.options, action):
return getattr(self, action)()
opt_parser.print_help()
def show_updates(self):
"""
Check installed packages for available updates on PyPI
@param project_name: optional package name to check; checks every
installed package if none specified
@type project_name: string
@returns: 0
"""
dists = Distributions()
if self.project_name:
#Check for a single package
pkg_list = [self.project_name]
else:
#Check for every installed package
pkg_list = get_pkglist()
found = None
for pkg in pkg_list:
for (dist, active) in dists.get_distributions("all", pkg,
dists.get_highest_installed(pkg)):
(project_name, versions) = \
self.pypi.query_versions_pypi(dist.project_name)
if versions:
#PyPI returns them in chronological order,
#but who knows if it's guaranteed in the API?
#Make sure we grab the highest version:
newest = get_highest_version(versions)
if newest != dist.version:
#We may have newer than what PyPI knows about
if pkg_resources.parse_version(dist.version) < \
pkg_resources.parse_version(newest):
found = True
print(" %s %s (%s)" % (project_name, dist.version,
newest))
if not found and self.project_name:
self.logger.info("You have the latest version installed.")
elif not found:
self.logger.info("No newer packages found at The Cheese Shop")
return 0
def show_distributions(self, show):
"""
Show list of installed activated OR non-activated packages
@param show: type of pkgs to show (all, active or nonactive)
@type show: string
@returns: None or 2 if error
"""
show_metadata = self.options.metadata
#Search for any plugins with active CLI options with add_column() method
plugins = self.get_plugin("add_column")
#Some locations show false positive for 'development' packages:
ignores = ["/UNIONFS", "/KNOPPIX.IMG"]
#Check if we're in a workingenv
#See http://cheeseshop.python.org/pypi/workingenv.py
workingenv = os.environ.get('WORKING_ENV')
if workingenv:
ignores.append(workingenv)
dists = Distributions()
results = None
for (dist, active) in dists.get_distributions(show, self.project_name,
self.version):
metadata = get_metadata(dist)
for prefix in ignores:
if dist.location.startswith(prefix):
dist.location = dist.location.replace(prefix, "")
#Case-insensitive search because of Windows
if dist.location.lower().startswith(get_python_lib().lower()):
develop = ""
else:
develop = dist.location
if metadata:
add_column_text = ""
for my_plugin in plugins:
#See if package is 'owned' by a package manager such as
#portage, apt, rpm etc.
#add_column_text += my_plugin.add_column(filename) + " "
add_column_text += my_plugin.add_column(dist) + " "
self.print_metadata(metadata, develop, active, add_column_text)
else:
print(str(dist) + " has no metadata")
results = True
if not results and self.project_name:
if self.version:
pkg_spec = "%s==%s" % (self.project_name, self.version)
else:
pkg_spec = "%s" % self.project_name
if show == "all":
self.logger.error("There are no versions of %s installed." \
% pkg_spec)
else:
self.logger.error("There are no %s versions of %s installed." \
% \
(show, pkg_spec))
return 2
elif show == "all" and results and self.options.fields:
print("Versions with '*' are non-active.")
print("Versions with '!' are deployed in development mode.") |
Print out formatted metadata
@param metadata: package's metadata
@type metadata: pkg_resources Distribution obj
@param develop: path to pkg if its deployed in development mode
@type develop: string
@param active: show if package is activated or not
@type active: boolean
@param installed_by: Shows if pkg was installed by a package manager other
than setuptools
@type installed_by: string
@returns: None
def print_metadata(self, metadata, develop, active, installed_by):
"""
Print out formatted metadata
@param metadata: package's metadata
@type metadata: pkg_resources Distribution obj
@param develop: path to pkg if it's deployed in development mode
@type develop: string
@param active: show if package is activated or not
@type active: boolean
@param installed_by: Shows if pkg was installed by a package manager other
than setuptools
@type installed_by: string
@returns: None
"""
show_metadata = self.options.metadata
if self.options.fields:
fields = self.options.fields.split(',')
fields = [field.strip() for field in fields]
else:
fields = []
version = metadata['Version']
#When showing all packages, note which are not active:
if active:
if fields:
active_status = ""
else:
active_status = "active"
else:
if fields:
active_status = "*"
else:
active_status = "non-active"
if develop:
if fields:
development_status = "! (%s)" % develop
else:
development_status = "development (%s)" % develop
else:
development_status = installed_by
status = "%s %s" % (active_status, development_status)
if fields:
print('%s (%s)%s %s' % (metadata['Name'], version, active_status,
development_status))
else:
# Need intelligent justification
print(metadata['Name'].ljust(15) + " - " + version.ljust(12) + \
" - " + status)
if fields:
#Only show specific fields, using case-insensitive search
fields = [field.lower() for field in fields]
for field in metadata.keys():
if field.lower() in fields:
print(' %s: %s' % (field, metadata[field]))
print()
elif show_metadata:
#Print all available metadata fields
for field in metadata.keys():
if field != 'Name' and field != 'Summary':
print(' %s: %s' % (field, metadata[field]))
def show_deps(self):
"""
Show dependencies for package(s)
@returns: 0 - success 1 - No dependency info supplied
"""
pkgs = pkg_resources.Environment()
for pkg in pkgs[self.project_name]:
if not self.version:
print(pkg.project_name, pkg.version)
i = len(list(pkg._dep_map.values())[0])
if i:
while i:
if not self.version or pkg.version == self.version:
if self.version and i == len(list(pkg._dep_map.values())[0]):
print(pkg.project_name, pkg.version)
print(" " + str(list(pkg._dep_map.values())[0][i - 1]))
i -= 1
else:
self.logger.info(\
"No dependency information was supplied with the package.")
return 1
return 0
def show_pypi_changelog(self):
"""
Show detailed PyPI ChangeLog for the last `hours`
@returns: 0 = success or 1 if failed to retrieve from XML-RPC server
"""
hours = self.options.show_pypi_changelog
if not hours.isdigit():
self.logger.error("Error: You must supply an integer.")
return 1
try:
changelog = self.pypi.changelog(int(hours))
except XMLRPCFault as err_msg:
self.logger.error(err_msg)
self.logger.error("ERROR: Couldn't retrieve changelog.")
return 1
last_pkg = ''
for entry in changelog:
pkg = entry[0]
if pkg != last_pkg:
print("%s %s\n\t%s" % (entry[0], entry[1], entry[3]))
last_pkg = pkg
else:
print("\t%s" % entry[3])
return 0
def show_pypi_releases(self):
"""
Show PyPI releases for the last number of `hours`
@returns: 0 = success or 1 if failed to retrieve from XML-RPC server
"""
try:
hours = int(self.options.show_pypi_releases)
except ValueError:
self.logger.error("ERROR: You must supply an integer.")
return 1
try:
latest_releases = self.pypi.updated_releases(hours)
except XMLRPCFault as err_msg:
self.logger.error(err_msg)
self.logger.error("ERROR: Couldn't retrieve latest releases.")
return 1
for release in latest_releases:
print("%s %s" % (release[0], release[1]))
return 0
def show_download_links(self):
"""
Query PyPI for pkg download URI for a package
@returns: 0
"""
#In case they specify version as 'dev' instead of using -T svn,
#don't show three svn URIs
if self.options.file_type == "all" and self.version == "dev":
self.options.file_type = "svn"
if self.options.file_type == "svn":
version = "dev"
else:
if self.version:
version = self.version
else:
version = self.all_versions[0]
if self.options.file_type == "all":
#Search for source, egg, and svn
self.print_download_uri(version, True)
self.print_download_uri(version, False)
self.print_download_uri("dev", True)
else:
if self.options.file_type == "source":
source = True
else:
source = False
self.print_download_uri(version, source)
return 0
def print_download_uri(self, version, source):
"""
@param version: version number or 'dev' for svn
@type version: string
@param source: download source or egg
@type source: boolean
@returns: None
"""
if version == "dev":
pkg_type = "subversion"
source = True
elif source:
pkg_type = "source"
else:
pkg_type = "egg"
#Use setuptools monkey-patch to grab url
url = get_download_uri(self.project_name, version, source,
self.options.pypi_index)
if url:
print("%s" % url)
else:
self.logger.info("No download URL found for %s" % pkg_type) |
Download a package
@returns: 0 = success or 1 if failed download
def fetch(self):
"""
Download a package
@returns: 0 = success or 1 if failed download
"""
#Default type to download
source = True
directory = "."
if self.options.file_type == "svn":
version = "dev"
svn_uri = get_download_uri(self.project_name, \
"dev", True)
if svn_uri:
directory = self.project_name + "_svn"
return self.fetch_svn(svn_uri, directory)
else:
self.logger.error(\
"ERROR: No subversion repository found for %s" % \
self.project_name)
return 1
elif self.options.file_type == "source":
source = True
elif self.options.file_type == "egg":
source = False
uri = get_download_uri(self.project_name, self.version, source)
if uri:
return self.fetch_uri(directory, uri)
else:
self.logger.error("No %s URI found for package: %s " % \
(self.options.file_type, self.project_name))
return 1
def fetch_uri(self, directory, uri):
"""
Use ``urllib.urlretrieve`` to download package to file in sandbox dir.
@param directory: directory to download to
@type directory: string
@param uri: uri to download
@type uri: string
@returns: 0 = success or 1 for failed download
"""
filename = os.path.basename(urlparse(uri)[2])
if os.path.exists(filename):
self.logger.error("ERROR: File exists: " + filename)
return 1
try:
downloaded_filename, headers = urlretrieve(uri, filename)
self.logger.info("Downloaded ./" + filename)
except IOError as err_msg:
self.logger.error("Error downloading package %s from URL %s" \
% (filename, uri))
self.logger.error(str(err_msg))
return 1
if headers.gettype() in ["text/html"]:
dfile = open(downloaded_filename)
if re.search("404 Not Found", "".join(dfile.readlines())):
dfile.close()
self.logger.error("'404 Not Found' error")
return 1
dfile.close()
return 0
def fetch_svn(self, svn_uri, directory):
"""
Fetch subversion repository
@param svn_uri: subversion repository uri to check out
@type svn_uri: string
@param directory: directory to download to
@type directory: string
@returns: 0 = success or 1 for failed download
"""
if not command_successful("svn --version"):
self.logger.error("ERROR: Do you have subversion installed?")
return 1
if os.path.exists(directory):
self.logger.error("ERROR: Checkout directory exists - %s" \
% directory)
return 1
try:
os.mkdir(directory)
except OSError as err_msg:
self.logger.error("ERROR: " + str(err_msg))
return 1
cwd = os.path.realpath(os.curdir)
os.chdir(directory)
self.logger.info("Doing subversion checkout for %s" % svn_uri)
status, output = run_command("/usr/bin/svn co %s" % svn_uri)
self.logger.info(output)
os.chdir(cwd)
self.logger.info("subversion checkout is in directory './%s'" \
% directory)
return 0
def browse_website(self, browser=None):
"""
Launch web browser at project's homepage
@param browser: name of web browser to use
@type browser: string
@returns: 0 if homepage found, 1 if no homepage found
"""
if len(self.all_versions):
metadata = self.pypi.release_data(self.project_name, \
self.all_versions[0])
self.logger.debug("DEBUG: browser: %s" % browser)
if "home_page" in metadata:
self.logger.info("Launching browser: %s" \
% metadata["home_page"])
if browser == 'konqueror':
browser = webbrowser.Konqueror()
else:
browser = webbrowser.get()
browser.open(metadata["home_page"], 2)
return 0
self.logger.error("No homepage URL found.")
return 1
def query_metadata_pypi(self):
"""
Show pkg metadata queried from PyPI
@returns: 0
"""
if self.version and self.version in self.all_versions:
metadata = self.pypi.release_data(self.project_name, self.version)
else:
#Give highest version
metadata = self.pypi.release_data(self.project_name, \
self.all_versions[0])
if metadata:
for key in metadata.keys():
if not self.options.fields or (self.options.fields and \
self.options.fields==key):
print("%s: %s" % (key, metadata[key]))
return 0
def versions_available(self):
"""
Query PyPI for a particular version or all versions of a package
@returns: 0 if version(s) found or 1 if none found
"""
if self.version:
spec = "%s==%s" % (self.project_name, self.version)
else:
spec = self.project_name
if self.all_versions and self.version in self.all_versions:
print_pkg_versions(self.project_name, [self.version])
elif not self.version and self.all_versions:
print_pkg_versions(self.project_name, self.all_versions)
else:
if self.version:
self.logger.error("No pacakge found for version %s" \
% self.version)
else:
self.logger.error("No pacakge found for %s" % self.project_name)
return 1
return 0
def parse_search_spec(self, spec):
"""
Parse search args and return spec dict for PyPI
* Owwww, my eyes! Rewrite this.
@param spec: Cheese Shop package search spec
e.g.
name=Cheetah
license=ZPL
license=ZPL AND name=Cheetah
@type spec: string
@returns: tuple with spec and operator
"""
usage = \
"""You can search PyPI by the following:
name
version
author
author_email
maintainer
maintainer_email
home_page
license
summary
description
keywords
platform
download_url
e.g. yolk -S name=Cheetah
yolk -S name=yolk AND license=PSF
"""
if not spec:
self.logger.error(usage)
return (None, None)
try:
spec = (" ").join(spec)
operator = 'AND'
first = second = ""
if " AND " in spec:
(first, second) = spec.split('AND')
elif " OR " in spec:
(first, second) = spec.split('OR')
operator = 'OR'
else:
first = spec
(key1, term1) = first.split('=')
key1 = key1.strip()
if second:
(key2, term2) = second.split('=')
key2 = key2.strip()
spec = {}
spec[key1] = term1
if second:
spec[key2] = term2
except Exception:
self.logger.error(usage)
spec = operator = None
return (spec, operator)
def pypi_search(self):
"""
Search PyPI by metadata keyword
e.g. yolk -S name=yolk AND license=GPL
@param spec: Cheese Shop search spec
@type spec: list of strings
spec examples:
["name=yolk"]
["license=GPL"]
["name=yolk", "AND", "license=GPL"]
@returns: 0 on success or 1 if malformed search spec
"""
spec = self.pkg_spec
#Add remaining CLI arguments to options.pypi_search
search_arg = self.options.pypi_search
spec.insert(0, search_arg.strip())
(spec, operator) = self.parse_search_spec(spec)
if not spec:
return 1
for pkg in self.pypi.search(spec, operator):
if pkg['summary']:
summary = pkg['summary'].encode('utf-8')
else:
summary = ""
print("""%s (%s):
%s
""" % (pkg['name'].encode('utf-8'), pkg["version"],
summary))
return 0
def show_entry_map(self):
"""
Show entry map for a package
@returns: 0 for success or 1 if error
"""
pprinter = pprint.PrettyPrinter()
try:
entry_map = pkg_resources.get_entry_map(self.options.show_entry_map)
if entry_map:
pprinter.pprint(entry_map)
except pkg_resources.DistributionNotFound:
self.logger.error("Distribution not found: %s" \
% self.options.show_entry_map)
return 1
return 0
def show_entry_points(self):
"""
Show entry points for a module
@returns: 0 for success or 1 if error
"""
found = False
for entry_point in \
pkg_resources.iter_entry_points(self.options.show_entry_points):
found = True
try:
plugin = entry_point.load()
print(plugin.__module__)
print(" %s" % entry_point)
if plugin.__doc__:
print(plugin.__doc__)
print()
except ImportError:
pass
if not found:
self.logger.error("No entry points found for %s" \
% self.options.show_entry_points)
return 1
return 0
def parse_pkg_ver(self, want_installed):
"""
Return tuple with project_name and version from CLI args
If the user gave the wrong case for the project name, this corrects it
@param want_installed: whether package we want is installed or not
@type want_installed: boolean
@returns: tuple(project_name, version, all_versions)
"""
all_versions = []
arg_str = ("").join(self.pkg_spec)
if "==" not in arg_str:
#No version specified
project_name = arg_str
version = None
else:
(project_name, version) = arg_str.split("==")
project_name = project_name.strip()
version = version.strip()
#Find proper case for package name
if want_installed:
dists = Distributions()
project_name = dists.case_sensitive_name(project_name)
else:
(project_name, all_versions) = \
self.pypi.query_versions_pypi(project_name)
if not len(all_versions):
msg = "I'm afraid we have no '%s' at " % project_name
msg += "The Cheese Shop. A little Red Leicester, perhaps?"
self.logger.error(msg)
sys.exit(2)
return (project_name, version, all_versions)
def install_backport_hook(api):
"""
Install a backport import hook for Qt4 api
Parameters
----------
api : str
The Qt4 api whose structure should be intercepted
('pyqt4' or 'pyside').
Example
-------
>>> install_backport_hook("pyqt4")
>>> import PyQt4
Loaded module AnyQt._backport as a substitute for PyQt4
"""
if api == USED_API:
raise ValueError
sys.meta_path.insert(0, ImportHookBackport(api))
def install_deny_hook(api):
"""
Install a deny import hook for Qt api.
Parameters
----------
api : str
The Qt api whose import should be prevented
Example
-------
>>> install_deny_import("pyqt4")
>>> import PyQt4
Traceback (most recent call last):...
ImportError: Import of PyQt4 is denied.
"""
if api == USED_API:
raise ValueError
sys.meta_path.insert(0, ImportHookDeny(api))
def run_command(cmd, env=None, max_timeout=None):
"""
Run a command and return its exit status code and its output.
If max_timeout is given, the command is killed after that many seconds.
"""
arglist = cmd.split()
#os.tmpfile() was removed in Python 3; tempfile is the portable equivalent
import tempfile
output = tempfile.TemporaryFile()
try:
pipe = Popen(arglist, stdout=output, stderr=STDOUT, env=env)
except Exception as errmsg:
return 1, errmsg
# Wait only max_timeout seconds.
if max_timeout:
start = time.time()
while pipe.poll() is None:
time.sleep(0.1)
if time.time() - start > max_timeout:
os.kill(pipe.pid, signal.SIGINT)
pipe.wait()
return 1, "Time exceeded"
pipe.wait()
output.seek(0)
return pipe.returncode, output.read()
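A usage sketch for run_command (the command and timeout are illustrative):

status, output = run_command("ls -l", max_timeout=5)
if status == 0:
    print(output)  # raw captured stdout/stderr
else:
    print("command failed:", output)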
async def iter(
self,
url: Union[str, methods],
data: Optional[MutableMapping] = None,
headers: Optional[MutableMapping] = None,
*,
limit: int = 200,
iterkey: Optional[str] = None,
itermode: Optional[str] = None,
minimum_time: Optional[int] = None,
as_json: Optional[bool] = None
) -> AsyncIterator[dict]:
"""
Iterate over a slack API method supporting pagination
When using :class:`slack.methods` the request is made `as_json` if available
Args:
url: :class:`slack.methods` or url string
data: JSON encodable MutableMapping
headers:
limit: Maximum number of results to return per call.
iterkey: Key in response data to iterate over (required for url string).
itermode: Iteration mode (required for url string) (one of `cursor`, `page` or `timeline`)
minimum_time: Minimum elapsed time (in seconds) between two calls to the Slack API (default to 0).
If not reached the client will sleep for the remaining time.
as_json: Post JSON to the slack API
Returns:
Async iterator over `response_data[key]`
"""
itervalue = None
if not data:
data = {}
last_request_time = None
while True:
current_time = time.time()
if (
minimum_time
and last_request_time
and last_request_time + minimum_time > current_time
):
await self.sleep(last_request_time + minimum_time - current_time)
data, iterkey, itermode = sansio.prepare_iter_request(
url,
data,
iterkey=iterkey,
itermode=itermode,
limit=limit,
itervalue=itervalue,
)
last_request_time = time.time()
response_data = await self.query(url, data, headers, as_json)
itervalue = sansio.decode_iter_request(response_data)
for item in response_data[iterkey]:
yield item
if not itervalue:
break
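A hedged usage sketch for iter (the endpoint, iterkey and itermode values are illustrative; a plain url string requires both, per the docstring above):

async def list_channel_names(client):
    names = []
    async for channel in client.iter(
        "https://slack.com/api/conversations.list",
        iterkey="channels",
        itermode="cursor",
    ):
        names.append(channel["name"])
    return names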
async def _incoming_from_rtm(
self, url: str, bot_id: str
) -> AsyncIterator[events.Event]:
"""
Connect and discard incoming RTM event if necessary.
:param url: Websocket url
:param bot_id: Bot ID
:return: Incoming events
"""
async for data in self._rtm(url):
event = events.Event.from_rtm(json.loads(data))
if sansio.need_reconnect(event):
break
elif sansio.discard_event(event, bot_id):
continue
else:
yield event
def login(request, template_name='registration/login.html',
redirect_field_name=REDIRECT_FIELD_NAME,
authentication_form=AuthenticationForm,
current_app=None, extra_context=None):
"""
Displays the login form and handles the login action.
"""
redirect_to = request.POST.get(redirect_field_name,
request.GET.get(redirect_field_name, ''))
if request.method == "POST":
form = authentication_form(data=request.POST, request=request)
if form.is_valid():
netloc = urlparse(redirect_to)[1]
# Use default setting if redirect_to is empty
if not redirect_to:
redirect_to = settings.LOGIN_REDIRECT_URL
# Heavier security check -- don't allow redirection to a different
# host.
elif netloc and netloc != request.get_host():
redirect_to = settings.LOGIN_REDIRECT_URL
# Okay, security checks complete. Log the user in.
auth_login(request, form.get_user())
return redirect(redirect_to)
else:
form = authentication_form(request)
current_site = get_current_site(request)
context = {
'form': form,
redirect_field_name: redirect_to,
'site': current_site,
'site_name': current_site.name,
}
if extra_context is not None:
context.update(extra_context)
request.current_app = current_app
return TemplateResponse(request, template_name, context)
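A hedged URLconf sketch for wiring up the login view above (Django 1.x-era style; the URL pattern and template name are placeholders):

from django.conf.urls import url

urlpatterns = [
    url(r'^accounts/login/$', login,
        {'template_name': 'registration/login.html'},
        name='login'),
]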
def package_manager_owns(self, dist):
"""
Returns the plugin name if the package manager 'owns' the file,
or an empty string if it does not.
There is currently no way to determine if distutils or
setuptools installed a package. A future feature of setuptools
will make a package manifest which can be checked.
"""
#Installed by distutils/setuptools or external package manager?
#If location is in site-packages dir, check for .egg-info file
if dist.location.lower() == get_python_lib().lower():
filename = os.path.join(dist.location, dist.egg_name() + ".egg-info")
else:
filename = dist.location
status, output = getstatusoutput("/usr/bin/acmefile -q %s" % filename)
#status == 0 (file was installed by Acme)
#status == 256 (file was not installed by Acme)
if status == 0:
return self.name
else:
return "" |
If the environmental variable 'HTTP_PROXY' is set, it will most likely be
in one of these forms:
proxyhost:8080
http://proxyhost:8080
urlllib2 requires the proxy URL to start with 'http://'
This routine does that, and returns the transport for xmlrpc.
def check_proxy_setting():
"""
If the environment variable 'HTTP_PROXY' is set, it will most likely be
in one of these forms:
proxyhost:8080
http://proxyhost:8080
urllib2 requires the proxy URL to start with 'http://',
so this routine adds that prefix if it is missing.
"""
try:
http_proxy = os.environ['HTTP_PROXY']
except KeyError:
return
if not http_proxy.startswith('http://'):
match = re.match(r'(http://)?([-_\.A-Za-z]+):(\d+)', http_proxy)
if not match:
raise Exception('Proxy format not recognised: [%s]' % http_proxy)
os.environ['HTTP_PROXY'] = 'http://%s:%s' % (match.group(2),
match.group(3))
return
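A quick sketch of the normalization check_proxy_setting performs:

import os

os.environ['HTTP_PROXY'] = 'proxyhost:8080'
check_proxy_setting()
print(os.environ['HTTP_PROXY'])  # -> http://proxyhost:8080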
def filter_url(pkg_type, url):
"""
Returns URL of specified file type
'source', 'egg', or 'all'
"""
bad_stuff = ["?modtime", "#md5="]
for junk in bad_stuff:
if junk in url:
url = url.split(junk)[0]
break
#pkg_spec==dev (svn)
if url.endswith("-dev"):
url = url.split("#egg=")[0]
if pkg_type == "all":
return url
elif pkg_type == "source":
valid_source_types = [".tgz", ".tar.gz", ".zip", ".tbz2", ".tar.bz2"]
for extension in valid_source_types:
if url.lower().endswith(extension):
return url
elif pkg_type == "egg":
if url.lower().endswith(".egg"):
return url
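A few illustrative calls to filter_url:

# The "#md5=" fragment is stripped before the extension check:
print(filter_url("source", "http://example.com/pkg-1.0.tar.gz#md5=abc123"))
# -> http://example.com/pkg-1.0.tar.gz
print(filter_url("egg", "http://example.com/pkg-1.0.tar.gz"))
# -> None (not an egg)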
def request(self, host, handler, request_body, verbose):
'''Send xml-rpc request using proxy'''
#We get a traceback if we don't have this attribute:
self.verbose = verbose
url = 'http://' + host + handler
request = urllib2.Request(url)
request.add_data(request_body)
# Note: 'Host' and 'Content-Length' are added automatically
request.add_header('User-Agent', self.user_agent)
request.add_header('Content-Type', 'text/xml')
proxy_handler = urllib2.ProxyHandler()
opener = urllib2.build_opener(proxy_handler)
fhandle = opener.open(request)
return self.parse_response(fhandle)
def get_cache(self):
"""
Get a package name list from disk cache or PyPI
"""
#This is used by external programs that import `CheeseShop` and don't
#want a cache file written to ~/.pypi and query PyPI every time.
if self.no_cache:
self.pkg_list = self.list_packages()
return
if not os.path.exists(self.yolk_dir):
os.mkdir(self.yolk_dir)
if os.path.exists(self.pkg_cache_file):
self.pkg_list = self.query_cached_package_list()
else:
self.logger.debug("DEBUG: Fetching package list cache from PyPi...")
self.fetch_pkg_list()
def get_xmlrpc_server(self):
"""
Returns PyPI's XML-RPC server instance
"""
check_proxy_setting()
if 'XMLRPC_DEBUG' in os.environ:
debug = 1
else:
debug = 0
try:
return xmlrpclib.Server(XML_RPC_SERVER, transport=ProxyTransport(), verbose=debug)
except IOError:
self.logger("ERROR: Can't connect to XML-RPC server: %s" \
% XML_RPC_SERVER) |
Fetch list of available versions for a package from The CheeseShop
def query_versions_pypi(self, package_name):
"""Fetch list of available versions for a package from The CheeseShop"""
if package_name not in self.pkg_list:
self.logger.debug("Package %s not in cache, querying PyPI..." \
% package_name)
self.fetch_pkg_list()
#I have to set version=[] for edge cases like "Magic file extensions"
#but I'm not sure why this happens. It's included with Python or
#because it has a space in its name?
versions = []
for pypi_pkg in self.pkg_list:
if pypi_pkg.lower() == package_name.lower():
if self.debug:
self.logger.debug("DEBUG: %s" % package_name)
versions = self.package_releases(pypi_pkg)
package_name = pypi_pkg
break
return (package_name, versions)
def query_cached_package_list(self):
"""Return list of pickled package names from PYPI"""
if self.debug:
self.logger.debug("DEBUG: reading pickled cache file")
return cPickle.load(open(self.pkg_cache_file, "rb"))
def fetch_pkg_list(self):
"""Fetch and cache master list of package names from PYPI"""
self.logger.debug("DEBUG: Fetching package name list from PyPI")
package_list = self.list_packages()
cPickle.dump(package_list, open(self.pkg_cache_file, "wb"))
self.pkg_list = package_list
def search(self, spec, operator):
'''Query PyPI via XML-RPC interface using search spec'''
return self.xmlrpc.search(spec, operator.lower())
def release_data(self, package_name, version):
"""Query PYPI via XMLRPC interface for a pkg's metadata"""
try:
return self.xmlrpc.release_data(package_name, version)
except xmlrpclib.Fault:
#XXX Raises xmlrpclib.Fault if you give a non-existent version
#Could this be server bug?
return
def package_releases(self, package_name):
"""Query PYPI via XMLRPC interface for a pkg's available versions"""
if self.debug:
self.logger.debug("DEBUG: querying PyPI for versions of " \
+ package_name)
return self.xmlrpc.package_releases(package_name)
def get_download_urls(self, package_name, version="", pkg_type="all"):
"""Query PyPI for pkg download URI for a packge"""
if version:
versions = [version]
else:
#If they don't specify version, show em all.
(package_name, versions) = self.query_versions_pypi(package_name)
all_urls = []
for ver in versions:
metadata = self.release_data(package_name, ver)
for urls in self.release_urls(package_name, ver):
if pkg_type == "source" and urls['packagetype'] == "sdist":
all_urls.append(urls['url'])
elif pkg_type == "egg" and \
urls['packagetype'].startswith("bdist"):
all_urls.append(urls['url'])
elif pkg_type == "all":
#All
all_urls.append(urls['url'])
#Try the package's metadata directly in case there's nothing
#returned by XML-RPC's release_urls()
if metadata and 'download_url' in metadata and \
metadata['download_url'] != "UNKNOWN" and \
metadata['download_url'] is not None:
if metadata['download_url'] not in all_urls:
if pkg_type != "all":
url = filter_url(pkg_type, metadata['download_url'])
if url:
all_urls.append(url)
return all_urls
def clone(self) -> "Event":
"""
Clone the event
Returns:
:class:`slack.events.Event`
"""
return self.__class__(copy.deepcopy(self.event), copy.deepcopy(self.metadata))
def from_rtm(cls, raw_event: MutableMapping) -> "Event":
"""
Create an event with data coming from the RTM API.
If the event type is a message a :class:`slack.events.Message` is returned.
Args:
raw_event: JSON decoded data from the RTM API
Returns:
:class:`slack.events.Event` or :class:`slack.events.Message`
"""
if raw_event["type"].startswith("message"):
return Message(raw_event)
else:
return Event(raw_event)
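A small sketch for from_rtm (the payload is illustrative):

raw = {"type": "message", "channel": "C1", "ts": "1467321000.01", "text": "hi"}
event = Event.from_rtm(raw)
# "message" events come back as the Message subclass:
print(type(event).__name__)  # -> Message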
def from_http(
cls,
raw_body: MutableMapping,
verification_token: Optional[str] = None,
team_id: Optional[str] = None,
) -> "Event":
"""
Create an event with data coming from the HTTP Event API.
If the event type is a message a :class:`slack.events.Message` is returned.
Args:
raw_body: Decoded body of the Event API request
verification_token: Slack verification token used to verify the request came from slack
team_id: Verify the event is for the correct team
Returns:
:class:`slack.events.Event` or :class:`slack.events.Message`
Raises:
:class:`slack.exceptions.FailedVerification`: when `verification_token` or `team_id` does not match the
incoming event's.
"""
if verification_token and raw_body["token"] != verification_token:
raise exceptions.FailedVerification(raw_body["token"], raw_body["team_id"])
if team_id and raw_body["team_id"] != team_id:
raise exceptions.FailedVerification(raw_body["token"], raw_body["team_id"])
if raw_body["event"]["type"].startswith("message"):
return Message(raw_body["event"], metadata=raw_body)
else:
return Event(raw_body["event"], metadata=raw_body)
def response(self, in_thread: Optional[bool] = None) -> "Message":
"""
Create a response message.
Depending on the incoming message the response can be in a thread. By default the response follows where the
incoming message was posted.
Args:
in_thread (boolean): Overwrite the `threading` behaviour
Returns:
a new :class:`slack.event.Message`
"""
data = {"channel": self["channel"]}
if in_thread:
if "message" in self:
data["thread_ts"] = (
self["message"].get("thread_ts") or self["message"]["ts"]
)
else:
data["thread_ts"] = self.get("thread_ts") or self["ts"]
elif in_thread is None:
if "message" in self and "thread_ts" in self["message"]:
data["thread_ts"] = self["message"]["thread_ts"]
elif "thread_ts" in self:
data["thread_ts"] = self["thread_ts"]
return Message(data)
def serialize(self) -> dict:
"""
Serialize the message for sending to slack API
Returns:
serialized message
"""
data = {**self}
if "attachments" in self:
data["attachments"] = json.dumps(self["attachments"])
return data
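A small sketch for serialize (the message is illustrative):

msg = Message({"channel": "C1", "text": "hi",
               "attachments": [{"fallback": "plain"}]})
data = msg.serialize()
# Attachments are JSON-encoded for the slack API:
print(data["attachments"])  # -> '[{"fallback": "plain"}]'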
def register(self, event_type: str, handler: Any, **detail: Any) -> None:
"""
Register a new handler for a specific :class:`slack.events.Event` `type` (See `slack event types documentation
<https://api.slack.com/events>`_ for a list of event types).
The arbitrary keyword argument is used as a key/value pair to compare against what is in the incoming
:class:`slack.events.Event`
Args:
event_type: Event type the handler is interested in
handler: Callback
**detail: Additional key for routing
"""
LOG.info("Registering %s, %s to %s", event_type, detail, handler)
if len(detail) > 1:
raise ValueError("Only one detail can be provided for additional routing")
elif not detail:
detail_key, detail_value = "*", "*"
else:
detail_key, detail_value = detail.popitem()
if detail_key not in self._routes[event_type]:
self._routes[event_type][detail_key] = {}
if detail_value not in self._routes[event_type][detail_key]:
self._routes[event_type][detail_key][detail_value] = []
self._routes[event_type][detail_key][detail_value].append(handler)
def dispatch(self, event: Event) -> Iterator[Any]:
"""
Yields handlers matching the routing of the incoming :class:`slack.events.Event`.
Args:
event: :class:`slack.events.Event`
Yields:
handler
"""
LOG.debug('Dispatching event "%s"', event.get("type"))
if event["type"] in self._routes:
for detail_key, detail_values in self._routes.get(
event["type"], {}
).items():
event_value = event.get(detail_key, "*")
yield from detail_values.get(event_value, [])
else:
return |
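Continuing the sketch above, dispatching yields every handler whose event type and optional detail match:

from slack.events import Event

event = Event({"type": "reaction_added", "reaction": "thumbsup", "user": "U1"}, metadata={})
for handler in router.dispatch(event):
    handler(event)  # runs each handler registered for this type/detail pair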
Register a new handler for a specific :class:`slack.events.Message`.
The routing is based on regex pattern matching the message text and the incoming slack channel.
Args:
pattern: Regex pattern matching the message text.
handler: Callback
flags: Regex flags.
channel: Slack channel ID. Use * for any.
subtype: Message subtype
def register(
self,
pattern: str,
handler: Any,
flags: int = 0,
channel: str = "*",
subtype: Optional[str] = None,
) -> None:
"""
Register a new handler for a specific :class:`slack.events.Message`.
The routing is based on regex pattern matching the message text and the incoming slack channel.
Args:
pattern: Regex pattern matching the message text.
handler: Callback
flags: Regex flags.
channel: Slack channel ID. Use * for any.
subtype: Message subtype
"""
LOG.debug('Registering message endpoint "%s: %s"', pattern, handler)
match = re.compile(pattern, flags)
if subtype not in self._routes[channel]:
self._routes[channel][subtype] = dict()
if match in self._routes[channel][subtype]:
self._routes[channel][subtype][match].append(handler)
else:
self._routes[channel][subtype][match] = [handler] |
Yields handlers matching the routing of the incoming :class:`slack.events.Message`
Args:
message: :class:`slack.events.Message`
Yields:
handler
def dispatch(self, message: Message) -> Iterator[Any]:
"""
Yields handlers matching the routing of the incoming :class:`slack.events.Message`
Args:
message: :class:`slack.events.Message`
Yields:
handler
"""
if "text" in message:
text = message["text"] or ""
elif "message" in message:
text = message["message"].get("text", "")
else:
text = ""
msg_subtype = message.get("subtype")
for subtype, matches in itertools.chain(
self._routes[message["channel"]].items(), self._routes["*"].items()
):
if msg_subtype == subtype or subtype is None:
for match, endpoints in matches.items():
if match.search(text):
yield from endpoints |
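A matching sketch for the message router; as above, the `MessageRouter` name is an assumption since the enclosing class is not shown:

import re
from slack.events import Message, MessageRouter  # class name assumed

def on_hello(message):
    print("hi there")

router = MessageRouter()
router.register("hello|hi", on_hello, flags=re.IGNORECASE)
msg = Message({"channel": "C123", "text": "Hello world"}, metadata={})
for handler in router.dispatch(msg):
    handler(msg)  # pattern is matched against the text; channel "*" matches any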
Query the slack API
When using :class:`slack.methods` the request is made `as_json` if available
Args:
url: :class:`slack.methods` or url string
data: JSON encodable MutableMapping
headers: Custom headers
as_json: Post JSON to the slack API
Returns:
dictionary of slack API response data
def query( # type: ignore
self,
url: Union[str, methods],
data: Optional[MutableMapping] = None,
headers: Optional[MutableMapping] = None,
as_json: Optional[bool] = None,
) -> dict:
"""
Query the slack API
When using :class:`slack.methods` the request is made `as_json` if available
Args:
url: :class:`slack.methods` or url string
data: JSON encodable MutableMapping
headers: Custom headers
as_json: Post JSON to the slack API
Returns:
dictionary of slack API response data
"""
url, body, headers = sansio.prepare_request(
url=url,
data=data,
headers=headers,
global_headers=self._headers,
token=self._token,
)
return self._make_query(url, body, headers) |
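A posting sketch; `client` stands in for an instance of the enclosing sync client (construction not shown), and the enum member name is an assumption:

from slack import methods

response = client.query(methods.CHAT_POST_MESSAGE,  # enum member assumed
                        data={"channel": "C123", "text": "hello"})
print(response["ok"])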
Iterate over events from the RTM API
Args:
url: Websocket connection url
bot_id: Connecting bot ID
Yields:
:class:`slack.events.Event` or :class:`slack.events.Message`
def rtm( # type: ignore
self, url: Optional[str] = None, bot_id: Optional[str] = None
) -> Iterator[events.Event]:
"""
Iterate over events from the RTM API
Args:
url: Websocket connection url
bot_id: Connecting bot ID
Yields:
:class:`slack.events.Event` or :class:`slack.events.Message`
"""
while True:
bot_id = bot_id or self._find_bot_id()
url = url or self._find_rtm_url()
for event in self._incoming_from_rtm(url, bot_id):
yield event
url = None |
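Consuming the iterator with the same assumed `client`:

from slack import events

for event in client.rtm():
    if isinstance(event, events.Message):
        print("message in", event["channel"])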
Displays the login form for the given HttpRequest.
def login(self, request, extra_context=None):
"""
Displays the login form for the given HttpRequest.
"""
context = {
'title': _('Log in'),
'app_path': request.get_full_path(),
}
if (REDIRECT_FIELD_NAME not in request.GET and
REDIRECT_FIELD_NAME not in request.POST):
context[REDIRECT_FIELD_NAME] = request.get_full_path()
context.update(extra_context or {})
defaults = {
'extra_context': context,
'current_app': self.name,
'authentication_form': self.login_form or AdminAuthenticationForm,
'template_name': self.login_template or 'admin/login.html',
}
return login(request, **defaults) |
Get configuration from a file.
def get_config(config_file):
"""Get configuration from a file."""
def load(fp):
try:
return yaml.safe_load(fp)
except yaml.YAMLError as e:
sys.stderr.write(text_type(e))
sys.exit(1) # TODO document exit codes
if config_file == '-':
return load(sys.stdin)
if not os.path.exists(config_file):
sys.stderr.write('ERROR: Must either run next to config.yaml or'
' specify a config file.\n' + __doc__)
sys.exit(2)
with open(config_file) as fp:
return load(fp) |
Figure out what options to use based on the four places it can come from.
Order of precedence:
* cli_options specified by the user at the command line
* local_options specified in the config file for the metric
* config_options specified in the config file at the base
* DEFAULT_OPTIONS hard coded defaults
def get_options(config_options, local_options, cli_options):
"""
Figure out what options to use based on the four places it can come from.
Order of precedence:
* cli_options specified by the user at the command line
* local_options specified in the config file for the metric
* config_options specified in the config file at the base
* DEFAULT_OPTIONS hard coded defaults
"""
options = DEFAULT_OPTIONS.copy()
if config_options is not None:
options.update(config_options)
if local_options is not None:
options.update(local_options)
if cli_options is not None:
options.update(cli_options)
return options |
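A worked example of the precedence; the option keys here are illustrative:

config_options = {"Period": 60, "Statistics": "Average"}  # config file, base level
local_options = {"Statistics": "Sum"}                     # config file, per metric
cli_options = {"Period": 300}                             # command line
options = get_options(config_options, local_options, cli_options)
# Period == 300 (CLI wins), Statistics == "Sum" (per-metric beats base level);
# anything not mentioned falls through to DEFAULT_OPTIONS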
Output the results to stdout.
TODO: add AMQP support for efficiency
def output_results(results, metric, options):
"""
Output the results to stdout.
TODO: add AMQP support for efficiency
"""
formatter = options['Formatter']
context = metric.copy() # XXX might need to sanitize this
try:
context['dimension'] = list(metric['Dimensions'].values())[0]
except AttributeError:
context['dimension'] = ''
for result in results:
stat_keys = metric['Statistics']
if not isinstance(stat_keys, list):
stat_keys = [stat_keys]
for statistic in stat_keys:
context['statistic'] = statistic
# get and then sanitize metric name, first copy the unit name from the
# result to the context to keep the default format happy
context['Unit'] = result['Unit']
metric_name = (formatter % context).replace('/', '.').lower()
line = '{0} {1} {2}\n'.format(
metric_name,
result[statistic],
timegm(result['Timestamp'].timetuple()),
)
sys.stdout.write(line) |
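The emitted lines are in Graphite's plaintext format ("metric value timestamp"). A self-contained sketch of how the %-style Formatter expands against the context; the format string and key values are illustrative:

formatter = "cloudwatch.%(Namespace)s.%(dimension)s.%(MetricName)s.%(statistic)s.%(Unit)s"
context = {"Namespace": "AWS/EC2", "dimension": "i-0abc1", "MetricName": "CPUUtilization",
           "statistic": "Average", "Unit": "Percent"}
print((formatter % context).replace("/", ".").lower())
# cloudwatch.aws.ec2.i-0abc1.cpuutilization.average.percent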
This method is analogous to "gsutil cp gsuri localpath", but in a
programmatically accessible way. The only difference is that we
have to make a guess about the encoding of the file so as not to upset
downstream file operations. If you are downloading a VCF, the default
of False is fine. If you are asking for a BAM file, you should enable
"binary_mode" to make sure the file doesn't get corrupted.
gsuri: full GS-based URI, e.g. gs://cohorts/rocks.txt
localpath: the path for the downloaded file, e.g. /mnt/cohorts/yep.txt
binary_mode: (logical) if True, binary file operations will be
used; if False, standard ASCII-based ones.
def download_to_path(self, gsuri, localpath, binary_mode=False, tmpdir=None):
"""
This method is analogous to "gsutil cp gsuri localpath", but in a
programmatically accessible way. The only difference is that we
have to make a guess about the encoding of the file so as not to upset
downstream file operations. If you are downloading a VCF, the default
of False is fine. If you are asking for a BAM file, you should enable
"binary_mode" to make sure the file doesn't get corrupted.
gsuri: full GS-based URI, e.g. gs://cohorts/rocks.txt
localpath: the path for the downloaded file, e.g. /mnt/cohorts/yep.txt
binary_mode: (logical) if True, binary file operations will be
used; if False, standard ASCII-based ones.
"""
bucket_name, gs_rel_path = self.parse_uri(gsuri)
# And now request the handles for bucket and the file
bucket = self._client.get_bucket(bucket_name)
# Just assignment, no downloading (yet)
ablob = bucket.get_blob(gs_rel_path)
if not ablob:
raise GoogleStorageIOError(
"No such file on Google Storage: '{}'".format(gs_rel_path))
# A tmp file to serve the intermediate phase;
# it should be on the same filesystem as localpath
tmp_fid, tmp_file_path = tempfile.mkstemp(text=(not binary_mode),
dir=tmpdir)
# Close the descriptor returned by mkstemp; the download below writes by filename
os.close(tmp_fid)
# set chunk_size to reasonable default
# https://github.com/GoogleCloudPlatform/google-cloud-python/issues/2222
ablob.chunk_size = 1<<30
# Download starts here...
ablob.download_to_filename(client=self._client, filename=tmp_file_path)
# ...and when it ends, move the finished file over.
# Instead of writing directly to localpath, we download to a temporary
# file and then move it into its final location, so that corrupted or
# incomplete data never lingers at the destination.
return os.rename(tmp_file_path, localpath) |
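A call sketch; `storage` stands in for an instance of the enclosing class, whose name is not shown here:

storage.download_to_path("gs://cohorts/sample.bam",
                         "/mnt/cohorts/sample.bam",
                         binary_mode=True)  # BAM is binary; text mode could mangle it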
Accurate float rounding from http://stackoverflow.com/a/15398691.
def round_float(f, digits, rounding=ROUND_HALF_UP):
"""
Accurate float rounding from http://stackoverflow.com/a/15398691.
"""
return Decimal(str(f)).quantize(Decimal(10) ** (-1 * digits),
rounding=rounding) |
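A quick check of why routing through str() matters: the literal 2.675 is actually stored as 2.674999..., so the builtin rounds down, while Decimal(str(2.675)) keeps the intended digits:

print(round(2.675, 2))        # 2.67 -- binary float artifact
print(round_float(2.675, 2))  # 2.68 (a Decimal)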
Returns a string representing a float, where the number of
significant digits is min_digits unless it takes more digits
to hit a non-zero digit (and the number is 0 < x < 1).
We stop looking for a non-zero digit after max_digits.
def float_str(f, min_digits=2, max_digits=6):
"""
Returns a string representing a float, where the number of
significant digits is min_digits unless it takes more digits
to hit a non-zero digit (and the number is 0 < x < 1).
We stop looking for a non-zero digit after max_digits.
"""
if f >= 1 or f <= 0:
return str(round_float(f, min_digits))
start_str = str(round_float(f, max_digits))
digits = start_str.split(".")[1]
non_zero_indices = []
for i, digit in enumerate(digits):
if digit != "0":
non_zero_indices.append(i + 1)
# Only saw 0s.
if len(non_zero_indices) == 0:
num_digits = min_digits
else:
# Of the non-zero digits, pick the num_digit'th of those (including any zeros)
min_non_zero_indices = range(non_zero_indices[0], non_zero_indices[-1] + 1)[:min_digits]
num_digits = min_non_zero_indices[-1]
return str(round_float(f, num_digits)) |
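Worked examples under the defaults (min_digits=2, max_digits=6):

print(float_str(123.456))     # '123.46' -- >= 1, so plain rounding to 2 digits
print(float_str(0.00012345))  # '0.00012' -- walks past the leading zeros first
print(float_str(0.0000001))   # '0.00' -- nothing non-zero within max_digits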
Returns the full name (first and last) if a name is available.
If not, returns the username if available.
If that is not available either, returns the user id as a string.
def default_format(self):
"""
Returns the full name (first and last) if a name is available.
If not, returns the username if available.
If that is not available either, returns the user id as a string.
"""
user = self.user
if user.first_name is not None:
return self.full_name
elif user.username is not None:
return user.username
else:
return str(user.id) |
Returns the first and last name of the user separated by a space.
def full_name(self):
"""
Returns the first and last name of the user separated by a space.
"""
formatted_user = []
if self.user.first_name is not None:
formatted_user.append(self.user.first_name)
if self.user.last_name is not None:
formatted_user.append(self.user.last_name)
return " ".join(formatted_user) |
Returns the full name (first and last parts), and the username between brackets if the user has it.
If there is no info about the user, returns the user id between < and >.
def full_format(self):
"""
Returns the full name (first and last parts), and the username between brackets if the user has it.
If there is no info about the user, returns the user id between < and >.
"""
formatted_user = self.full_name
if self.user.username is not None:
formatted_user += " [" + self.user.username + "]"
if not formatted_user:
formatted_user = self._id()
return formatted_user |
Returns all the info available for the user in the following format:
name [username] <id> (locale) bot_or_user
If any data is not available, it is not added.
def full_data(self):
"""
Returns all the info available for the user in the following format:
name [username] <id> (locale) bot_or_user
If any data is not available, it is not added.
"""
data = [
self.full_name,
self._username(),
self._id(),
self._language_code(),
self._is_bot()
]
return " ".join(filter(None, data)) |
Returns all the info available for the chat in the following format:
title [username] (type) <id>
If any data is not available, it is not added.
def full_data(self):
"""
Returns all the info available for the chat in the following format:
title [username] (type) <id>
If any data is not available, it is not added.
"""
data = [
self.chat.title,
self._username(),
self._type(),
self._id()
]
return " ".join(filter(None, data)) |
Decorator for functions that should automatically fall back to the Cohort-default filter_fn and
normalized_per_mb if not specified.
def use_defaults(func):
"""
Decorator for functions that should automatically fall back to the Cohort-default filter_fn and
normalized_per_mb if not specified.
"""
@wraps(func)
def wrapper(row, cohort, filter_fn=None, normalized_per_mb=None, **kwargs):
filter_fn = first_not_none_param([filter_fn, cohort.filter_fn], no_filter)
normalized_per_mb = first_not_none_param([normalized_per_mb, cohort.normalized_per_mb], False)
return func(row=row,
cohort=cohort,
filter_fn=filter_fn,
normalized_per_mb=normalized_per_mb,
**kwargs)
return wrapper |
Decorator for functions that return a collection (technically a dict of collections) that should be
counted up. Also automatically falls back to the Cohort-default filter_fn and normalized_per_mb if
not specified.
def count_function(func):
"""
Decorator for functions that return a collection (technically a dict of collections) that should be
counted up. Also automatically falls back to the Cohort-default filter_fn and normalized_per_mb if
not specified.
"""
# Fall back to Cohort-level defaults.
@use_defaults
@wraps(func)
def wrapper(row, cohort, filter_fn=None, normalized_per_mb=None, **kwargs):
per_patient_data = func(row=row,
cohort=cohort,
filter_fn=filter_fn,
normalized_per_mb=normalized_per_mb,
**kwargs)
patient_id = row["patient_id"]
if patient_id in per_patient_data:
count = len(per_patient_data[patient_id])
if normalized_per_mb:
count /= float(get_patient_to_mb(cohort)[patient_id])
return count
return np.nan
return wrapper |
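A sketch of a decorated counter; the body only returns per-patient collections (here via `load_variants`, mirroring the builder below) while the decorator handles defaults, counting, and normalization:

@count_function
def variant_count(row, cohort, filter_fn, normalized_per_mb, **kwargs):
    return cohort.load_variants(
        patients=[cohort.patient_from_id(row["patient_id"])],
        filter_fn=filter_fn,
        **kwargs)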
Creates a function that counts variants that are filtered by the provided filterable_variant_function.
The filterable_variant_function is a function that takes a filterable_variant and returns True or False.
Users of this builder need not worry about applying e.g. the Cohort's default `filter_fn`. That will be applied as well.
def count_variants_function_builder(function_name, filterable_variant_function=None):
"""
Creates a function that counts variants that are filtered by the provided filterable_variant_function.
The filterable_variant_function is a function that takes a filterable_variant and returns True or False.
Users of this builder need not worry about applying e.g. the Cohort's default `filter_fn`. That will be applied as well.
"""
@count_function
def count(row, cohort, filter_fn, normalized_per_mb, **kwargs):
def count_filter_fn(filterable_variant, **kwargs):
assert filter_fn is not None, "filter_fn should never be None, but it is."
return ((filterable_variant_function(filterable_variant) if filterable_variant_function is not None else True) and
filter_fn(filterable_variant, **kwargs))
patient_id = row["patient_id"]
return cohort.load_variants(
patients=[cohort.patient_from_id(patient_id)],
filter_fn=count_filter_fn,
**kwargs)
count.__name__ = function_name
count.__doc__ = str("".join(inspect.getsourcelines(filterable_variant_function)[0])) if filterable_variant_function is not None else ""
return count |
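A usage sketch, assuming a constructed `cohort`; the predicate follows varcode's Variant attributes, which is an assumption here:

snv_count = count_variants_function_builder(
    "snv_count",
    filterable_variant_function=lambda fv: fv.variant.is_snv)  # attribute assumed
df = cohort.as_dataframe(on=snv_count)  # one count per patient row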
Create a function that counts effects that are filtered by the provided filterable_effect_function.
The filterable_effect_function is a function that takes a filterable_effect and returns True or False.
Users of this builder need not worry about applying e.g. the Cohort's default `filter_fn`. That will be applied as well.
def count_effects_function_builder(function_name, only_nonsynonymous, filterable_effect_function=None):
"""
Create a function that counts effects that are filtered by the provided filterable_effect_function.
The filterable_effect_function is a function that takes a filterable_effect and returns True or False.
Users of this builder need not worry about applying e.g. the Cohort's default `filter_fn`. That will be applied as well.
"""
@count_function
def count(row, cohort, filter_fn, normalized_per_mb, **kwargs):
def count_filter_fn(filterable_effect, **kwargs):
assert filter_fn is not None, "filter_fn should never be None, but it is."
return ((filterable_effect_function(filterable_effect) if filterable_effect_function is not None else True) and
filter_fn(filterable_effect, **kwargs))
# This only loads one effect per variant.
patient_id = row["patient_id"]
return cohort.load_effects(
only_nonsynonymous=only_nonsynonymous,
patients=[cohort.patient_from_id(patient_id)],
filter_fn=count_filter_fn,
**kwargs)
count.__name__ = function_name
count.__doc__ = (("only_nonsynonymous=%s\n" % only_nonsynonymous) +
(str("".join(inspect.getsourcelines(filterable_effect_function)[0])) if filterable_effect_function is not None else ""))
# Keep track of these to be able to query the returned function for these attributes
count.only_nonsynonymous = only_nonsynonymous
count.filterable_effect_function = filterable_effect_function
return count |
Estimate purity based on 2 * median VAF.
Even if the Cohort has a default filter_fn, ignore it: we want to use all variants for
this estimate.
def median_vaf_purity(row, cohort, **kwargs):
"""
Estimate purity based on 2 * median VAF.
Even if the Cohort has a default filter_fn, ignore it: we want to use all variants for
this estimate.
"""
patient_id = row["patient_id"]
patient = cohort.patient_from_id(patient_id)
variants = cohort.load_variants(patients=[patient], filter_fn=no_filter)
if patient_id in variants:
variants = variants[patient_id]
else:
return np.nan
def grab_vaf(variant):
filterable_variant = FilterableVariant(variant, variants, patient)
return variant_stats_from_variant(variant, filterable_variant.variant_metadata).tumor_stats.variant_allele_frequency
vafs = [grab_vaf(variant) for variant in variants]
return 2 * pd.Series(vafs).median() |
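The 2x factor is the usual diploid argument: a clonal heterozygous variant sits on one of two chromosome copies, so its expected VAF is purity / 2, and inverting gives purity ~ 2 * median VAF. A toy check:

import pandas as pd

vafs = pd.Series([0.15, 0.18, 0.20, 0.22, 0.25])
print(2 * vafs.median())  # 0.4 -> roughly 40% estimated tumor purity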
Calculate the bootstrapped AUC for a given col trying to predict a pred_col.
Parameters
----------
df : pandas.DataFrame
col : str
column to retrieve the values from
pred_col : str
the column we're trying to predict
n_bootstrap : int
the number of bootstrap samples
Returns
-------
list : AUCs for each sampling
def bootstrap_auc(df, col, pred_col, n_bootstrap=1000):
"""
Calculate the bootstrapped AUC for a given col trying to predict a pred_col.
Parameters
----------
df : pandas.DataFrame
col : str
column to retrieve the values from
pred_col : str
the column we're trying to predict
n_bootstrap : int
the number of bootstrap samples
Returns
-------
list : AUCs for each sampling
"""
scores = np.zeros(n_bootstrap)
old_len = len(df)
df.dropna(subset=[col], inplace=True)
new_len = len(df)
if new_len < old_len:
logger.info("Dropping NaN values in %s to go from %d to %d rows" % (col, old_len, new_len))
preds = df[pred_col].astype(int)
for i in range(n_bootstrap):
sampled_counts, sampled_pred = resample(df[col], preds)
if is_single_class(sampled_pred, col=pred_col):
continue
scores[i] = roc_auc_score(sampled_pred, sampled_counts)
return scores |
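A usage sketch with made-up column names; note the function drops NaN rows from `df` in place before sampling:

import numpy as np

scores = bootstrap_auc(df, col="snv_count", pred_col="is_responder", n_bootstrap=200)
print(np.percentile(scores, [2.5, 97.5]))  # rough 95% bootstrap interval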
:param are_async: True if the callbacks execute asynchronously, posting any heavy work to another thread.
def set_callbacks(self, worker_start_callback: callable, worker_end_callback: callable, are_async: bool = False):
"""
:param are_async: True if the callbacks execute asynchronously, posting any heavy work to another thread.
"""
# We set self.worker_start_callback and self.worker_end_callback to lambdas
# (instead of saving them in private vars and moving the lambda logic to a
# member function) partly to make callback updates atomic: once a callback
# has been posted, it runs exactly as it was at that moment. A later call to
# set_callbacks only affects callbacks posted after the update, never a
# pending one.
# If callback is async, execute the start callback in the calling thread
scheduler = self.immediate if are_async else self.background
self.worker_start_callback = lambda worker: scheduler(Work(
lambda: worker_start_callback(worker), "worker_start_callback:" + worker.name
))
# As the end callback is called *just* before the thread dies,
# there is no problem running it on the thread
self.worker_end_callback = lambda worker: self.immediate(Work(
lambda: worker_end_callback(worker), "worker_end_callback:" + worker.name
)) |
Can be safely called multiple times on the same worker (for workers that support it)
to start a new thread for it.
def _start_worker(self, worker: Worker):
"""
Can be safely called multiple times on the same worker (for workers that support it)
to start a new thread for it.
"""
# This function is called from main thread and from worker pools threads to start their children threads
with self.running_workers_lock:
self.running_workers.append(worker)
thread = SchedulerThread(worker, self._worker_ended)
thread.start()
# This may or may not be posted to a background thread (see set_callbacks)
self.worker_start_callback(worker) |
Creates a new Worker and starts a new Thread with it. Returns the Worker.
def new_worker(self, name: str):
"""Creates a new Worker and start a new Thread with it. Returns the Worker."""
if not self.running:
return self.immediate_worker
worker = self._new_worker(name)
self._start_worker(worker)
return worker |
Creates a new worker pool and starts it.
Returns the Worker that schedules work to the pool.
def new_worker_pool(self, name: str, min_workers: int = 0, max_workers: int = 1,
max_seconds_idle: int = DEFAULT_WORKER_POOL_MAX_SECONDS_IDLE):
"""
Creates a new worker pool and starts it.
Returns the Worker that schedules work to the pool.
"""
if not self.running:
return self.immediate_worker
worker = self._new_worker_pool(name, min_workers, max_workers, max_seconds_idle)
self._start_worker_pool(worker)
return worker |
Return this Cohort as a DataFrame, and optionally include additional columns
using `on`.
Parameters
----------
on : str or function or list or dict, optional
- A column name.
- Or a function that creates a new column for comparison, e.g. count.snv_count.
- Or a list of column-generating functions or column names.
- Or a map of new column names to their column-generating functions or column names.
If `on` is a function or functions, kwargs is passed to those functions.
Otherwise kwargs is ignored.
Other parameters
----------------
`return_cols`: (bool)
If True, return column names generated via `on` along with the `DataFrame`
as a `DataFrameHolder` tuple.
`rename_cols`: (bool)
If True, then return columns using "stripped" column names
("stripped" means lower-case names without punctuation other than `_`)
See `utils.strip_column_names` for more details
defaults to False
`keep_paren_contents`: (bool)
If True, then contents of column names within parens are kept.
If False, contents of column names within-parens are dropped.
Defaults to True
Returns
-------
`DataFrame` (or `DataFrameHolder` if `return_cols` is True)
def as_dataframe(self, on=None, join_with=None, join_how=None,
return_cols=False, rename_cols=False,
keep_paren_contents=True, **kwargs):
"""
Return this Cohort as a DataFrame, and optionally include additional columns
using `on`.
Parameters
----------
on : str or function or list or dict, optional
- A column name.
- Or a function that creates a new column for comparison, e.g. count.snv_count.
- Or a list of column-generating functions or column names.
- Or a map of new column names to their column-generating functions or column names.
If `on` is a function or functions, kwargs is passed to those functions.
Otherwise kwargs is ignored.
Other parameters
----------------
`return_cols`: (bool)
If True, return column names generated via `on` along with the `DataFrame`
as a `DataFrameHolder` tuple.
`rename_cols`: (bool)
If True, then return columns using "stripped" column names
("stripped" means lower-case names without punctuation other than `_`)
See `utils.strip_column_names` for more details
defaults to False
`keep_paren_contents`: (bool)
If True, then contents of column names within parens are kept.
If False, contents of column names within-parens are dropped.
Defaults to True
Returns
-------
`DataFrame` (or `DataFrameHolder` if `return_cols` is True)
"""
df = self._as_dataframe_unmodified(join_with=join_with, join_how=join_how)
if on is None:
return DataFrameHolder.return_obj(None, df, return_cols)
if type(on) == str:
return DataFrameHolder.return_obj(on, df, return_cols)
def apply_func(on, col, df):
"""
Sometimes we have functions that, by necessity, have more parameters
than just `row`. We construct a function with just the `row` parameter
so it can be sent to `DataFrame.apply`. We hackishly pass `cohort`
(as `self`) along if the function accepts a `cohort` argument.
"""
on_argnames = on.__code__.co_varnames
if "cohort" not in on_argnames:
func = lambda row: on(row=row, **kwargs)
else:
func = lambda row: on(row=row, cohort=self, **kwargs)
if self.show_progress:
tqdm.pandas(desc=col)
df[col] = df.progress_apply(func, axis=1) ## depends on tqdm on prev line
else:
df[col] = df.apply(func, axis=1)
return DataFrameHolder(col, df)
def func_name(func, num=0):
return func.__name__ if not is_lambda(func) else "column_%d" % num
def is_lambda(func):
return func.__name__ == (lambda: None).__name__
if type(on) == FunctionType:
return apply_func(on, func_name(on), df).return_self(return_cols)
if len(kwargs) > 0:
logger.warning("Note: kwargs used with multiple functions; passing them to all functions")
if type(on) == dict:
cols = []
for key, value in on.items():
if type(value) == str:
df[key] = df[value]
col = key
elif type(value) == FunctionType:
col, df = apply_func(on=value, col=key, df=df)
else:
raise ValueError("A value of `on`, %s, is not a str or function" % str(value))
cols.append(col)
if type(on) == list:
cols = []
for i, elem in enumerate(on):
if type(elem) == str:
col = elem
elif type(elem) == FunctionType:
col = func_name(elem, i)
col, df = apply_func(on=elem, col=col, df=df)
cols.append(col)
if rename_cols:
rename_dict = _strip_column_names(df.columns, keep_paren_contents=keep_paren_contents)
df.rename(columns=rename_dict, inplace=True)
cols = [rename_dict[col] for col in cols]
return DataFrameHolder(cols, df).return_self(return_cols) |
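The accepted shapes of `on`, sketched with hypothetical column names, reusing the `snv_count` sketch from above, and assuming `DataFrameHolder` unpacks as (cols, df):

df = cohort.as_dataframe()                      # base clinical DataFrame
df = cohort.as_dataframe(on="age")              # existing column, passthrough
cols, df = cohort.as_dataframe(
    on={"snvs": snv_count, "age_copy": "age"},  # new names -> functions or columns
    return_cols=True)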
Instead of joining a DataFrameJoiner with the Cohort in `as_dataframe`, sometimes
we may want to just directly load a particular DataFrame.
def load_dataframe(self, df_loader_name):
"""
Instead of joining a DataFrameJoiner with the Cohort in `as_dataframe`, sometimes
we may want to just directly load a particular DataFrame.
"""
logger.debug("loading dataframe: {}".format(df_loader_name))
# Get the DataFrameLoader object corresponding to this name.
df_loaders = [df_loader for df_loader in self.df_loaders if df_loader.name == df_loader_name]
if len(df_loaders) == 0:
raise ValueError("No DataFrameLoader with name %s" % df_loader_name)
if len(df_loaders) > 1:
raise ValueError("Multiple DataFrameLoaders with name %s" % df_loader_name)
return df_loaders[0].load_dataframe() |
Return name of function, using default value if function not defined
def _get_function_name(self, fn, default="None"):
""" Return name of function, using default value if function not defined
"""
if fn is None:
fn_name = default
else:
fn_name = fn.__name__
return fn_name |
Load a dictionary of patient_id to varcode.VariantCollection
Parameters
----------
patients : str, optional
Filter to a subset of patients
filter_fn : function
Takes a FilterableVariant and returns a boolean. Only variants returning True are preserved.
Overrides default self.filter_fn. `None` passes through to self.filter_fn.
Returns
-------
merged_variants
Dictionary of patient_id to VariantCollection
def load_variants(self, patients=None, filter_fn=None, **kwargs):
"""Load a dictionary of patient_id to varcode.VariantCollection
Parameters
----------
patients : str, optional
Filter to a subset of patients
filter_fn : function
Takes a FilterableVariant and returns a boolean. Only variants returning True are preserved.
Overrides default self.filter_fn. `None` passes through to self.filter_fn.
Returns
-------
merged_variants
Dictionary of patient_id to VariantCollection
"""
filter_fn = first_not_none_param([filter_fn, self.filter_fn], no_filter)
filter_fn_name = self._get_function_name(filter_fn)
logger.debug("loading variants with filter_fn: {}".format(filter_fn_name))
patient_variants = {}
for patient in self.iter_patients(patients):
variants = self._load_single_patient_variants(patient, filter_fn, **kwargs)
if variants is not None:
patient_variants[patient.id] = variants
return patient_variants |
Construct string representing state of filter_fn
Used to cache filtered variants or effects uniquely depending on filter fn values
def _hash_filter_fn(self, filter_fn, **kwargs):
""" Construct string representing state of filter_fn
Used to cache filtered variants or effects uniquely depending on filter fn values
"""
filter_fn_name = self._get_function_name(filter_fn, default="filter-none")
logger.debug("Computing hash for filter_fn: {} with kwargs {}".format(filter_fn_name, str(dict(**kwargs))))
# hash function source code
fn_source = str(dill.source.getsource(filter_fn))
pickled_fn_source = pickle.dumps(fn_source) ## encode as byte string
hashed_fn_source = int(hashlib.sha1(pickled_fn_source).hexdigest(), 16) % (10 ** 11)
# hash kwarg values
kw_dict = dict(**kwargs)
if not kw_dict:
kw_hash = ["default"]
else:
kw_hash = ["{}-{}".format(key, h) for (key, h) in sorted(kw_dict.items())]
# hash closure vars - for case where filter_fn is defined within closure of filter_fn
closure = []
nonlocals = inspect.getclosurevars(filter_fn).nonlocals
for (key, val) in nonlocals.items():
## capture hash for any function within closure
if inspect.isfunction(val):
closure.append(self._hash_filter_fn(val))
closure.sort() # Sorted for file name consistency
closure_str = "null" if len(closure) == 0 else "-".join(closure)
# construct final string comprising hashed components
hashed_fn = ".".join(["-".join([filter_fn_name,
str(hashed_fn_source)]),
".".join(kw_hash),
closure_str]
)
return hashed_fn |
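So the cache key has the shape <fn_name>-<source_hash>.<kwargs>.<closure>. A standalone sketch of the source-hash component, following the code above:

import dill, hashlib, pickle

def no_filter(filterable_variant, **kwargs):
    return True

fn_source = str(dill.source.getsource(no_filter))
hashed = int(hashlib.sha1(pickle.dumps(fn_source)).hexdigest(), 16) % (10 ** 11)
print("no_filter-{}".format(hashed))  # at most 11 digits, stable across runs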
Load filtered, merged variants for a single patient, optionally using cache
Note that variants are first merged and then filtered, and
each step is cached independently. Turn on debug statements for more
details about cached files.
Use `_load_single_patient_merged_variants` to see merged variants without filtering.
def _load_single_patient_variants(self, patient, filter_fn, use_cache=True, **kwargs):
""" Load filtered, merged variants for a single patient, optionally using cache
Note that variants are first merged and then filtered, and
each step is cached independently. Turn on debug statements for more
details about cached files.
Use `_load_single_patient_merged_variants` to see merged variants without filtering.
"""
if filter_fn is None:
use_filtered_cache = False
else:
filter_fn_name = self._get_function_name(filter_fn)
logger.debug("loading variants for patient {} with filter_fn {}".format(patient.id, filter_fn_name))
use_filtered_cache = use_cache
## confirm that we can get cache-name (else don't use filtered cache)
if use_filtered_cache:
logger.debug("... identifying filtered-cache file name")
try:
## try to load filtered variants from cache
filtered_cache_file_name = "%s-variants.%s.pkl" % (self.merge_type,
self._hash_filter_fn(filter_fn, **kwargs))
except Exception:
logger.warning("... error identifying filtered-cache file name for patient {}: {}".format(
patient.id, filter_fn_name))
use_filtered_cache = False
else:
logger.debug("... trying to load filtered variants from cache: {}".format(filtered_cache_file_name))
try:
cached = self.load_from_cache(self.cache_names["variant"], patient.id, filtered_cache_file_name)
if cached is not None:
return cached
except Exception:
logger.warning("Error loading variants from cache for patient: {}".format(patient.id))
## get merged variants
logger.debug("... getting merged variants for: {}".format(patient.id))
merged_variants = self._load_single_patient_merged_variants(patient, use_cache=use_cache)
# Note None here is different from 0. We want to preserve None
if merged_variants is None:
logger.info("Variants did not exist for patient %s" % patient.id)
return None
logger.debug("... applying filters to variants for: {}".format(patient.id))
filtered_variants = filter_variants(variant_collection=merged_variants,
patient=patient,
filter_fn=filter_fn,
**kwargs)
if use_filtered_cache:
logger.debug("... saving filtered variants to cache: {}".format(filtered_cache_file_name))
self.save_to_cache(filtered_variants, self.cache_names["variant"], patient.id, filtered_cache_file_name)
return filtered_variants |
Load merged variants for a single patient, optionally using cache
Note that merged variants are not filtered.
Use `_load_single_patient_variants` to get filtered variants
def _load_single_patient_merged_variants(self, patient, use_cache=True):
""" Load merged variants for a single patient, optionally using cache
Note that merged variants are not filtered.
Use `_load_single_patient_variants` to get filtered variants
"""
logger.debug("loading merged variants for patient {}".format(patient.id))
no_variants = False
try:
# get merged-variants from cache
if use_cache:
## load unfiltered variants into list of collections
variant_cache_file_name = "%s-variants.pkl" % (self.merge_type)
merged_variants = self.load_from_cache(self.cache_names["variant"], patient.id, variant_cache_file_name)
if merged_variants is not None:
return merged_variants
# get variant collections from file
variant_collections = []
optional_maf_cols = ["t_ref_count", "t_alt_count", "n_ref_count", "n_alt_count"]
if self.additional_maf_cols is not None:
optional_maf_cols.extend(self.additional_maf_cols)
for patient_variants in patient.variants_list:
if type(patient_variants) == str:
if ".vcf" in patient_variants:
try:
variant_collections.append(varcode.load_vcf_fast(patient_variants))
# StopIteration is thrown for empty VCFs. For an empty VCF, don't append any variants,
# and don't throw an error. But do record a warning, in case the StopIteration was
# thrown for another reason.
except StopIteration as e:
logger.warning("Empty VCF (or possibly a VCF error) for patient {}: {}".format(
patient.id, str(e)))
elif ".maf" in patient_variants:
# See variant_stats.maf_somatic_variant_stats
variant_collections.append(
varcode.load_maf(
patient_variants,
optional_cols=optional_maf_cols,
encoding="latin-1"))
else:
raise ValueError("Don't know how to read %s" % patient_variants)
elif type(patient_variants) == VariantCollection:
variant_collections.append(patient_variants)
else:
raise ValueError("Don't know how to read %s" % patient_variants)
# merge variant-collections
if len(variant_collections) == 0:
no_variants = True
elif len(variant_collections) == 1:
# There is nothing to merge
variants = variant_collections[0]
merged_variants = variants
else:
merged_variants = self._merge_variant_collections(variant_collections, self.merge_type)
except IOError:
no_variants = True
# Note that this is the number of variant collections and not the number of
# variants. 0 variants will lead to 0 neoantigens, for example, but 0 variant
# collections will lead to NaN variants and neoantigens.
if no_variants:
print("Variants did not exist for patient %s" % patient.id)
merged_variants = None
# save merged variants to file
if use_cache:
self.save_to_cache(merged_variants, self.cache_names["variant"], patient.id, variant_cache_file_name)
return merged_variants |
Load polyphen2 annotations for all variants
The WHESS/Polyphen2 SQLite database used for the lookup can be
downloaded and bunzip2'ed from http://bit.ly/208mlIU
Parameters
----------
as_dataframe : bool, optional
If True, return a single concatenated DataFrame rather than a
dictionary keyed by patient_id. Defaults to False.
filter_fn : function
Takes a FilterablePolyphen and returns a boolean.
Only annotations returning True are preserved.
Overrides default self.filter_fn. `None` passes through to self.filter_fn.
Returns
-------
annotations
Dictionary of patient_id to a DataFrame that contains annotations
(or a single DataFrame if as_dataframe is True)
def load_polyphen_annotations(self, as_dataframe=False,
filter_fn=None):
"""Load a dataframe containing polyphen2 annotations for all variants
Parameters
----------
database_file : string, sqlite
Path to the WHESS/Polyphen2 SQLite database.
Can be downloaded and bunzip2"ed from http://bit.ly/208mlIU
filter_fn : function
Takes a FilterablePolyphen and returns a boolean.
Only annotations returning True are preserved.
Overrides default self.filter_fn. `None` passes through to self.filter_fn.
Returns
-------
annotations
Dictionary of patient_id to a DataFrame that contains annotations
"""
filter_fn = first_not_none_param([filter_fn, self.filter_fn], no_filter)
patient_annotations = {}
for patient in self:
annotations = self._load_single_patient_polyphen(
patient,
filter_fn=filter_fn)
if annotations is not None:
annotations["patient_id"] = patient.id
patient_annotations[patient.id] = annotations
if as_dataframe:
return pd.concat(patient_annotations.values())
return patient_annotations |
Load a dictionary of patient_id to varcode.EffectCollection
Note that this only loads one effect per variant.
Parameters
----------
patients : str, optional
Filter to a subset of patients
only_nonsynonymous : bool, optional
If true, load only nonsynonymous effects, default False
all_effects : bool, optional
If true, return all effects rather than only the top-priority effect per variant
filter_fn : function
Takes a FilterableEffect and returns a boolean. Only effects returning True are preserved.
Overrides default self.filter_fn. `None` passes through to self.filter_fn.
Returns
-------
effects
Dictionary of patient_id to varcode.EffectCollection
def load_effects(self, patients=None, only_nonsynonymous=False,
all_effects=False, filter_fn=None, **kwargs):
"""Load a dictionary of patient_id to varcode.EffectCollection
Note that this only loads one effect per variant.
Parameters
----------
patients : str, optional
Filter to a subset of patients
only_nonsynonymous : bool, optional
If true, load only nonsynonymous effects, default False
all_effects : bool, optional
If true, return all effects rather than only the top-priority effect per variant
filter_fn : function
Takes a FilterableEffect and returns a boolean. Only effects returning True are preserved.
Overrides default self.filter_fn. `None` passes through to self.filter_fn.
Returns
-------
effects
Dictionary of patient_id to varcode.EffectCollection
"""
filter_fn = first_not_none_param([filter_fn, self.filter_fn], no_filter)
filter_fn_name = self._get_function_name(filter_fn)
logger.debug("loading effects with filter_fn {}".format(filter_fn_name))
patient_effects = {}
for patient in self.iter_patients(patients):
effects = self._load_single_patient_effects(
patient, only_nonsynonymous, all_effects, filter_fn, **kwargs)
if effects is not None:
patient_effects[patient.id] = effects
return patient_effects |
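A usage sketch, assuming a constructed `cohort`:

effects = cohort.load_effects(only_nonsynonymous=True)
for patient_id, effect_collection in effects.items():
    print(patient_id, len(effect_collection))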
Load Kallisto transcript quantification data for a cohort
Parameters
----------
Returns
-------
kallisto_data : Pandas dataframe
Pandas dataframe with Kallisto data for all patients
columns include patient_id, gene_name, est_counts
def load_kallisto(self):
"""
Load Kallisto transcript quantification data for a cohort
Parameters
----------
Returns
-------
kallisto_data : Pandas dataframe
Pandas dataframe with Kallisto data for all patients
columns include patient_id, gene_name, est_counts
"""
kallisto_data = pd.concat(
[self._load_single_patient_kallisto(patient) for patient in self],
copy=False
)
if self.kallisto_ensembl_version is None:
raise ValueError("Required a kallisto_ensembl_version but none was specified")
ensembl_release = cached_release(self.kallisto_ensembl_version)
kallisto_data["gene_name"] = \
kallisto_data["target_id"].map(lambda t: ensembl_release.gene_name_of_transcript_id(t))
# sum counts across genes
kallisto_data = \
kallisto_data.groupby(["patient_id", "gene_name"])[["est_counts"]].sum().reset_index()
return kallisto_data |
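A usage sketch: pivot the long-format result into a patients-by-genes expression matrix, using the columns documented above:

kallisto_df = cohort.load_kallisto()
expression = kallisto_df.pivot(index="patient_id", columns="gene_name",
                               values="est_counts")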