_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 31 13.1k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q258400 | BasePlayer._ensure_started | validation | def _ensure_started(self):
"""Ensure player backing process is started
"""
if self._process and self._process.poll() is None:
return
if not getattr(self, "_cmd"):
raise RuntimeError("Player command is not configured")
| python | {
"resource": ""
} |
q258401 | BasePlayer.play | validation | def play(self, song):
"""Play a new song from a Pandora model
Returns once the stream starts but does not shut down the remote audio
output backend process. Calls the input callback when the user has
input.
"""
self._callbacks.play(song)
self._load_track(song)
time.sleep(2) # Give the backend time to load the track
while True:
try:
| python | {
"resource": ""
} |
q258402 | BasePlayer.play_station | validation | def play_station(self, station):
"""Play the station until something ends it
This function will run forever until terminated by calling
end_station.
"""
for song in iterate_forever(station.get_playlist):
| python | {
"resource": ""
} |
q258403 | VLCPlayer._post_start | validation | def _post_start(self):
"""Set stdout to non-blocking
VLC does not always return a newline when reading status so in order to
be lazy and still use the read API without caring about how much output
there is we switch stdout to nonblocking mode and just read a large
chunk of data in order to be lazy and still use the read API without
| python | {
"resource": ""
} |
q258404 | PlayerApp.station_selection_menu | validation | def station_selection_menu(self, error=None):
"""Format a station menu and make the user select a station
"""
self.screen.clear()
if error:
| python | {
"resource": ""
} |
q258405 | PlayerApp.input | validation | def input(self, input, song):
"""Input callback, handles key presses
"""
try:
cmd = getattr(self, self.CMD_MAP[input][1])
except (IndexError, KeyError):
| python | {
"resource": ""
} |
q258406 | retries | validation | def retries(max_tries, exceptions=(Exception,)):
"""Function decorator implementing retrying logic.
exceptions: A tuple of exception classes; default (Exception,)
The decorator will call the function up to max_tries times if it raises
an exception.
By default it catches instances of the Exception class and subclasses.
This will recover after all but the most fatal errors. You may specify a
custom tuple of exception classes with the 'exceptions' argument; the
function will only be retried if it raises one of the specified
exceptions.
"""
def decorator(func):
def function(*args, **kwargs):
retries_left = max_tries
while retries_left > 0:
try:
retries_left -= 1
return func(*args, **kwargs)
except exceptions as exc:
| python | {
"resource": ""
} |
q258407 | iterate_forever | validation | def iterate_forever(func, *args, **kwargs):
"""Iterate over a finite iterator forever
When the iterator is exhausted will call the function again to generate a
new iterator and keep iterating.
"""
output = func(*args, **kwargs)
while True:
try:
| python | {
"resource": ""
} |
q258408 | Screen.get_integer | validation | def get_integer(prompt):
"""Gather user input and convert it to an integer
Will keep trying till the user enters an integer or until they ^C the
program.
"""
while True:
| python | {
"resource": ""
} |
q258409 | TaskPackageDropbox.open | validation | def open(self):
"""open the drop box
You need to call this method before starting putting packages.
Returns
-------
None
| python | {
"resource": ""
} |
q258410 | TaskPackageDropbox.put | validation | def put(self, package):
"""put a task
This method places a task in the working area and have the
dispatcher execute it.
If you need to put multiple tasks, it can be much faster to
use `put_multiple()` than to use this method multiple times
depending on the dispatcher.
Parameters
----------
package : callable
A task
Returns
-------
int
| python | {
"resource": ""
} |
q258411 | TaskPackageDropbox.receive | validation | def receive(self):
"""return pairs of package indices and results of all tasks
This method waits until all tasks finish.
Returns
-------
list
A list of pairs of package indices and results
"""
ret = [ ] # | python | {
"resource": ""
} |
q258412 | TaskPackageDropbox.poll | validation | def poll(self):
"""return pairs of package indices and results of finished tasks
This method does not wait for tasks to finish.
Returns
-------
list
A list of pairs of package indices | python | {
"resource": ""
} |
q258413 | TaskPackageDropbox.receive_one | validation | def receive_one(self):
"""return a pair of a package index and result of a task
This method waits until a tasks finishes. It returns `None` if
no task is running.
Returns
-------
tuple or None
A pair of a package index and result. `None` if no tasks
is running.
"""
if not self.runid_pkgidx_map:
return None
while True:
| python | {
"resource": ""
} |
q258414 | MPEventLoopRunner.run_multiple | validation | def run_multiple(self, eventLoops):
"""run the event loops in the background.
Args:
eventLoops (list): a list of event loops to run
| python | {
"resource": ""
} |
q258415 | MPEventLoopRunner.poll | validation | def poll(self):
"""Return pairs of run ids and results of finish event loops.
"""
ret | python | {
"resource": ""
} |
q258416 | MPEventLoopRunner.receive_one | validation | def receive_one(self):
"""Return a pair of a run id and a result.
This method waits until an event loop finishes.
This method returns None if no loop is running.
"""
if self.nruns == 0:
| python | {
"resource": ""
} |
q258417 | MPEventLoopRunner.receive | validation | def receive(self):
"""Return pairs of run ids and results.
This method waits until all event loops finish
"""
ret = self.communicationChannel.receive_all()
self.nruns -= len(ret)
if self.nruns > 0:
import logging
logger = logging.getLogger(__name__)
logger.warning(
'too few results received: {} results received, {} more expected'.format(
len(ret), self.nruns))
elif self.nruns < 0: | python | {
"resource": ""
} |
q258418 | MPEventLoopRunner.end | validation | def end(self):
"""wait until all event loops end and returns the results.
"""
results = self.communicationChannel.receive()
if self.nruns != len(results):
import logging
logger = logging.getLogger(__name__)
# logger.setLevel(logging.DEBUG)
| python | {
"resource": ""
} |
q258419 | key_vals_dict_to_tuple_list | validation | def key_vals_dict_to_tuple_list(key_vals_dict, fill=float('nan')):
"""Convert ``key_vals_dict`` to ``tuple_list``.
Args:
key_vals_dict (dict): The first parameter.
fill: a value to fill missing data
Returns:
A list of tuples
"""
tuple_list = [ ]
if not key_vals_dict: return tuple_list
vlen = max([len(vs) for vs in itertools.chain(*key_vals_dict.values())])
for k, vs in key_vals_dict.items():
| python | {
"resource": ""
} |
q258420 | WorkingArea.open | validation | def open(self):
"""Open the working area
Returns
-------
None
"""
self.path = self._prepare_dir(self.topdir)
| python | {
"resource": ""
} |
q258421 | WorkingArea.put_package | validation | def put_package(self, package):
"""Put a package
Parameters
----------
package :
a task package
Returns
-------
int
A package index
"""
self.last_package_index += 1
package_index = self.last_package_index
package_fullpath = self.package_fullpath(package_index)
| python | {
"resource": ""
} |
q258422 | WorkingArea.collect_result | validation | def collect_result(self, package_index):
"""Collect the result of a task
Parameters
----------
package_index :
a package index
Returns
-------
obj
The result of the task
"""
result_fullpath = self.result_fullpath(package_index)
# e.g., '{path}/tpd_20161129_122841_HnpcmF/results/task_00009/result.p.gz'
try:
| python | {
"resource": ""
} |
q258423 | WorkingArea.package_fullpath | validation | def package_fullpath(self, package_index):
"""Returns the full path of the package
This method returns the full path to the package. This method
simply constructs the path based on the convention and doesn't
check if the package actually exists.
Parameters
----------
package_index :
a package index
Returns
-------
| python | {
"resource": ""
} |
q258424 | WorkingArea.result_relpath | validation | def result_relpath(self, package_index):
"""Returns the relative path of the result
This method returns the path to the result relative to the
top dir of the working area. This method simply constructs the
path based on the convention and doesn't check if the result
actually exists.
Parameters
----------
package_index :
a package index
Returns
------- | python | {
"resource": ""
} |
q258425 | WorkingArea.result_fullpath | validation | def result_fullpath(self, package_index):
"""Returns the full path of the result
This method returns the full path to the result. This method
simply constructs the path based on the convention and doesn't
check if the result actually exists.
Parameters
----------
package_index :
a package index
Returns
-------
| python | {
"resource": ""
} |
q258426 | HTCondorJobSubmitter.run_multiple | validation | def run_multiple(self, workingArea, package_indices):
"""Submit multiple jobs
Parameters
----------
workingArea :
A workingArea
package_indices : list(int)
A list of package indices
Returns
-------
list(str)
The list of the run IDs of the jobs
"""
if not package_indices:
return [ ]
job_desc = self._compose_job_desc(workingArea, package_indices)
clusterprocids = submit_jobs(job_desc, cwd=workingArea.path)
| python | {
"resource": ""
} |
q258427 | HTCondorJobSubmitter.poll | validation | def poll(self):
"""Return the run IDs of the finished jobs
Returns
-------
list(str)
The list of the run IDs of the finished jobs
"""
clusterids = clusterprocids2clusterids(self.clusterprocids_outstanding)
clusterprocid_status_list = query_status_for(clusterids)
# e.g., [['1730126.0', 2], ['1730127.0', 2], ['1730129.1', 1], ['1730130.0', 1]]
if clusterprocid_status_list:
clusterprocids, statuses = zip(*clusterprocid_status_list)
else:
clusterprocids, statuses = (), ()
clusterprocids_finished = [i for i in self.clusterprocids_outstanding if i not in clusterprocids]
self.clusterprocids_finished.extend(clusterprocids_finished)
self.clusterprocids_outstanding[:] = clusterprocids
| python | {
"resource": ""
} |
q258428 | HTCondorJobSubmitter.wait | validation | def wait(self):
"""Wait until all jobs finish and return the run IDs of the finished jobs
Returns
-------
list(str)
The list of the run IDs of the finished jobs
| python | {
"resource": ""
} |
q258429 | HTCondorJobSubmitter.failed_runids | validation | def failed_runids(self, runids):
"""Provide the run IDs of failed jobs
Returns
-------
None
"""
# remove failed clusterprocids from self.clusterprocids_finished
| python | {
"resource": ""
} |
q258430 | BranchAddressManager.getArrays | validation | def getArrays(self, tree, branchName):
"""return the array.array objects for the branch and its counter branch
This method returns a pair of the array.array objects. The first one is
for the given tree and branch name. The second one is for its counter
| python | {
"resource": ""
} |
q258431 | CommunicationChannel.put | validation | def put(self, task, *args, **kwargs):
"""put a task and its arguments
If you need to put multiple tasks, it can be faster to put
multiple tasks with `put_multiple()` than to use this method
multiple times.
Parameters
----------
task : a function
A function to be executed
args : list
A list of positional arguments to the `task`
kwargs : dict
A dict with keyword arguments to the `task`
Returns
-------
int, str, or any hashable and sortable
| python | {
"resource": ""
} |
q258432 | CommunicationChannel.put_multiple | validation | def put_multiple(self, task_args_kwargs_list):
"""put a list of tasks and their arguments
This method can be used to put multiple tasks at once. Calling
this method once with multiple tasks can be much faster than
calling `put()` multiple times.
Parameters
----------
task_args_kwargs_list : list
A list of lists with three items that can be parameters of
`put()`, i.e., `task`, `args`, `kwargs`.
Returns
-------
list
A list of task IDs.
"""
if not self.isopen:
logger = logging.getLogger(__name__)
logger.warning('the drop box is not open')
return
packages = [ ]
for t in task_args_kwargs_list:
| python | {
"resource": ""
} |
q258433 | CommunicationChannel.receive_finished | validation | def receive_finished(self):
"""return a list of pairs of IDs and results of finished tasks.
This method doesn't wait for tasks to finish. It returns IDs
and results which have already finished.
Returns
-------
list
A list of pairs of IDs and results
"""
if not self.isopen:
| python | {
"resource": ""
} |
q258434 | CommunicationChannel.receive_one | validation | def receive_one(self):
"""return a pair of an ID and a result of a task.
This method waits for a task to finish.
Returns
-------
An ID and a result of a task. `None` if no task is running.
"""
if not self.isopen:
| python | {
"resource": ""
} |
q258435 | CommunicationChannel.receive_all | validation | def receive_all(self):
"""return a list of pairs of IDs and results of all tasks.
This method waits for all tasks to finish.
Returns
-------
| python | {
"resource": ""
} |
q258436 | CommunicationChannel.receive | validation | def receive(self):
"""return a list results of all tasks.
This method waits for all tasks to finish.
Returns
-------
list
A list of results of the tasks. The results are sorted in
the order in which the tasks are put.
| python | {
"resource": ""
} |
q258437 | expand_path_cfg | validation | def expand_path_cfg(path_cfg, alias_dict={ }, overriding_kargs={ }):
"""expand a path config
Args:
path_cfg (str, tuple, dict): a config for path
alias_dict (dict): a dict for aliases
overriding_kargs (dict): to be used for recursive call
"""
if isinstance(path_cfg, str):
return _expand_str(path_cfg, alias_dict, overriding_kargs)
| python | {
"resource": ""
} |
q258438 | _expand_tuple | validation | def _expand_tuple(path_cfg, alias_dict, overriding_kargs):
"""expand a path config given as a tuple
"""
# e.g.,
# path_cfg = ('ev : {low} <= ev.var[0] < {high}', {'low': 10, 'high': 200})
# overriding_kargs = {'alias': 'var_cut', 'name': 'var_cut25', 'low': 25}
new_path_cfg = path_cfg[0]
# e.g., 'ev : {low} <= ev.var[0] < {high}'
new_overriding_kargs = path_cfg[1].copy()
# e.g., {'low': | python | {
"resource": ""
} |
q258439 | SubprocessRunner.poll | validation | def poll(self):
"""check if the jobs are running and return a list of pids for
finished jobs
"""
finished_procs = [p for p in self.running_procs if p.poll() is not None]
self.running_procs = collections.deque([p for p in self.running_procs if p not in finished_procs])
for proc in finished_procs:
stdout, stderr = proc.communicate()
## proc.communicate() returns (stdout, stderr) when
## self.pipe = True. Otherwise they are (None, None)
finished_pids = [p.pid for p in finished_procs]
| python | {
"resource": ""
} |
q258440 | SubprocessRunner.wait | validation | def wait(self):
"""wait until all jobs finish and return a list of pids
"""
finished_pids = [ ]
| python | {
"resource": ""
} |
q258441 | BranchAddressManagerForVector.getVector | validation | def getVector(self, tree, branchName):
"""return the ROOT.vector object for the branch.
"""
if (tree, branchName) in self.__class__.addressDict:
return self.__class__.addressDict[(tree, branchName)]
| python | {
"resource": ""
} |
q258442 | CMakeGen.configure | validation | def configure(self, component, all_dependencies):
''' Ensure all config-time files have been generated. Return a
dictionary of generated items.
'''
r = {}
builddir = self.buildroot
# only dependencies which are actually valid can contribute to the
# config data (which includes the versions of all dependencies in its
# build info) if the dependencies aren't available we can't tell what
# version they are. Anything missing here should always be a test
# dependency that isn't going to be used, otherwise the yotta build
# command will fail before we get here
available_dependencies = OrderedDict((k, v) for k, v in all_dependencies.items() if v)
self.set_toplevel_definitions = ''
if self.build_info_include_file is None:
self.build_info_include_file, build_info_definitions = self.getBuildInfo(component.path, builddir)
| python | {
"resource": ""
} |
q258443 | _getTarball | validation | def _getTarball(url, into_directory, cache_key, origin_info=None):
'''unpack the specified tarball url into the specified directory'''
try:
access_common.unpackFromCache(cache_key, into_directory)
except KeyError as e:
tok = settings.getProperty('github', 'authtoken')
headers = {}
if tok is not None:
headers['Authorization'] = 'token ' + str(tok)
logger.debug('GET %s', url)
| python | {
"resource": ""
} |
q258444 | GithubComponent.availableVersions | validation | def availableVersions(self):
''' return a list of Version objects, each with a tarball URL set '''
r = []
for t in self._getTags():
logger.debug("available version tag: %s", t)
# ignore empty tags:
if not len(t[0].strip()):
continue
try:
| python | {
"resource": ""
} |
q258445 | GithubComponent.availableTags | validation | def availableTags(self):
''' return a list of GithubComponentVersion objects for all tags
'''
return [
| python | {
"resource": ""
} |
q258446 | GithubComponent.availableBranches | validation | def availableBranches(self):
''' return a list of GithubComponentVersion objects for the tip of each branch
'''
return [
GithubComponentVersion(
| python | {
"resource": ""
} |
q258447 | _raiseUnavailableFor401 | validation | def _raiseUnavailableFor401(message):
''' Returns a decorator to swallow a requests exception for modules that
are not accessible without logging in, and turn it into an Unavailable
exception.
'''
def __raiseUnavailableFor401(fn):
def wrapped(*args, **kwargs):
try:
return fn(*args, **kwargs)
except requests.exceptions.HTTPError as e:
if | python | {
"resource": ""
} |
q258448 | unpublish | validation | def unpublish(namespace, name, version, registry=None):
''' Try to unpublish a recently published version. Return any errors that
occur.
'''
registry = registry or Registry_Base_URL
url = '%s/%s/%s/versions/%s' % (
registry,
namespace,
| python | {
"resource": ""
} |
q258449 | _JSONConfigParser.read | validation | def read(self, filenames):
''' Read a list of files. Their configuration values are merged, with
preference to values from files earlier in the list.
'''
for fn in filenames:
try:
self.configs[fn] = ordered_json.load(fn)
| python | {
"resource": ""
} |
q258450 | _JSONConfigParser.get | validation | def get(self, path):
''' return a configuration value
usage:
get('section.property')
Note that currently array indexes are not supported. You must
get the whole array.
returns None if any path element or the property is missing
'''
path = _splitPath(path)
for config in self.configs.values():
cur = config
for el in path:
| python | {
"resource": ""
} |
q258451 | _JSONConfigParser.set | validation | def set(self, path, value=None, filename=None):
''' Set a configuration value. If no filename is specified, the
property is set in the first configuration file. Note that if a
filename is specified and the property path is present in an
earlier filename then set property will be hidden.
usage:
set('section.property', value='somevalue')
Note that currently array indexes are not supported. You must
set the whole array.
'''
if filename is None:
| python | {
"resource": ""
} |
q258452 | islast | validation | def islast(generator):
''' indicate whether the current item is the last one in a generator
'''
next_x = None
first = True
for x in generator:
if not first:
| python | {
"resource": ""
} |
q258453 | sourceDirValidationError | validation | def sourceDirValidationError(dirname, component_name):
''' validate source directory names in components '''
if dirname == component_name:
return 'Module %s public include directory %s should not contain source files' % (component_name, dirname)
elif dirname.lower() in ('source', 'src') and dirname != 'source':
return 'Module %s has non-standard source directory name: "%s" should be "source"' % (component_name, dirname)
elif isPotentialTestDir(dirname) and dirname != 'test':
return 'Module %s has non-standard test directory name: "%s" | python | {
"resource": ""
} |
q258454 | displayOutdated | validation | def displayOutdated(modules, dependency_specs, use_colours):
''' print information about outdated modules,
return 0 if there is nothing to be done and nonzero otherwise
'''
if use_colours:
DIM = colorama.Style.DIM #pylint: disable=no-member
NORMAL = colorama.Style.NORMAL #pylint: disable=no-member
BRIGHT = colorama.Style.BRIGHT #pylint: disable=no-member
YELLOW = colorama.Fore.YELLOW #pylint: disable=no-member
RED = colorama.Fore.RED #pylint: disable=no-member
GREEN = colorama.Fore.GREEN #pylint: disable=no-member
RESET = colorama.Style.RESET_ALL #pylint: disable=no-member
else:
DIM = BRIGHT = YELLOW = RED = GREEN = RESET = u''
status = 0
# access, , get components, internal
from yotta.lib import access
from yotta.lib import access_common
# sourceparse, , parse version source urls, internal
from yotta.lib import sourceparse
for name, m in modules.items():
if m.isTestDependency():
continue
try:
latest_v = access.latestSuitableVersion(name, '*', registry='modules', quiet=True)
except access_common.Unavailable as e:
latest_v = None
if not m:
m_version = u' ' + RESET + BRIGHT + RED + u"missing" + RESET
else:
m_version = DIM + u'@%s' % (m.version)
if not latest_v:
print(u'%s%s%s%s not available from the registry%s' % (RED, name, m_version, NORMAL, RESET))
status = 2
continue
elif not m or m.version < latest_v:
update_prevented_by = ''
if m:
specs_preventing_update = [
x for x in dependency_specs
if x.name == name and not
sourceparse.parseSourceURL(x.nonShrinkwrappedVersionReq()).semanticSpecMatches(latest_v)
]
shrinkwrap_prevents_update = [
x for x in dependency_specs
if x.name == name | python | {
"resource": ""
} |
q258455 | Pack.ignores | validation | def ignores(self, path):
''' Test if this module ignores the file at "path", which must be a
path relative to the root of the module.
If a file is within a directory that is ignored, the file is also
ignored.
'''
test_path = PurePath('/', path)
# also check any parent directories of this path against the ignore
# patterns:
| python | {
"resource": ""
} |
q258456 | Pack.publish | validation | def publish(self, registry=None):
''' Publish to the appropriate registry, return a description of any
errors that occurred, or None if successful.
No VCS tagging is performed.
'''
if (registry is None) or (registry == registry_access.Registry_Base_URL):
if 'private' in self.description and self.description['private']:
return "this %s is private and cannot be published" % (self.description_filename.split('.')[0])
upload_archive = os.path.join(self.path, 'upload.tar.gz')
fsutils.rmF(upload_archive)
fd = os.open(upload_archive, os.O_CREAT | os.O_EXCL | os.O_RDWR | getattr(os, "O_BINARY", 0))
with os.fdopen(fd, 'rb+') as tar_file:
tar_file.truncate()
self.generateTarball(tar_file)
logger.debug('generated tar file of length %s', tar_file.tell())
tar_file.seek(0)
# calculate the hash of the file before we upload it:
shasum = hashlib.sha256()
while True:
chunk = tar_file.read(1000)
if not chunk:
break
| python | {
"resource": ""
} |
q258457 | Pack.unpublish | validation | def unpublish(self, registry=None):
''' Try to un-publish the current version. Return a description of any
errors that occurred, or None | python | {
"resource": ""
} |
q258458 | Pack.getScript | validation | def getScript(self, scriptname):
''' Return the specified script command. If the first part of the
command is a .py file, then the current python interpreter is
prepended.
If the script is a single string, rather than an array, it is
shlex-split.
'''
script = self.description.get('scripts', {}).get(scriptname, None)
if script is not None:
if isinstance(script, str) or isinstance(script, type(u'unicode string')):
import shlex
script = shlex.split(script)
| python | {
"resource": ""
} |
q258459 | Pack.runScript | validation | def runScript(self, scriptname, additional_environment=None):
''' Run the specified script from the scripts section of the
module.json file in the directory of this module.
'''
import subprocess
import shlex
command = self.getScript(scriptname)
if command is None:
logger.debug('%s has no script %s', self, scriptname)
return 0
if not len(command):
logger.error("script %s of %s is empty", scriptname, self.getName())
return 1
# define additional environment variables for scripts:
env = os.environ.copy()
if additional_environment is not None:
env.update(additional_environment)
errcode = 0
child = None
try:
logger.debug('running script: %s', command)
child = subprocess.Popen(
command, cwd = self.path, env = env
)
| python | {
"resource": ""
} |
q258460 | Component.hasDependency | validation | def hasDependency(self, name, target=None, test_dependencies=False):
''' Check if this module has any dependencies with the specified name
in its dependencies list, or in target dependencies for the
specified target
'''
if name in self.description.get('dependencies', {}).keys():
return True
target_deps = self.description.get('targetDependencies', {})
if target is not None:
for conf_key, target_conf_deps in target_deps.items():
if _truthyConfValue(target.getConfigValue(conf_key)) or conf_key in target.getSimilarTo_Deprecated():
if name in target_conf_deps:
return True
if test_dependencies:
if name in self.description.get('testDependencies', {}).keys():
return True
if target is not None:
| python | {
"resource": ""
} |
q258461 | Component.hasDependencyRecursively | validation | def hasDependencyRecursively(self, name, target=None, test_dependencies=False):
''' Check if this module, or any of its dependencies, have a
dependencies with the specified name in their dependencies, or in
their targetDependencies corresponding to the specified target.
Note that if recursive dependencies | python | {
"resource": ""
} |
q258462 | Component.satisfyDependenciesRecursive | validation | def satisfyDependenciesRecursive(
self,
available_components = None,
search_dirs = None,
update_installed = False,
traverse_links = False,
target = None,
test = False
):
''' Retrieve and install all the dependencies of this component and its
dependencies, recursively, or satisfy them from a collection of
available_components or from disk.
Returns
=======
(components, errors)
components: dictionary of name:Component
errors: sequence of errors
Parameters
==========
available_components:
None (default) or a dictionary of name:component. This is
searched before searching directories or fetching remote
components
search_dirs:
None (default), or sequence of directories to search for
already installed, (but not yet loaded) components. Used so
that manually installed or linked components higher up the
dependency tree are found by their users lower down.
These directories are searched in order, and finally the
current directory is checked.
update_installed:
False (default), True, or set(): whether to check the
available versions of installed components, and update if a
newer version is available. If this is a set(), only update
things in the specified set.
traverse_links:
False (default) or True: whether to recurse into linked
dependencies when updating/installing.
target:
None (default), or a Target object. If specified the target
name and it's similarTo list will be used in resolving
dependencies. If None, then only target-independent
dependencies will be installed
test:
True, False, or 'toplevel': should test-only dependencies be
installed? (yes, no, or only for this module, not its
dependencies).
'''
def provider(
dspec,
available_components,
search_dirs,
working_directory,
update_installed,
dep_of=None
):
r = access.satisfyFromAvailable(dspec.name, available_components)
if r:
if r.isTestDependency() and not dspec.is_test_dependency:
logger.debug('test dependency subsequently occurred as real dependency: %s', r.getName())
r.setTestDependency(False)
return r
update_if_installed = False
if update_installed is True:
update_if_installed = True
elif update_installed:
update_if_installed = dspec.name in update_installed
r = access.satisfyVersionFromSearchPaths(
dspec.name,
dspec.versionReq(),
search_dirs,
update_if_installed,
inherit_shrinkwrap = dep_of.getShrinkwrap()
)
if r:
r.setTestDependency(dspec.is_test_dependency)
| python | {
"resource": ""
} |
q258463 | Component.getExtraIncludes | validation | def getExtraIncludes(self):
''' Some components must export whole directories full of headers into
the search path. This is really really bad, and they shouldn't do
it, but support is provided as a concession to compatibility.
'''
| python | {
"resource": ""
} |
q258464 | GitWorkingCopy.availableVersions | validation | def availableVersions(self):
''' return a list of GitCloneVersion objects for tags which are valid
semantic version idenfitifiers.
'''
r = []
for t in self.vcs.tags():
logger.debug("available version tag: %s", t)
# ignore empty tags:
if not len(t.strip()):
continue
| python | {
"resource": ""
} |
q258465 | _mergeDictionaries | validation | def _mergeDictionaries(*args):
''' merge dictionaries of dictionaries recursively, with elements from
dictionaries earlier in the argument sequence taking precedence
'''
# to support merging of OrderedDicts, copy the result type from the first
# argument:
result = type(args[0])()
for k, v in itertools.chain(*[x.items() for x in args]):
| python | {
"resource": ""
} |
q258466 | _mirrorStructure | validation | def _mirrorStructure(dictionary, value):
''' create a new nested dictionary object with the same structure as
'dictionary', but with all scalar values replaced with 'value' | python | {
"resource": ""
} |
q258467 | Target.baseTargetSpec | validation | def baseTargetSpec(self):
''' returns pack.DependencySpec for the base target of this target (or
None if this target does not inherit from another target.
'''
inherits = self.description.get('inherits', {})
if len(inherits) == 1:
name, version_req = list(inherits.items())[0]
shrinkwrap_version_req = self.getShrinkwrapMapping('targets').get(name, None)
if shrinkwrap_version_req is not None:
logger.debug(
'respecting shrinkwrap version %s for %s', shrinkwrap_version_req, name
| python | {
"resource": ""
} |
q258468 | DerivedTarget._loadConfig | validation | def _loadConfig(self):
''' load the configuration information from the target hierarchy '''
config_dicts = [self.additional_config, self.app_config] + [t.getConfig() for t in self.hierarchy]
# create an identical set of dictionaries, but with the names of the
# sources in place of the values. When these are merged they will show
# where each merged property came from:
config_blame = [
| python | {
"resource": ""
} |
q258469 | DerivedTarget.inheritsFrom | validation | def inheritsFrom(self, target_name):
''' Return true if this target inherits from the named target (directly
or indirectly. Also returns true if this target is the named
target. Otherwise return false.
'''
| python | {
"resource": ""
} |
q258470 | DerivedTarget.exec_helper | validation | def exec_helper(self, cmd, builddir):
''' Execute the given command, returning an error message if an error occured
or None if the command was succesful.'''
try:
child = subprocess.Popen(cmd, cwd=builddir)
child.wait()
except OSError as e:
| python | {
"resource": ""
} |
q258471 | DerivedTarget.build | validation | def build(self, builddir, component, args, release_build=False, build_args=None, targets=None,
release_no_debug_info_build=False):
''' Execute the commands necessary to build this component, and all of
its dependencies. '''
if build_args is None:
build_args = []
if targets is None:
targets = []
# in the future this may be specified in the target description, but
# for now we only support cmake, so everything is simple:
if release_no_debug_info_build:
build_type = 'Release'
elif release_build:
build_type = 'RelWithDebInfo'
else:
build_type = 'Debug'
cmd = ['cmake', '-D', 'CMAKE_BUILD_TYPE=%s' % build_type, '-G', args.cmake_generator, '.']
res = self.exec_helper(cmd, builddir)
if res is not None:
return res
# work-around various yotta-specific issues with the generated
# | python | {
"resource": ""
} |
q258472 | DerivedTarget.findProgram | validation | def findProgram(self, builddir, program):
''' Return the builddir-relative path of program, if only a partial
path is specified. Returns None and logs an error message if the
program is ambiguous or not found
'''
# if this is an exact match, do no further checking:
if os.path.isfile(os.path.join(builddir, program)):
logging.info('found %s' % program)
return program
exact_matches = []
insensitive_matches = []
approx_matches = []
for path, dirs, files in os.walk(builddir):
if program in files:
exact_matches.append(os.path.relpath(os.path.join(path, program), builddir))
continue
files_lower = [f.lower() for f in files]
if program.lower() in files_lower:
insensitive_matches.append(
os.path.relpath(
os.path.join(path, files[files_lower.index(program.lower())]),
builddir
)
)
continue
# !!! TODO: in the future add approximate string matching (typos,
# etc.), for now we just test stripping any paths off program, and
# looking for substring matches:
pg_basen_lower_noext = os.path.splitext(os.path.basename(program).lower())[0]
for f in files_lower:
if pg_basen_lower_noext in f:
approx_matches.append(
os.path.relpath(
os.path.join(path, files[files_lower.index(f)]),
builddir
)
)
if len(exact_matches) == 1:
logging.info('found %s at %s', program, exact_matches[0])
return exact_matches[0]
elif len(exact_matches) > 1:
logging.error(
'%s matches multiple executables, please use a full path (one of %s)' % (
| python | {
"resource": ""
} |
q258473 | DerivedTarget.start | validation | def start(self, builddir, program, forward_args):
''' Launch the specified program. Uses the `start` script if specified
by the target, attempts to run it natively if that script is not
defined.
'''
child = None
try:
prog_path = self.findProgram(builddir, program)
if prog_path is None:
return
start_env, start_vars = self.buildProgEnvAndVars(prog_path, builddir)
if self.getScript('start'):
cmd = [
os.path.expandvars(string.Template(x).safe_substitute(**start_vars))
for x in self.getScript('start')
] + forward_args
else:
cmd = shlex.split('./' + prog_path) + forward_args
logger.debug('starting program: %s', cmd)
child = subprocess.Popen(
cmd, cwd = builddir, env = start_env
| python | {
"resource": ""
} |
q258474 | pruneCache | validation | def pruneCache():
''' Prune the cache '''
cache_dir = folders.cacheDirectory()
def fullpath(f):
return os.path.join(cache_dir, f)
def getMTimeSafe(f):
# it's possible that another process removed the file before we stat
# it, handle this gracefully
try:
return os.stat(f).st_mtime
except FileNotFoundError:
import time
return time.clock()
# ensure cache exists
fsutils.mkDirP(cache_dir)
max_cached_modules = getMaxCachedModules()
for f in sorted(
[f for f in os.listdir(cache_dir) if
| python | {
"resource": ""
} |
q258475 | sometimesPruneCache | validation | def sometimesPruneCache(p):
''' return decorator to prune cache after calling fn with a probability of p'''
def decorator(fn):
@functools.wraps(fn)
def wrapped(*args, **kwargs):
r = fn(*args, **kwargs)
| python | {
"resource": ""
} |
q258476 | calibrateEB | validation | def calibrateEB(variances, sigma2):
"""
Calibrate noisy variance estimates with empirical Bayes.
Parameters
----------
vars: ndarray
List of variance estimates.
sigma2: int
Estimate of the Monte Carlo noise in vars.
Returns
-------
An array of the calibrated variance estimates
"""
if (sigma2 <= 0 or min(variances) == max(variances)):
return(np.maximum(variances, 0))
sigma = np.sqrt(sigma2)
eb_prior = gfit(variances, sigma)
# Set up a partial execution of the function
part = functools.partial(gbayes, g_est=eb_prior,
sigma=sigma)
if len(variances) | python | {
"resource": ""
} |
q258477 | calc_inbag | validation | def calc_inbag(n_samples, forest):
"""
Derive samples used to create trees in scikit-learn RandomForest objects.
Recovers the samples in each tree from the random state of that tree using
:func:`forest._generate_sample_indices`.
Parameters
----------
n_samples : int
The number of samples used to fit the scikit-learn RandomForest object.
forest : RandomForest
Regressor or Classifier object that is already fit by scikit-learn.
Returns
-------
Array that records how many times a data point was placed in a tree.
Columns are individual trees. Rows are the number of times a sample was
used in a tree.
"""
if not forest.bootstrap:
e_s = "Cannot calculate | python | {
"resource": ""
} |
q258478 | _core_computation | validation | def _core_computation(X_train, X_test, inbag, pred_centered, n_trees,
memory_constrained=False, memory_limit=None,
test_mode=False):
"""
Helper function, that performs the core computation
Parameters
----------
X_train : ndarray
An array with shape (n_train_sample, n_features).
X_test : ndarray
An array with shape (n_test_sample, n_features).
inbag : ndarray
The inbag matrix that fit the data. If set to `None` (default) it
will be inferred from the forest. However, this only works for trees
for which bootstrapping was set to `True`. That is, if sampling was
done with replacement. Otherwise, users need to provide their own
inbag matrix.
pred_centered : ndarray
Centered predictions that are an intermediate result in the
computation.
memory_constrained: boolean (optional)
Whether or not there is a restriction on memory. If False, it is
assumed that a ndarry of shape (n_train_sample,n_test_sample) fits
in main memory. Setting to True can actually provide a speed up if
memory_limit is tuned to the optimal range.
memory_limit: int (optional)
An upper bound for how much memory the itermediate matrices will take
up in Megabytes. This must be provided if memory_constrained=True.
"""
if not memory_constrained:
return np.sum((np.dot(inbag - 1, pred_centered.T) / n_trees) ** 2, 0)
if not memory_limit:
| python | {
"resource": ""
} |
q258479 | _bias_correction | validation | def _bias_correction(V_IJ, inbag, pred_centered, n_trees):
"""
Helper functions that implements bias correction
Parameters
----------
V_IJ : ndarray
Intermediate result in the computation.
inbag : ndarray
The inbag matrix that fit the data. If set to `None` (default) it
will be inferred from the forest. However, this only works for trees
for which bootstrapping was set to `True`. That is, if sampling was
done with replacement. Otherwise, users need to provide their own
inbag matrix.
pred_centered : ndarray
Centered predictions that are an intermediate result in the
computation.
n_trees : int
| python | {
"resource": ""
} |
q258480 | random_forest_error | validation | def random_forest_error(forest, X_train, X_test, inbag=None,
calibrate=True, memory_constrained=False,
memory_limit=None):
"""
Calculate error bars from scikit-learn RandomForest estimators.
RandomForest is a regressor or classifier object
this variance can be used to plot error bars for RandomForest objects
Parameters
----------
forest : RandomForest
Regressor or Classifier object.
X_train : ndarray
An array with shape (n_train_sample, n_features). The design matrix for
training data.
X_test : ndarray
An array with shape (n_test_sample, n_features). The design matrix
for testing data
inbag : ndarray, optional
The inbag matrix that fit the data. If set to `None` (default) it
will be inferred from the forest. However, this only works for trees
for which bootstrapping was set to `True`. That is, if sampling was
done with replacement. Otherwise, users need to provide their own
inbag matrix.
calibrate: boolean, optional
Whether to apply calibration to mitigate Monte Carlo noise.
Some variance estimates may be negative due to Monte Carlo effects if
the number of trees in the forest is too small. To use calibration,
Default: True
memory_constrained: boolean, optional
Whether or not there is a restriction on memory. If False, it is
assumed that a ndarry of shape (n_train_sample,n_test_sample) fits
in main memory. Setting to True can actually provide a speed up if
memory_limit is tuned to the optimal range.
memory_limit: int, optional.
An upper bound for how much memory the itermediate matrices will take
up in Megabytes. This must be provided if memory_constrained=True.
Returns
-------
An array with the unbiased sampling variance (V_IJ_unbiased)
for a RandomForest object.
See Also
----------
:func:`calc_inbag`
Notes
-----
The calculation of error is based on the infinitesimal jackknife variance,
as described in [Wager2014]_ and is a Python implementation of the R code
provided at: https://github.com/swager/randomForestCI
.. [Wager2014] S. Wager, T. Hastie, B. Efron. "Confidence Intervals for
Random Forests: The Jackknife and the Infinitesimal Jackknife", Journal
of Machine Learning Research vol. 15, pp. 1625-1651, 2014.
"""
if inbag is None:
inbag = calc_inbag(X_train.shape[0], forest)
pred = np.array([tree.predict(X_test) for tree in forest]).T
pred_mean = np.mean(pred, 0)
pred_centered = pred - pred_mean
n_trees = forest.n_estimators
V_IJ = _core_computation(X_train, X_test, inbag, pred_centered, n_trees,
memory_constrained, memory_limit)
V_IJ_unbiased = _bias_correction(V_IJ, inbag, pred_centered, n_trees)
# Correct for cases where resampling | python | {
"resource": ""
} |
q258481 | SSLSatchel.generate_self_signed_certificate | validation | def generate_self_signed_certificate(self, domain='', r=None):
"""
Generates a self-signed certificate for use in an internal development
environment for testing SSL pages.
http://almostalldigital.wordpress.com/2013/03/07/self-signed-ssl-certificate-for-ec2-load-balancer/
"""
r = self.local_renderer
r.env.domain = domain or r.env.domain
assert r.env.domain, 'No SSL domain defined.'
role = r or self.genv.ROLE or ALL
ssl_dst = 'roles/%s/ssl' % (role,)
if not os.path.isdir(ssl_dst):
os.makedirs(ssl_dst)
| python | {
"resource": ""
} |
q258482 | SSLSatchel.generate_csr | validation | def generate_csr(self, domain='', r=None):
"""
Creates a certificate signing request to be submitted to a formal
certificate authority to generate a certificate.
Note, the provider may say the CSR must be created on the target server,
but this is not necessary.
"""
r = r or self.local_renderer
r.env.domain = domain or r.env.domain
role = self.genv.ROLE or ALL
site = self.genv.SITE or self.genv.default_site
print('self.genv.default_site:', self.genv.default_site, file=sys.stderr)
print('site.csr0:', site, file=sys.stderr)
ssl_dst = 'roles/%s/ssl' % (role,)
print('ssl_dst:', ssl_dst)
if not os.path.isdir(ssl_dst):
os.makedirs(ssl_dst)
for site, site_data in self.iter_sites():
| python | {
"resource": ""
} |
q258483 | SSLSatchel.get_expiration_date | validation | def get_expiration_date(self, fn):
"""
Reads the expiration date of a local crt file.
"""
r = self.local_renderer
r.env.crt_fn = fn
with hide('running'):
ret = r.local('openssl x509 -noout -in {ssl_crt_fn} -dates', capture=True)
| python | {
"resource": ""
} |
q258484 | SSLSatchel.list_expiration_dates | validation | def list_expiration_dates(self, base='roles/all/ssl'):
"""
Scans through all local .crt files and displays the expiration dates.
"""
max_fn_len = 0
max_date_len = 0
data = []
for fn in os.listdir(base):
fqfn = os.path.join(base, fn)
if not os.path.isfile(fqfn):
continue
if not fn.endswith('.crt'):
continue
expiration_date = self.get_expiration_date(fqfn)
max_fn_len = max(max_fn_len, len(fn))
max_date_len = max(max_date_len, len(str(expiration_date)))
data.append((fn, expiration_date))
print('%s %s %s' % ('Filename'.ljust(max_fn_len), 'Expiration Date'.ljust(max_date_len), 'Expired'))
now | python | {
"resource": ""
} |
q258485 | SSLSatchel.verify_certificate_chain | validation | def verify_certificate_chain(self, base=None, crt=None, csr=None, key=None):
"""
Confirms the key, CSR, and certificate files all match.
"""
from burlap.common import get_verbose, print_fail, print_success
r = self.local_renderer
if base:
crt = base + '.crt'
csr = base + '.csr'
key = base + '.key'
else:
assert crt and csr and key, 'If base not provided, crt and csr and key must be given.'
assert os.path.isfile(crt)
assert os.path.isfile(csr)
assert os.path.isfile(key)
csr_md5 = r.local('openssl req -noout -modulus -in %s | openssl md5' % csr, capture=True)
key_md5 = r.local('openssl rsa -noout -modulus -in %s | openssl md5' % key, capture=True)
| python | {
"resource": ""
} |
q258486 | update_merge | validation | def update_merge(d, u):
"""
Recursively merges two dictionaries.
Uses fabric's AttributeDict so you can reference values via dot-notation.
e.g. env.value1.value2.value3...
http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
"""
import collections
for k, v | python | {
"resource": ""
} |
q258487 | check_version | validation | def check_version():
"""
Compares the local version against the latest official version on PyPI and displays a warning message if a newer release is available.
This check can be disabled by setting the environment variable BURLAP_CHECK_VERSION=0.
"""
global CHECK_VERSION
if not CHECK_VERSION:
return
# Ensure we only check once in this process.
CHECK_VERSION = 0
# Lookup most recent remote version.
from six.moves.urllib.request import urlopen
try:
response = urlopen("https://pypi.org/pypi/burlap/json")
data = json.loads(response.read().decode())
remote_release = sorted(tuple(map(int, _.split('.'))) for _ in data['releases'].keys())[-1]
remote_release_str = '.'.join(map(str, remote_release))
local_release = VERSION
local_release_str = '.'.join(map(str, local_release))
# Display warning. | python | {
"resource": ""
} |
q258488 | populate_fabfile | validation | def populate_fabfile():
"""
Automatically includes all submodules and role selectors
in the top-level fabfile using spooky-scary black magic.
This allows us to avoid manually declaring imports for every module, e.g.
import burlap.pip
import burlap.vm
import burlap...
which has the added benefit of allowing us to manually call the commands
without typing "burlap".
This is soley for convenience. If not needed, it can be disabled
by specifying the environment variable:
export BURLAP_POPULATE_STACK=0
"""
stack = inspect.stack()
fab_frame = None
for frame_obj, script_fn, line, _, _, _ in stack:
if 'fabfile.py' in script_fn:
fab_frame = frame_obj
break
if not fab_frame:
return
try:
locals_ = fab_frame.f_locals
for module_name, module in sub_modules.items():
locals_[module_name] = module
for role_name, role_func in role_commands.items():
assert role_name not in sub_modules, \
('The role %s conflicts with a built-in submodule. '
| python | {
"resource": ""
} |
q258489 | task_or_dryrun | validation | def task_or_dryrun(*args, **kwargs):
"""
Decorator declaring the wrapped function to be a new-style task.
May be invoked as a simple, argument-less decorator (i.e. ``@task``) or
with arguments customizing its behavior (e.g. ``@task(alias='myalias')``).
Please see the :ref:`new-style task <task-decorator>` documentation for
details on how to use this decorator.
.. versionchanged:: 1.2
Added the ``alias``, ``aliases``, ``task_class`` and ``default`` | python | {
"resource": ""
} |
q258490 | task | validation | def task(*args, **kwargs):
"""
Decorator for registering a satchel method as a Fabric task.
Can be used like:
@task
def my_method(self):
...
@task(precursors=['other_satchel'])
def my_method(self):
...
"""
precursors = kwargs.pop('precursors', None)
post_callback = kwargs.pop('post_callback', False)
if args and callable(args[0]):
# direct decoration, @task
return _task(*args)
| python | {
"resource": ""
} |
q258491 | FileSatchel.is_file | validation | def is_file(self, path, use_sudo=False):
"""
Check if a path exists, and is a file.
"""
if self.is_local and not use_sudo:
return os.path.isfile(path)
else:
func = use_sudo and _sudo or _run | python | {
"resource": ""
} |
q258492 | FileSatchel.is_dir | validation | def is_dir(self, path, use_sudo=False):
"""
Check if a path exists, and is a directory.
"""
if self.is_local and not use_sudo:
return os.path.isdir(path)
else:
func = use_sudo and | python | {
"resource": ""
} |
q258493 | FileSatchel.is_link | validation | def is_link(self, path, use_sudo=False):
"""
Check if a path exists, and is a symbolic link.
"""
func = use_sudo and _sudo or _run
| python | {
"resource": ""
} |
q258494 | FileSatchel.get_owner | validation | def get_owner(self, path, use_sudo=False):
"""
Get the owner name of a file or directory.
"""
func = use_sudo and run_as_root or self.run
# I'd prefer to use quiet=True, but that's not supported with older
# versions of Fabric.
with self.settings(hide('running', 'stdout'), warn_only=True):
| python | {
"resource": ""
} |
q258495 | FileSatchel.umask | validation | def umask(self, use_sudo=False):
"""
Get the user's umask.
Returns a string such as ``'0002'``, representing the user's umask
as an octal number.
| python | {
"resource": ""
} |
q258496 | FileSatchel.upload_template | validation | def upload_template(self, filename, destination, context=None, use_jinja=False,
template_dir=None, use_sudo=False, backup=True,
mirror_local_mode=False, mode=None,
mkdir=False, chown=False, user=None):
"""
Upload a template file.
This is a wrapper around :func:`fabric.contrib.files.upload_template`
that adds some extra parameters.
If ``mkdir`` is True, then the remote directory will be created, as
the current user or as ``user`` if specified.
If ``chown`` is True, then it will ensure that the current user (or
``user`` if specified) is the owner of the remote file.
"""
if mkdir:
remote_dir = os.path.dirname(destination)
if use_sudo:
self.sudo('mkdir -p %s' % quote(remote_dir), user=user)
else:
self.run('mkdir -p %s' % quote(remote_dir))
if not self.dryrun:
| python | {
"resource": ""
} |
q258497 | FileSatchel.md5sum | validation | def md5sum(self, filename, use_sudo=False):
"""
Compute the MD5 sum of a file.
"""
func = use_sudo and run_as_root or self.run
with self.settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):
# Linux (LSB)
if exists(u'/usr/bin/md5sum'):
res = func(u'/usr/bin/md5sum %(filename)s' % locals())
# BSD / OS X
elif exists(u'/sbin/md5'):
res = func(u'/sbin/md5 -r %(filename)s' % locals())
# SmartOS Joyent build
elif exists(u'/opt/local/gnu/bin/md5sum'):
res = func(u'/opt/local/gnu/bin/md5sum %(filename)s' % locals())
# SmartOS Joyent build
# (the former doesn't exist, at least on joyent_20130222T000747Z)
elif exists(u'/opt/local/bin/md5sum'):
res = func(u'/opt/local/bin/md5sum %(filename)s' % locals())
# Try to find ``md5sum`` or ``md5`` on ``$PATH`` or abort
else:
md5sum = func(u'which md5sum')
| python | {
"resource": ""
} |
q258498 | FileSatchel.uncommented_lines | validation | def uncommented_lines(self, filename, use_sudo=False):
"""
Get the lines of a remote file, ignoring empty or commented ones
"""
func = | python | {
"resource": ""
} |
q258499 | FileSatchel.getmtime | validation | def getmtime(self, path, use_sudo=False):
"""
Return the time of last modification of path.
The return value is a number giving the number of seconds since the epoch
Same as :py:func:`os.path.getmtime()`
"""
func = use_sudo and | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.