id int32 0 252k | repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 51 19.8k | code_tokens list | docstring stringlengths 3 17.3k | docstring_tokens list | sha stringlengths 40 40 | url stringlengths 87 242 |
|---|---|---|---|---|---|---|---|---|---|---|---|
225,100 | DataBiosphere/toil | src/toil/fileStore.py | shutdownFileStore | def shutdownFileStore(workflowDir, workflowID):
"""
Run the deferred functions from any prematurely terminated jobs still lingering on the system
and carry out any necessary filestore-specific cleanup.
This is a destructive operation and it is important to ensure that there are no other running
processes on the system that are modifying or using the file store for this workflow.
This is the intended to be the last call to the file store in a Toil run, called by the
batch system cleanup function upon batch system shutdown.
:param str workflowDir: The path to the cache directory
:param str workflowID: The workflow ID for this invocation of the workflow
"""
cacheDir = os.path.join(workflowDir, cacheDirName(workflowID))
if os.path.exists(cacheDir):
# The presence of the cacheDir suggests this was a cached run. We don't need the cache lock
# for any of this since this is the final cleanup of a job and there should be no other
# conflicting processes using the cache.
CachingFileStore.shutdown(cacheDir)
else:
# This absence of cacheDir suggests otherwise.
NonCachingFileStore.shutdown(workflowDir) | python | def shutdownFileStore(workflowDir, workflowID):
cacheDir = os.path.join(workflowDir, cacheDirName(workflowID))
if os.path.exists(cacheDir):
# The presence of the cacheDir suggests this was a cached run. We don't need the cache lock
# for any of this since this is the final cleanup of a job and there should be no other
# conflicting processes using the cache.
CachingFileStore.shutdown(cacheDir)
else:
# This absence of cacheDir suggests otherwise.
NonCachingFileStore.shutdown(workflowDir) | [
"def",
"shutdownFileStore",
"(",
"workflowDir",
",",
"workflowID",
")",
":",
"cacheDir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"workflowDir",
",",
"cacheDirName",
"(",
"workflowID",
")",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"cacheDir",
"... | Run the deferred functions from any prematurely terminated jobs still lingering on the system
and carry out any necessary filestore-specific cleanup.
This is a destructive operation and it is important to ensure that there are no other running
processes on the system that are modifying or using the file store for this workflow.
This is the intended to be the last call to the file store in a Toil run, called by the
batch system cleanup function upon batch system shutdown.
:param str workflowDir: The path to the cache directory
:param str workflowID: The workflow ID for this invocation of the workflow | [
"Run",
"the",
"deferred",
"functions",
"from",
"any",
"prematurely",
"terminated",
"jobs",
"still",
"lingering",
"on",
"the",
"system",
"and",
"carry",
"out",
"any",
"necessary",
"filestore",
"-",
"specific",
"cleanup",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/fileStore.py#L1966-L1989 |
225,101 | DataBiosphere/toil | src/toil/fileStore.py | DeferredFunction.create | def create(cls, function, *args, **kwargs):
"""
Capture the given callable and arguments as an instance of this class.
:param callable function: The deferred action to take in the form of a function
:param tuple args: Non-keyword arguments to the function
:param dict kwargs: Keyword arguments to the function
"""
# The general principle is to deserialize as late as possible, i.e. when the function is
# to be invoked, as that will avoid redundantly deserializing deferred functions for
# concurrently running jobs when the cache state is loaded from disk. By implication we
# should serialize as early as possible. We need to serialize the function as well as its
# arguments.
return cls(*list(map(dill.dumps, (function, args, kwargs))),
name=function.__name__,
module=ModuleDescriptor.forModule(function.__module__).globalize()) | python | def create(cls, function, *args, **kwargs):
# The general principle is to deserialize as late as possible, i.e. when the function is
# to be invoked, as that will avoid redundantly deserializing deferred functions for
# concurrently running jobs when the cache state is loaded from disk. By implication we
# should serialize as early as possible. We need to serialize the function as well as its
# arguments.
return cls(*list(map(dill.dumps, (function, args, kwargs))),
name=function.__name__,
module=ModuleDescriptor.forModule(function.__module__).globalize()) | [
"def",
"create",
"(",
"cls",
",",
"function",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# The general principle is to deserialize as late as possible, i.e. when the function is",
"# to be invoked, as that will avoid redundantly deserializing deferred functions for",
"# ... | Capture the given callable and arguments as an instance of this class.
:param callable function: The deferred action to take in the form of a function
:param tuple args: Non-keyword arguments to the function
:param dict kwargs: Keyword arguments to the function | [
"Capture",
"the",
"given",
"callable",
"and",
"arguments",
"as",
"an",
"instance",
"of",
"this",
"class",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/fileStore.py#L60-L75 |
225,102 | DataBiosphere/toil | src/toil/fileStore.py | DeferredFunction.invoke | def invoke(self):
"""
Invoke the captured function with the captured arguments.
"""
logger.debug('Running deferred function %s.', self)
self.module.makeLoadable()
function, args, kwargs = list(map(dill.loads, (self.function, self.args, self.kwargs)))
return function(*args, **kwargs) | python | def invoke(self):
logger.debug('Running deferred function %s.', self)
self.module.makeLoadable()
function, args, kwargs = list(map(dill.loads, (self.function, self.args, self.kwargs)))
return function(*args, **kwargs) | [
"def",
"invoke",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"'Running deferred function %s.'",
",",
"self",
")",
"self",
".",
"module",
".",
"makeLoadable",
"(",
")",
"function",
",",
"args",
",",
"kwargs",
"=",
"list",
"(",
"map",
"(",
"dill",
... | Invoke the captured function with the captured arguments. | [
"Invoke",
"the",
"captured",
"function",
"with",
"the",
"captured",
"arguments",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/fileStore.py#L77-L84 |
225,103 | DataBiosphere/toil | src/toil/fileStore.py | FileStore.getLocalTempDir | def getLocalTempDir(self):
"""
Get a new local temporary directory in which to write files that persist for the duration of
the job.
:return: The absolute path to a new local temporary directory. This directory will exist
for the duration of the job only, and is guaranteed to be deleted once the job
terminates, removing all files it contains recursively.
:rtype: str
"""
return os.path.abspath(tempfile.mkdtemp(prefix="t", dir=self.localTempDir)) | python | def getLocalTempDir(self):
return os.path.abspath(tempfile.mkdtemp(prefix="t", dir=self.localTempDir)) | [
"def",
"getLocalTempDir",
"(",
"self",
")",
":",
"return",
"os",
".",
"path",
".",
"abspath",
"(",
"tempfile",
".",
"mkdtemp",
"(",
"prefix",
"=",
"\"t\"",
",",
"dir",
"=",
"self",
".",
"localTempDir",
")",
")"
] | Get a new local temporary directory in which to write files that persist for the duration of
the job.
:return: The absolute path to a new local temporary directory. This directory will exist
for the duration of the job only, and is guaranteed to be deleted once the job
terminates, removing all files it contains recursively.
:rtype: str | [
"Get",
"a",
"new",
"local",
"temporary",
"directory",
"in",
"which",
"to",
"write",
"files",
"that",
"persist",
"for",
"the",
"duration",
"of",
"the",
"job",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/fileStore.py#L130-L140 |
225,104 | DataBiosphere/toil | src/toil/fileStore.py | FileStore.getLocalTempFile | def getLocalTempFile(self):
"""
Get a new local temporary file that will persist for the duration of the job.
:return: The absolute path to a local temporary file. This file will exist for the
duration of the job only, and is guaranteed to be deleted once the job terminates.
:rtype: str
"""
handle, tmpFile = tempfile.mkstemp(prefix="tmp", suffix=".tmp", dir=self.localTempDir)
os.close(handle)
return os.path.abspath(tmpFile) | python | def getLocalTempFile(self):
handle, tmpFile = tempfile.mkstemp(prefix="tmp", suffix=".tmp", dir=self.localTempDir)
os.close(handle)
return os.path.abspath(tmpFile) | [
"def",
"getLocalTempFile",
"(",
"self",
")",
":",
"handle",
",",
"tmpFile",
"=",
"tempfile",
".",
"mkstemp",
"(",
"prefix",
"=",
"\"tmp\"",
",",
"suffix",
"=",
"\".tmp\"",
",",
"dir",
"=",
"self",
".",
"localTempDir",
")",
"os",
".",
"close",
"(",
"han... | Get a new local temporary file that will persist for the duration of the job.
:return: The absolute path to a local temporary file. This file will exist for the
duration of the job only, and is guaranteed to be deleted once the job terminates.
:rtype: str | [
"Get",
"a",
"new",
"local",
"temporary",
"file",
"that",
"will",
"persist",
"for",
"the",
"duration",
"of",
"the",
"job",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/fileStore.py#L142-L152 |
225,105 | DataBiosphere/toil | src/toil/fileStore.py | FileStore.writeGlobalFileStream | def writeGlobalFileStream(self, cleanup=False):
"""
Similar to writeGlobalFile, but allows the writing of a stream to the job store.
The yielded file handle does not need to and should not be closed explicitly.
:param bool cleanup: is as in :func:`toil.fileStore.FileStore.writeGlobalFile`.
:return: A context manager yielding a tuple of
1) a file handle which can be written to and
2) the toil.fileStore.FileID of the resulting file in the job store.
"""
# TODO: Make this work with FileID
with self.jobStore.writeFileStream(None if not cleanup else self.jobGraph.jobStoreID) as (backingStream, fileStoreID):
# We have a string version of the file ID, and the backing stream.
# We need to yield a stream the caller can write to, and a FileID
# that accurately reflects the size of the data written to the
# stream. We assume the stream is not seekable.
# Make and keep a reference to the file ID, which is currently empty
fileID = FileID(fileStoreID, 0)
# Wrap the stream to increment the file ID's size for each byte written
wrappedStream = WriteWatchingStream(backingStream)
# When the stream is written to, count the bytes
def handle(numBytes):
fileID.size += numBytes
wrappedStream.onWrite(handle)
yield wrappedStream, fileID | python | def writeGlobalFileStream(self, cleanup=False):
# TODO: Make this work with FileID
with self.jobStore.writeFileStream(None if not cleanup else self.jobGraph.jobStoreID) as (backingStream, fileStoreID):
# We have a string version of the file ID, and the backing stream.
# We need to yield a stream the caller can write to, and a FileID
# that accurately reflects the size of the data written to the
# stream. We assume the stream is not seekable.
# Make and keep a reference to the file ID, which is currently empty
fileID = FileID(fileStoreID, 0)
# Wrap the stream to increment the file ID's size for each byte written
wrappedStream = WriteWatchingStream(backingStream)
# When the stream is written to, count the bytes
def handle(numBytes):
fileID.size += numBytes
wrappedStream.onWrite(handle)
yield wrappedStream, fileID | [
"def",
"writeGlobalFileStream",
"(",
"self",
",",
"cleanup",
"=",
"False",
")",
":",
"# TODO: Make this work with FileID",
"with",
"self",
".",
"jobStore",
".",
"writeFileStream",
"(",
"None",
"if",
"not",
"cleanup",
"else",
"self",
".",
"jobGraph",
".",
"jobSto... | Similar to writeGlobalFile, but allows the writing of a stream to the job store.
The yielded file handle does not need to and should not be closed explicitly.
:param bool cleanup: is as in :func:`toil.fileStore.FileStore.writeGlobalFile`.
:return: A context manager yielding a tuple of
1) a file handle which can be written to and
2) the toil.fileStore.FileID of the resulting file in the job store. | [
"Similar",
"to",
"writeGlobalFile",
"but",
"allows",
"the",
"writing",
"of",
"a",
"stream",
"to",
"the",
"job",
"store",
".",
"The",
"yielded",
"file",
"handle",
"does",
"not",
"need",
"to",
"and",
"should",
"not",
"be",
"closed",
"explicitly",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/fileStore.py#L184-L213 |
225,106 | DataBiosphere/toil | src/toil/fileStore.py | FileStore.readGlobalFile | def readGlobalFile(self, fileStoreID, userPath=None, cache=True, mutable=False, symlink=False):
"""
Makes the file associated with fileStoreID available locally. If mutable is True,
then a copy of the file will be created locally so that the original is not modified
and does not change the file for other jobs. If mutable is False, then a link can
be created to the file, saving disk resources.
If a user path is specified, it is used as the destination. If a user path isn't
specified, the file is stored in the local temp directory with an encoded name.
:param toil.fileStore.FileID fileStoreID: job store id for the file
:param string userPath: a path to the name of file to which the global file will be copied
or hard-linked (see below).
:param bool cache: Described in :func:`toil.fileStore.CachingFileStore.readGlobalFile`
:param bool mutable: Described in :func:`toil.fileStore.CachingFileStore.readGlobalFile`
:return: An absolute path to a local, temporary copy of the file keyed by fileStoreID.
:rtype: str
"""
raise NotImplementedError() | python | def readGlobalFile(self, fileStoreID, userPath=None, cache=True, mutable=False, symlink=False):
raise NotImplementedError() | [
"def",
"readGlobalFile",
"(",
"self",
",",
"fileStoreID",
",",
"userPath",
"=",
"None",
",",
"cache",
"=",
"True",
",",
"mutable",
"=",
"False",
",",
"symlink",
"=",
"False",
")",
":",
"raise",
"NotImplementedError",
"(",
")"
] | Makes the file associated with fileStoreID available locally. If mutable is True,
then a copy of the file will be created locally so that the original is not modified
and does not change the file for other jobs. If mutable is False, then a link can
be created to the file, saving disk resources.
If a user path is specified, it is used as the destination. If a user path isn't
specified, the file is stored in the local temp directory with an encoded name.
:param toil.fileStore.FileID fileStoreID: job store id for the file
:param string userPath: a path to the name of file to which the global file will be copied
or hard-linked (see below).
:param bool cache: Described in :func:`toil.fileStore.CachingFileStore.readGlobalFile`
:param bool mutable: Described in :func:`toil.fileStore.CachingFileStore.readGlobalFile`
:return: An absolute path to a local, temporary copy of the file keyed by fileStoreID.
:rtype: str | [
"Makes",
"the",
"file",
"associated",
"with",
"fileStoreID",
"available",
"locally",
".",
"If",
"mutable",
"is",
"True",
"then",
"a",
"copy",
"of",
"the",
"file",
"will",
"be",
"created",
"locally",
"so",
"that",
"the",
"original",
"is",
"not",
"modified",
... | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/fileStore.py#L216-L234 |
225,107 | DataBiosphere/toil | src/toil/fileStore.py | FileStore._runDeferredFunctions | def _runDeferredFunctions(deferredFunctions):
"""
Invoke the specified deferred functions and return a list of names of functions that
raised an exception while being invoked.
:param list[DeferredFunction] deferredFunctions: the DeferredFunctions to run
:rtype: list[str]
"""
failures = []
for deferredFunction in deferredFunctions:
try:
deferredFunction.invoke()
except:
failures.append(deferredFunction.name)
logger.exception('%s failed.', deferredFunction)
return failures | python | def _runDeferredFunctions(deferredFunctions):
failures = []
for deferredFunction in deferredFunctions:
try:
deferredFunction.invoke()
except:
failures.append(deferredFunction.name)
logger.exception('%s failed.', deferredFunction)
return failures | [
"def",
"_runDeferredFunctions",
"(",
"deferredFunctions",
")",
":",
"failures",
"=",
"[",
"]",
"for",
"deferredFunction",
"in",
"deferredFunctions",
":",
"try",
":",
"deferredFunction",
".",
"invoke",
"(",
")",
"except",
":",
"failures",
".",
"append",
"(",
"d... | Invoke the specified deferred functions and return a list of names of functions that
raised an exception while being invoked.
:param list[DeferredFunction] deferredFunctions: the DeferredFunctions to run
:rtype: list[str] | [
"Invoke",
"the",
"specified",
"deferred",
"functions",
"and",
"return",
"a",
"list",
"of",
"names",
"of",
"functions",
"that",
"raised",
"an",
"exception",
"while",
"being",
"invoked",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/fileStore.py#L361-L376 |
225,108 | DataBiosphere/toil | src/toil/fileStore.py | FileStore.logToMaster | def logToMaster(self, text, level=logging.INFO):
"""
Send a logging message to the leader. The message will also be \
logged by the worker at the same level.
:param text: The string to log.
:param int level: The logging level.
"""
logger.log(level=level, msg=("LOG-TO-MASTER: " + text))
self.loggingMessages.append(dict(text=text, level=level)) | python | def logToMaster(self, text, level=logging.INFO):
logger.log(level=level, msg=("LOG-TO-MASTER: " + text))
self.loggingMessages.append(dict(text=text, level=level)) | [
"def",
"logToMaster",
"(",
"self",
",",
"text",
",",
"level",
"=",
"logging",
".",
"INFO",
")",
":",
"logger",
".",
"log",
"(",
"level",
"=",
"level",
",",
"msg",
"=",
"(",
"\"LOG-TO-MASTER: \"",
"+",
"text",
")",
")",
"self",
".",
"loggingMessages",
... | Send a logging message to the leader. The message will also be \
logged by the worker at the same level.
:param text: The string to log.
:param int level: The logging level. | [
"Send",
"a",
"logging",
"message",
"to",
"the",
"leader",
".",
"The",
"message",
"will",
"also",
"be",
"\\",
"logged",
"by",
"the",
"worker",
"at",
"the",
"same",
"level",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/fileStore.py#L379-L388 |
225,109 | DataBiosphere/toil | src/toil/fileStore.py | FileStore._pidExists | def _pidExists(pid):
"""
This will return True if the process associated with pid is still running on the machine.
This is based on stackoverflow question 568271.
:param int pid: ID of the process to check for
:return: True/False
:rtype: bool
"""
assert pid > 0
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
# ESRCH == No such process
return False
else:
raise
else:
return True | python | def _pidExists(pid):
assert pid > 0
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
# ESRCH == No such process
return False
else:
raise
else:
return True | [
"def",
"_pidExists",
"(",
"pid",
")",
":",
"assert",
"pid",
">",
"0",
"try",
":",
"os",
".",
"kill",
"(",
"pid",
",",
"0",
")",
"except",
"OSError",
"as",
"err",
":",
"if",
"err",
".",
"errno",
"==",
"errno",
".",
"ESRCH",
":",
"# ESRCH == No such ... | This will return True if the process associated with pid is still running on the machine.
This is based on stackoverflow question 568271.
:param int pid: ID of the process to check for
:return: True/False
:rtype: bool | [
"This",
"will",
"return",
"True",
"if",
"the",
"process",
"associated",
"with",
"pid",
"is",
"still",
"running",
"on",
"the",
"machine",
".",
"This",
"is",
"based",
"on",
"stackoverflow",
"question",
"568271",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/fileStore.py#L409-L428 |
225,110 | DataBiosphere/toil | src/toil/fileStore.py | CachingFileStore.open | def open(self, job):
"""
This context manager decorated method allows cache-specific operations to be conducted
before and after the execution of a job in worker.py
"""
# Create a working directory for the job
startingDir = os.getcwd()
self.localTempDir = makePublicDir(os.path.join(self.localTempDir, str(uuid.uuid4())))
# Check the status of all jobs on this node. If there are jobs that started and died before
# cleaning up their presence from the cache state file, restore the cache file to a state
# where the jobs don't exist.
with self._CacheState.open(self) as cacheInfo:
self.findAndHandleDeadJobs(cacheInfo)
# While we have a lock on the cache file, run a naive check to see if jobs on this node
# have greatly gone over their requested limits.
if cacheInfo.sigmaJob < 0:
logger.warning('Detecting that one or more jobs on this node have used more '
'resources than requested. Turn on debug logs to see more'
'information on cache usage.')
# Get the requirements for the job and clean the cache if necessary. cleanCache will
# ensure that the requirements for this job are stored in the state file.
jobReqs = job.disk
# Cleanup the cache to free up enough space for this job (if needed)
self.cleanCache(jobReqs)
try:
os.chdir(self.localTempDir)
yield
finally:
diskUsed = getDirSizeRecursively(self.localTempDir)
logString = ("Job {jobName} used {percent:.2f}% ({humanDisk}B [{disk}B] used, "
"{humanRequestedDisk}B [{requestedDisk}B] requested) at the end of "
"its run.".format(jobName=self.jobName,
percent=(float(diskUsed) / jobReqs * 100 if
jobReqs > 0 else 0.0),
humanDisk=bytes2human(diskUsed),
disk=diskUsed,
humanRequestedDisk=bytes2human(jobReqs),
requestedDisk=jobReqs))
self.logToMaster(logString, level=logging.DEBUG)
if diskUsed > jobReqs:
self.logToMaster("Job used more disk than requested. Please reconsider modifying "
"the user script to avoid the chance of failure due to "
"incorrectly requested resources. " + logString,
level=logging.WARNING)
os.chdir(startingDir)
self.cleanupInProgress = True
# Delete all the job specific files and return sizes to jobReqs
self.returnJobReqs(jobReqs)
with self._CacheState.open(self) as cacheInfo:
# Carry out any user-defined cleanup actions
deferredFunctions = cacheInfo.jobState[self.jobID]['deferredFunctions']
failures = self._runDeferredFunctions(deferredFunctions)
for failure in failures:
self.logToMaster('Deferred function "%s" failed.' % failure, logging.WARN)
# Finally delete the job from the cache state file
cacheInfo.jobState.pop(self.jobID) | python | def open(self, job):
# Create a working directory for the job
startingDir = os.getcwd()
self.localTempDir = makePublicDir(os.path.join(self.localTempDir, str(uuid.uuid4())))
# Check the status of all jobs on this node. If there are jobs that started and died before
# cleaning up their presence from the cache state file, restore the cache file to a state
# where the jobs don't exist.
with self._CacheState.open(self) as cacheInfo:
self.findAndHandleDeadJobs(cacheInfo)
# While we have a lock on the cache file, run a naive check to see if jobs on this node
# have greatly gone over their requested limits.
if cacheInfo.sigmaJob < 0:
logger.warning('Detecting that one or more jobs on this node have used more '
'resources than requested. Turn on debug logs to see more'
'information on cache usage.')
# Get the requirements for the job and clean the cache if necessary. cleanCache will
# ensure that the requirements for this job are stored in the state file.
jobReqs = job.disk
# Cleanup the cache to free up enough space for this job (if needed)
self.cleanCache(jobReqs)
try:
os.chdir(self.localTempDir)
yield
finally:
diskUsed = getDirSizeRecursively(self.localTempDir)
logString = ("Job {jobName} used {percent:.2f}% ({humanDisk}B [{disk}B] used, "
"{humanRequestedDisk}B [{requestedDisk}B] requested) at the end of "
"its run.".format(jobName=self.jobName,
percent=(float(diskUsed) / jobReqs * 100 if
jobReqs > 0 else 0.0),
humanDisk=bytes2human(diskUsed),
disk=diskUsed,
humanRequestedDisk=bytes2human(jobReqs),
requestedDisk=jobReqs))
self.logToMaster(logString, level=logging.DEBUG)
if diskUsed > jobReqs:
self.logToMaster("Job used more disk than requested. Please reconsider modifying "
"the user script to avoid the chance of failure due to "
"incorrectly requested resources. " + logString,
level=logging.WARNING)
os.chdir(startingDir)
self.cleanupInProgress = True
# Delete all the job specific files and return sizes to jobReqs
self.returnJobReqs(jobReqs)
with self._CacheState.open(self) as cacheInfo:
# Carry out any user-defined cleanup actions
deferredFunctions = cacheInfo.jobState[self.jobID]['deferredFunctions']
failures = self._runDeferredFunctions(deferredFunctions)
for failure in failures:
self.logToMaster('Deferred function "%s" failed.' % failure, logging.WARN)
# Finally delete the job from the cache state file
cacheInfo.jobState.pop(self.jobID) | [
"def",
"open",
"(",
"self",
",",
"job",
")",
":",
"# Create a working directory for the job",
"startingDir",
"=",
"os",
".",
"getcwd",
"(",
")",
"self",
".",
"localTempDir",
"=",
"makePublicDir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"loca... | This context manager decorated method allows cache-specific operations to be conducted
before and after the execution of a job in worker.py | [
"This",
"context",
"manager",
"decorated",
"method",
"allows",
"cache",
"-",
"specific",
"operations",
"to",
"be",
"conducted",
"before",
"and",
"after",
"the",
"execution",
"of",
"a",
"job",
"in",
"worker",
".",
"py"
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/fileStore.py#L486-L541 |
225,111 | DataBiosphere/toil | src/toil/fileStore.py | CachingFileStore._setupCache | def _setupCache(self):
"""
Setup the cache based on the provided values for localCacheDir.
"""
# we first check whether the cache directory exists. If it doesn't, create it.
if not os.path.exists(self.localCacheDir):
# Create a temporary directory as this worker's private cache. If all goes well, it
# will be renamed into the cache for this node.
personalCacheDir = ''.join([os.path.dirname(self.localCacheDir), '/.ctmp-',
str(uuid.uuid4())])
os.mkdir(personalCacheDir, 0o755)
self._createCacheLockFile(personalCacheDir)
try:
os.rename(personalCacheDir, self.localCacheDir)
except OSError as err:
# The only acceptable FAIL case is that the destination is a non-empty directory
# directory. Assuming (it's ambiguous) atomic renaming of directories, if the
# dst is non-empty, it only means that another worker has beaten this one to the
# rename.
if err.errno == errno.ENOTEMPTY:
# Cleanup your own mess. It's only polite.
shutil.rmtree(personalCacheDir)
else:
raise
# You can't reach here unless a local cache directory has been created successfully
with self._CacheState.open(self) as cacheInfo:
# Ensure this cache is from the correct attempt at the workflow! If it isn't, we
# need to reset the cache lock file
if cacheInfo.attemptNumber != self.workflowAttemptNumber:
if cacheInfo.nlink == 2:
cacheInfo.cached = 0 # cached file sizes are accounted for by job store
else:
allCachedFiles = [os.path.join(self.localCacheDir, x)
for x in os.listdir(self.localCacheDir)
if not self._isHidden(x)]
cacheInfo.cached = sum([os.stat(cachedFile).st_size
for cachedFile in allCachedFiles])
# TODO: Delete the working directories
cacheInfo.sigmaJob = 0
cacheInfo.attemptNumber = self.workflowAttemptNumber
self.nlinkThreshold = cacheInfo.nlink | python | def _setupCache(self):
# we first check whether the cache directory exists. If it doesn't, create it.
if not os.path.exists(self.localCacheDir):
# Create a temporary directory as this worker's private cache. If all goes well, it
# will be renamed into the cache for this node.
personalCacheDir = ''.join([os.path.dirname(self.localCacheDir), '/.ctmp-',
str(uuid.uuid4())])
os.mkdir(personalCacheDir, 0o755)
self._createCacheLockFile(personalCacheDir)
try:
os.rename(personalCacheDir, self.localCacheDir)
except OSError as err:
# The only acceptable FAIL case is that the destination is a non-empty directory
# directory. Assuming (it's ambiguous) atomic renaming of directories, if the
# dst is non-empty, it only means that another worker has beaten this one to the
# rename.
if err.errno == errno.ENOTEMPTY:
# Cleanup your own mess. It's only polite.
shutil.rmtree(personalCacheDir)
else:
raise
# You can't reach here unless a local cache directory has been created successfully
with self._CacheState.open(self) as cacheInfo:
# Ensure this cache is from the correct attempt at the workflow! If it isn't, we
# need to reset the cache lock file
if cacheInfo.attemptNumber != self.workflowAttemptNumber:
if cacheInfo.nlink == 2:
cacheInfo.cached = 0 # cached file sizes are accounted for by job store
else:
allCachedFiles = [os.path.join(self.localCacheDir, x)
for x in os.listdir(self.localCacheDir)
if not self._isHidden(x)]
cacheInfo.cached = sum([os.stat(cachedFile).st_size
for cachedFile in allCachedFiles])
# TODO: Delete the working directories
cacheInfo.sigmaJob = 0
cacheInfo.attemptNumber = self.workflowAttemptNumber
self.nlinkThreshold = cacheInfo.nlink | [
"def",
"_setupCache",
"(",
"self",
")",
":",
"# we first check whether the cache directory exists. If it doesn't, create it.",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"localCacheDir",
")",
":",
"# Create a temporary directory as this worker's private ... | Setup the cache based on the provided values for localCacheDir. | [
"Setup",
"the",
"cache",
"based",
"on",
"the",
"provided",
"values",
"for",
"localCacheDir",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/fileStore.py#L887-L927 |
def _createCacheLockFile(self, tempCacheDir):
    """
    Create the cache lock file that will hold the state of the cache on the node.

    :param str tempCacheDir: Temporary directory to use for setting up the cache lock
           file the first time around.
    """
    # The nlink threshold is sniffed from the job store the first time a cache
    # instance comes up on this node.  It must be set before the state dict
    # below is assembled, because the dict records the threshold value.
    self.setNlinkThreshold(tempCacheDir)
    # How much space the underlying device currently has available.
    availableSpace, _ = getFileSystemSize(tempCacheDir)
    # Touch the cache lock file into existence.
    lockFilePath = os.path.join(tempCacheDir, os.path.basename(self.cacheLockFile))
    open(lockFilePath, 'w').close()
    # Seed the cache state file with its initial bookkeeping values.
    initialState = self._CacheState({
        'nlink': self.nlinkThreshold,
        'attemptNumber': self.workflowAttemptNumber,
        'total': availableSpace,
        'cached': 0,
        'sigmaJob': 0,
        'cacheDir': self.localCacheDir,
        'jobState': {}})
    initialState.write(os.path.join(tempCacheDir,
                                    os.path.basename(self.cacheStateFile)))
def decodedFileID(self, cachedFilePath):
    """
    Decode a cached file name back into the job store file ID it encodes.

    :param str cachedFilePath: Path to the cached file
    :return: The jobstore file ID associated with the file
    :rtype: str
    """
    parentDir, encodedName = os.path.split(cachedFilePath)
    assert parentDir == self.localCacheDir, 'Can\'t decode uncached file names'
    # base64 only operates on bytes, so round-trip through utf-8 here.  It is
    # probably worth eventually converting all file name variables to bytes
    # rather than text.
    return base64.urlsafe_b64decode(encodedName.encode('utf-8')).decode('utf-8')
def addToCache(self, localFilePath, jobStoreFileID, callingFunc, mutable=False):
    """
    Used to process the caching of a file. This depends on whether a file is being written
    to file store, or read from it.
    WRITING
    The file is in localTempDir. It needs to be linked into cache if possible.
    READING
    The file is already in the cache dir. Depending on whether it is modifiable or not, does
    it need to be linked to the required location, or copied. If it is copied, can the file
    still be retained in cache?
    :param str localFilePath: Path to the Source file
    :param jobStoreFileID: jobStoreID for the file
    :param str callingFunc: Who called this function, 'write' or 'read'
    :param bool mutable: See modifiable in readGlobalFile
    """
    assert callingFunc in ('read', 'write')
    with self.cacheLock() as lockFileHandle:
        cachedFile = self.encodedFileID(jobStoreFileID)
        # The file to be cached MUST originate in the environment of the TOIL temp directory
        # (hard links cannot cross file systems).
        if (os.stat(self.localCacheDir).st_dev !=
                os.stat(os.path.dirname(localFilePath)).st_dev):
            raise InvalidSourceCacheError('Attempting to cache a file across file systems '
                                          'cachedir = %s, file = %s.' % (self.localCacheDir,
                                                                         localFilePath))
        if not localFilePath.startswith(self.localTempDir):
            raise InvalidSourceCacheError('Attempting a cache operation on a non-local file '
                                          '%s.' % localFilePath)
        if callingFunc == 'read' and mutable:
            shutil.copyfile(cachedFile, localFilePath)
            fileSize = os.stat(cachedFile).st_size
            cacheInfo = self._CacheState._load(self.cacheStateFile)
            cacheInfo.cached += fileSize if cacheInfo.nlink != 2 else 0
            if not cacheInfo.isBalanced():
                # Keeping both the mutable copy and the cached copy would
                # overflow the cache, so drop the cached copy.
                os.remove(cachedFile)
                cacheInfo.cached -= fileSize if cacheInfo.nlink != 2 else 0
                # Fixed garbled message (was 'Could not download both download ...').
                logger.debug('Could not both download ' +
                             '%s as mutable and add to ' % os.path.basename(localFilePath) +
                             'cache. Hence only mutable copy retained.')
            else:
                logger.debug('CACHE: Added file with ID \'%s\' to the cache.' %
                             jobStoreFileID)
            jobState = self._JobState(cacheInfo.jobState[self.jobID])
            jobState.addToJobSpecFiles(jobStoreFileID, localFilePath, -1, False)
            cacheInfo.jobState[self.jobID] = jobState.__dict__
            cacheInfo.write(self.cacheStateFile)
        else:
            # There are two possibilities, read-and-immutable, and write. Both
            # cases do almost the same thing except for the direction of the
            # os.link, hence they are written together.
            if callingFunc == 'read':  # and mutable is inherently False
                src = cachedFile
                dest = localFilePath
                # To mirror behaviour of shutil.copyfile
                if os.path.exists(dest):
                    os.remove(dest)
            else:  # write
                src = localFilePath
                dest = cachedFile
            try:
                os.link(src, dest)
            except OSError as err:
                if err.errno != errno.EEXIST:
                    raise
                # If we get the EEXIST error, it can only be from write since in read we are
                # explicitly deleting the file. This shouldn't happen with the .partial
                # logic hence we raise a cache error.
                raise CacheError('Attempting to recache a file %s.' % src)
            else:
                # Chmod the cached file. Cached files can never be modified.
                os.chmod(cachedFile, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
                # Return the filesize of cachedFile to the job and increase the cached size
                # The values passed here don't matter since rFS looks at the file only for
                # the stat
                self.returnFileSize(jobStoreFileID, localFilePath, lockFileHandle,
                                    fileAlreadyCached=False)
            if callingFunc == 'read':
                logger.debug('CACHE: Read file with ID \'%s\' from the cache.' %
                             jobStoreFileID)
            else:
                logger.debug('CACHE: Added file with ID \'%s\' to the cache.' %
                             jobStoreFileID)
def cleanCache(self, newJobReqs):
    """
    Evict files from the cache directory until at least newJobReqs bytes are
    available for the incoming job, registering the job in the cache state.

    :param float newJobReqs: the total number of bytes of files allowed in the cache.
    """
    with self._CacheState.open(self) as state:
        # Charge the new job's disk requirement against sigmaJob.
        state.sigmaJob += newJobReqs
        # Register this job's bookkeeping entry.  A partial is used for the
        # nested defaultdict so the whole structure stays pickleable.  Based
        # on answer by user Nathaniel Gentile at
        # http://stackoverflow.com/questions/2600790
        assert self.jobID not in state.jobState
        state.jobState[self.jobID] = {
            'jobName': self.jobName,
            'jobReqs': newJobReqs,
            'jobDir': self.localTempDir,
            'jobSpecificFiles': defaultdict(partial(defaultdict, int)),
            'filesToFSIDs': defaultdict(set),
            'pid': os.getpid(),
            'deferredFunctions': []}
        # Nothing to evict if the caching equation already balances.
        if state.isBalanced():
            return None
        # A cached file is deletable when no other worker is using it, which
        # is detected through the file's hard-link count.
        candidatePaths = [os.path.join(self.localCacheDir, name)
                          for name in os.listdir(self.localCacheDir)
                          if not self._isHidden(name)]
        candidateStats = [(path, os.stat(path)) for path in candidatePaths]
        # TODO mtime vs ctime
        deletable = {(path, st.st_mtime, st.st_size)
                     for path, st in candidateStats
                     if st.st_nlink == self.nlinkThreshold}
        # Descending mtime order means pop() yields the least recently
        # created file first.
        deletable = sorted(deletable, key=lambda entry: (-entry[1], -entry[2]))
        logger.debug('CACHE: Need %s bytes for new job. Detecting an estimated %s (out of a '
                     'total %s) bytes available for running the new job. The size of the cache '
                     'is %s bytes.', newJobReqs,
                     (state.total - (state.cached + state.sigmaJob - newJobReqs)),
                     state.total, state.cached)
        logger.debug('CACHE: Evicting files to make room for the new job.')
        # Evict until the equation balances or we run out of candidates.
        bytesEvicted = 0
        while not state.isBalanced() and deletable:
            victimPath, _, victimSize = deletable.pop()
            os.remove(victimPath)
            if self.nlinkThreshold != 2:
                state.cached -= victimSize
            bytesEvicted += victimSize
            assert state.cached >= 0
            logger.debug('CACHE: Evicted file with ID \'%s\' (%s bytes)' %
                         (self.decodedFileID(victimPath), victimSize))
        logger.debug('CACHE: Evicted a total of %s bytes. Available space is now %s bytes.',
                     bytesEvicted,
                     (state.total - (state.cached + state.sigmaJob - newJobReqs)))
        if not state.isBalanced():
            raise CacheUnbalancedError()
def removeSingleCachedFile(self, fileStoreID):
    """
    Forcibly remove the single file described by fileStoreID from the cache.
    """
    with self._CacheState.open(self) as state:
        cachedFile = self.encodedFileID(fileStoreID)
        cachedStats = os.stat(cachedFile)
        # The caller established that the file exists; re-verify that nothing
        # has changed in the meantime before deleting it.
        assert cachedStats.st_nlink <= self.nlinkThreshold, \
            'Attempting to delete a global file that is in use by another job.'
        assert cachedStats.st_nlink >= self.nlinkThreshold, \
            'A global file has too FEW links at deletion time. Our link threshold is incorrect!'
        # Delete the file, and subtract its size from the cache total unless
        # the job store's own copy already accounts for it (fileJobStore,
        # i.e. nlink threshold of 2).
        os.remove(cachedFile)
        if self.nlinkThreshold != 2:
            state.cached -= cachedStats.st_size
        if not state.isBalanced():
            self.logToMaster('CACHE: The cache was not balanced on removing single file',
                             logging.WARN)
        self.logToMaster('CACHE: Successfully removed file with ID \'%s\'.' % fileStoreID)
        return None
def _accountForNlinkEquals2(self, localFilePath):
    """
    Utility accounting for the fact that when nlinkThreshold == 2 the file
    store's own copy of a file already accounts for its size, so the size
    must not also be charged against the cached-file total.

    :param str localFilePath: Path to the local file that was linked to the file store copy.
    """
    linkedStats = os.stat(localFilePath)
    assert linkedStats.st_nlink >= self.nlinkThreshold
    with self._CacheState.open(self) as state:
        # The file store copy carries the weight, so release the job's claim.
        state.sigmaJob -= linkedStats.st_size
        self._JobState(state.jobState[self.jobID]).updateJobReqs(
            linkedStats.st_size, 'remove')
def returnJobReqs(self, jobReqs):
    """
    Return the job's effective disk requirement to the pool after the job
    completes, deleting the job's local file copies with the cache lock held.

    :param float jobReqs: Original size requirement of the job
    """
    # Only this job's entry is read from the state file here, so the initial
    # load does not need the lock.
    jobState = self._JobState(
        self._CacheState._load(self.cacheStateFile).jobState[self.jobID])
    for fileStoreID in list(jobState.jobSpecificFiles.keys()):
        self.deleteLocalFile(fileStoreID)
    with self._CacheState.open(self) as state:
        state.sigmaJob -= jobReqs
def asyncWrite(self):
    """
    Worker loop that writes files to the job store asynchronously, so that
    subsequent jobs are not delayed by a long write operation.
    """
    try:
        while True:
            try:
                # Wait up to two seconds for work to appear on the queue.
                work = self.queue.get(timeout=2)
            except Empty:
                # No work yet; bail out if another worker hit an exception
                # and signaled termination.
                if self._terminateEvent.isSet():
                    raise RuntimeError("The termination flag is set, exiting")
                continue
            # A None sentinel on the queue signals normal shutdown.
            if work is None:
                break
            inputFileHandle, jobStoreFileID = work
            cachedFileName = self.encodedFileID(jobStoreFileID)
            # The harbinger must exist in the cache directory and carry this
            # process's PID.  If asyncWrite is ever ported from threads to
            # subprocesses, logic to securely overwrite the harbinger file
            # belongs here.
            harbingerFile = self.HarbingerFile(self, cachedFileName=cachedFileName)
            assert harbingerFile.exists()
            assert harbingerFile.read() == int(os.getpid())
            # A file handle (not a file name) was queued so the data stays
            # reachable even if the file itself is deleted, as long as the
            # open handle is held.
            with self.jobStore.updateFileStream(jobStoreFileID) as outputFileHandle:
                shutil.copyfileobj(inputFileHandle, outputFileHandle)
            inputFileHandle.close()
            # Drop the file from the pending-write bookkeeping.
            with self._pendingFileWritesLock:
                self._pendingFileWrites.remove(jobStoreFileID)
            # The harbinger has served its purpose.
            harbingerFile.delete()
    except:
        # Propagate any failure to the other workers before re-raising.
        self._terminateEvent.set()
        raise
225,120 | DataBiosphere/toil | src/toil/fileStore.py | CachingFileStore._updateJobWhenDone | def _updateJobWhenDone(self):
"""
Asynchronously update the status of the job on the disk, first waiting \
until the writing threads have finished and the input blockFn has stopped \
blocking.
"""
def asyncUpdate():
try:
# Wait till all file writes have completed
for i in range(len(self.workers)):
self.queue.put(None)
for thread in self.workers:
thread.join()
# Wait till input block-fn returns - in the event of an exception
# this will eventually terminate
self.inputBlockFn()
# Check the terminate event, if set we can not guarantee
# that the workers ended correctly, therefore we exit without
# completing the update
if self._terminateEvent.isSet():
raise RuntimeError("The termination flag is set, exiting before update")
# Indicate any files that should be deleted once the update of
# the job wrapper is completed.
self.jobGraph.filesToDelete = list(self.filesToDelete)
# Complete the job
self.jobStore.update(self.jobGraph)
# Delete any remnant jobs
list(map(self.jobStore.delete, self.jobsToDelete))
# Delete any remnant files
list(map(self.jobStore.deleteFile, self.filesToDelete))
# Remove the files to delete list, having successfully removed the files
if len(self.filesToDelete) > 0:
self.jobGraph.filesToDelete = []
# Update, removing emptying files to delete
self.jobStore.update(self.jobGraph)
except:
self._terminateEvent.set()
raise
finally:
# Indicate that _blockFn can return
# This code will always run
self.updateSemaphore.release()
# The update semaphore is held while the job is written to the job store
try:
self.updateSemaphore.acquire()
t = Thread(target=asyncUpdate)
t.start()
except:
# This is to ensure that the semaphore is released in a crash to stop a deadlock
# scenario
self.updateSemaphore.release()
raise | python | def _updateJobWhenDone(self):
def asyncUpdate():
try:
# Wait till all file writes have completed
for i in range(len(self.workers)):
self.queue.put(None)
for thread in self.workers:
thread.join()
# Wait till input block-fn returns - in the event of an exception
# this will eventually terminate
self.inputBlockFn()
# Check the terminate event, if set we can not guarantee
# that the workers ended correctly, therefore we exit without
# completing the update
if self._terminateEvent.isSet():
raise RuntimeError("The termination flag is set, exiting before update")
# Indicate any files that should be deleted once the update of
# the job wrapper is completed.
self.jobGraph.filesToDelete = list(self.filesToDelete)
# Complete the job
self.jobStore.update(self.jobGraph)
# Delete any remnant jobs
list(map(self.jobStore.delete, self.jobsToDelete))
# Delete any remnant files
list(map(self.jobStore.deleteFile, self.filesToDelete))
# Remove the files to delete list, having successfully removed the files
if len(self.filesToDelete) > 0:
self.jobGraph.filesToDelete = []
# Update, removing emptying files to delete
self.jobStore.update(self.jobGraph)
except:
self._terminateEvent.set()
raise
finally:
# Indicate that _blockFn can return
# This code will always run
self.updateSemaphore.release()
# The update semaphore is held while the job is written to the job store
try:
self.updateSemaphore.acquire()
t = Thread(target=asyncUpdate)
t.start()
except:
# This is to ensure that the semaphore is released in a crash to stop a deadlock
# scenario
self.updateSemaphore.release()
raise | [
"def",
"_updateJobWhenDone",
"(",
"self",
")",
":",
"def",
"asyncUpdate",
"(",
")",
":",
"try",
":",
"# Wait till all file writes have completed",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"workers",
")",
")",
":",
"self",
".",
"queue",
".",... | Asynchronously update the status of the job on the disk, first waiting \
until the writing threads have finished and the input blockFn has stopped \
blocking. | [
"Asynchronously",
"update",
"the",
"status",
"of",
"the",
"job",
"on",
"the",
"disk",
"first",
"waiting",
"\\",
"until",
"the",
"writing",
"threads",
"have",
"finished",
"and",
"the",
"input",
"blockFn",
"has",
"stopped",
"\\",
"blocking",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/fileStore.py#L1548-L1609 |
225,121 | DataBiosphere/toil | src/toil/fileStore.py | NonCachingFileStore._getAllJobStates | def _getAllJobStates(workflowDir):
"""
Generator function that deserializes and yields the job state for every job on the node,
one at a time.
:param str workflowDir: The location of the workflow directory on the node.
:return: dict with keys (jobName, jobPID, jobDir, deferredFunctions)
:rtype: dict
"""
jobStateFiles = []
for root, dirs, files in os.walk(workflowDir):
for filename in files:
if filename == '.jobState':
jobStateFiles.append(os.path.join(root, filename))
for filename in jobStateFiles:
try:
yield NonCachingFileStore._readJobState(filename)
except IOError as e:
if e.errno == 2:
# job finished & deleted its jobState file since the jobState files were discovered
continue
else:
raise | python | def _getAllJobStates(workflowDir):
jobStateFiles = []
for root, dirs, files in os.walk(workflowDir):
for filename in files:
if filename == '.jobState':
jobStateFiles.append(os.path.join(root, filename))
for filename in jobStateFiles:
try:
yield NonCachingFileStore._readJobState(filename)
except IOError as e:
if e.errno == 2:
# job finished & deleted its jobState file since the jobState files were discovered
continue
else:
raise | [
"def",
"_getAllJobStates",
"(",
"workflowDir",
")",
":",
"jobStateFiles",
"=",
"[",
"]",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"workflowDir",
")",
":",
"for",
"filename",
"in",
"files",
":",
"if",
"filename",
"==",
"'.... | Generator function that deserializes and yields the job state for every job on the node,
one at a time.
:param str workflowDir: The location of the workflow directory on the node.
:return: dict with keys (jobName, jobPID, jobDir, deferredFunctions)
:rtype: dict | [
"Generator",
"function",
"that",
"deserializes",
"and",
"yields",
"the",
"job",
"state",
"for",
"every",
"job",
"on",
"the",
"node",
"one",
"at",
"a",
"time",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/fileStore.py#L1822-L1844 |
225,122 | DataBiosphere/toil | src/toil/fileStore.py | NonCachingFileStore._createJobStateFile | def _createJobStateFile(self):
"""
Create the job state file for the current job and fill in the required
values.
:return: Path to the job state file
:rtype: str
"""
jobStateFile = os.path.join(self.localTempDir, '.jobState')
jobState = {'jobPID': os.getpid(),
'jobName': self.jobName,
'jobDir': self.localTempDir,
'deferredFunctions': []}
with open(jobStateFile + '.tmp', 'wb') as fH:
dill.dump(jobState, fH)
os.rename(jobStateFile + '.tmp', jobStateFile)
return jobStateFile | python | def _createJobStateFile(self):
jobStateFile = os.path.join(self.localTempDir, '.jobState')
jobState = {'jobPID': os.getpid(),
'jobName': self.jobName,
'jobDir': self.localTempDir,
'deferredFunctions': []}
with open(jobStateFile + '.tmp', 'wb') as fH:
dill.dump(jobState, fH)
os.rename(jobStateFile + '.tmp', jobStateFile)
return jobStateFile | [
"def",
"_createJobStateFile",
"(",
"self",
")",
":",
"jobStateFile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"localTempDir",
",",
"'.jobState'",
")",
"jobState",
"=",
"{",
"'jobPID'",
":",
"os",
".",
"getpid",
"(",
")",
",",
"'jobName'",
... | Create the job state file for the current job and fill in the required
values.
:return: Path to the job state file
:rtype: str | [
"Create",
"the",
"job",
"state",
"file",
"for",
"the",
"current",
"job",
"and",
"fill",
"in",
"the",
"required",
"values",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/fileStore.py#L1861-L1877 |
225,123 | DataBiosphere/toil | src/toil/fileStore.py | WriteWatchingStream.write | def write(self, data):
"""
Write the given data to the file.
"""
# Do the write
self.backingStream.write(data)
for listener in self.writeListeners:
# Send out notifications
listener(len(data)) | python | def write(self, data):
# Do the write
self.backingStream.write(data)
for listener in self.writeListeners:
# Send out notifications
listener(len(data)) | [
"def",
"write",
"(",
"self",
",",
"data",
")",
":",
"# Do the write",
"self",
".",
"backingStream",
".",
"write",
"(",
"data",
")",
"for",
"listener",
"in",
"self",
".",
"writeListeners",
":",
"# Send out notifications",
"listener",
"(",
"len",
"(",
"data",
... | Write the given data to the file. | [
"Write",
"the",
"given",
"data",
"to",
"the",
"file",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/fileStore.py#L1931-L1941 |
225,124 | DataBiosphere/toil | src/toil/serviceManager.py | ServiceManager.scheduleServices | def scheduleServices(self, jobGraph):
"""
Schedule the services of a job asynchronously.
When the job's services are running the jobGraph for the job will
be returned by toil.leader.ServiceManager.getJobGraphsWhoseServicesAreRunning.
:param toil.jobGraph.JobGraph jobGraph: wrapper of job with services to schedule.
"""
# Add jobGraph to set being processed by the service manager
self.jobGraphsWithServicesBeingStarted.add(jobGraph)
# Add number of jobs managed by ServiceManager
self.jobsIssuedToServiceManager += sum(map(len, jobGraph.services)) + 1 # The plus one accounts for the root job
# Asynchronously schedule the services
self._jobGraphsWithServicesToStart.put(jobGraph) | python | def scheduleServices(self, jobGraph):
# Add jobGraph to set being processed by the service manager
self.jobGraphsWithServicesBeingStarted.add(jobGraph)
# Add number of jobs managed by ServiceManager
self.jobsIssuedToServiceManager += sum(map(len, jobGraph.services)) + 1 # The plus one accounts for the root job
# Asynchronously schedule the services
self._jobGraphsWithServicesToStart.put(jobGraph) | [
"def",
"scheduleServices",
"(",
"self",
",",
"jobGraph",
")",
":",
"# Add jobGraph to set being processed by the service manager",
"self",
".",
"jobGraphsWithServicesBeingStarted",
".",
"add",
"(",
"jobGraph",
")",
"# Add number of jobs managed by ServiceManager",
"self",
".",
... | Schedule the services of a job asynchronously.
When the job's services are running the jobGraph for the job will
be returned by toil.leader.ServiceManager.getJobGraphsWhoseServicesAreRunning.
:param toil.jobGraph.JobGraph jobGraph: wrapper of job with services to schedule. | [
"Schedule",
"the",
"services",
"of",
"a",
"job",
"asynchronously",
".",
"When",
"the",
"job",
"s",
"services",
"are",
"running",
"the",
"jobGraph",
"for",
"the",
"job",
"will",
"be",
"returned",
"by",
"toil",
".",
"leader",
".",
"ServiceManager",
".",
"get... | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/serviceManager.py#L73-L88 |
225,125 | DataBiosphere/toil | src/toil/serviceManager.py | ServiceManager.shutdown | def shutdown(self):
"""
Cleanly terminate worker threads starting and killing services. Will block
until all services are started and blocked.
"""
logger.debug('Waiting for service manager thread to finish ...')
startTime = time.time()
self._terminate.set()
self._serviceStarter.join()
# Kill any services still running to avoid deadlock
for services in list(self.toilState.servicesIssued.values()):
self.killServices(services, error=True)
logger.debug('... finished shutting down the service manager. Took %s seconds', time.time() - startTime) | python | def shutdown(self):
logger.debug('Waiting for service manager thread to finish ...')
startTime = time.time()
self._terminate.set()
self._serviceStarter.join()
# Kill any services still running to avoid deadlock
for services in list(self.toilState.servicesIssued.values()):
self.killServices(services, error=True)
logger.debug('... finished shutting down the service manager. Took %s seconds', time.time() - startTime) | [
"def",
"shutdown",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"'Waiting for service manager thread to finish ...'",
")",
"startTime",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"_terminate",
".",
"set",
"(",
")",
"self",
".",
"_serviceStarter",
... | Cleanly terminate worker threads starting and killing services. Will block
until all services are started and blocked. | [
"Cleanly",
"terminate",
"worker",
"threads",
"starting",
"and",
"killing",
"services",
".",
"Will",
"block",
"until",
"all",
"services",
"are",
"started",
"and",
"blocked",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/serviceManager.py#L153-L165 |
225,126 | DataBiosphere/toil | src/toil/serviceManager.py | ServiceManager._startServices | def _startServices(jobGraphsWithServicesToStart,
jobGraphsWithServicesThatHaveStarted,
serviceJobsToStart,
terminate, jobStore):
"""
Thread used to schedule services.
"""
servicesThatAreStarting = set()
servicesRemainingToStartForJob = {}
serviceToJobGraph = {}
while True:
with throttle(1.0):
if terminate.is_set():
logger.debug('Received signal to quit starting services.')
break
try:
jobGraph = jobGraphsWithServicesToStart.get_nowait()
if len(jobGraph.services) > 1:
# Have to fall back to the old blocking behavior to
# ensure entire service "groups" are issued as a whole.
blockUntilServiceGroupIsStarted(jobGraph,
jobGraphsWithServicesThatHaveStarted,
serviceJobsToStart, terminate, jobStore)
continue
# Found a new job that needs to schedule its services.
for serviceJob in jobGraph.services[0]:
serviceToJobGraph[serviceJob] = jobGraph
servicesRemainingToStartForJob[jobGraph] = len(jobGraph.services[0])
# Issue the service jobs all at once.
for serviceJob in jobGraph.services[0]:
logger.debug("Service manager is starting service job: %s, start ID: %s", serviceJob, serviceJob.startJobStoreID)
serviceJobsToStart.put(serviceJob)
# We should now start to monitor these services to see if
# they've started yet.
servicesThatAreStarting.update(jobGraph.services[0])
except Empty:
# No new jobs that need services scheduled.
pass
for serviceJob in list(servicesThatAreStarting):
if not jobStore.fileExists(serviceJob.startJobStoreID):
# Service has started!
servicesThatAreStarting.remove(serviceJob)
parentJob = serviceToJobGraph[serviceJob]
servicesRemainingToStartForJob[parentJob] -= 1
assert servicesRemainingToStartForJob[parentJob] >= 0
del serviceToJobGraph[serviceJob]
# Find if any jobGraphs have had *all* their services started.
jobGraphsToRemove = set()
for jobGraph, remainingServices in servicesRemainingToStartForJob.items():
if remainingServices == 0:
jobGraphsWithServicesThatHaveStarted.put(jobGraph)
jobGraphsToRemove.add(jobGraph)
for jobGraph in jobGraphsToRemove:
del servicesRemainingToStartForJob[jobGraph] | python | def _startServices(jobGraphsWithServicesToStart,
jobGraphsWithServicesThatHaveStarted,
serviceJobsToStart,
terminate, jobStore):
servicesThatAreStarting = set()
servicesRemainingToStartForJob = {}
serviceToJobGraph = {}
while True:
with throttle(1.0):
if terminate.is_set():
logger.debug('Received signal to quit starting services.')
break
try:
jobGraph = jobGraphsWithServicesToStart.get_nowait()
if len(jobGraph.services) > 1:
# Have to fall back to the old blocking behavior to
# ensure entire service "groups" are issued as a whole.
blockUntilServiceGroupIsStarted(jobGraph,
jobGraphsWithServicesThatHaveStarted,
serviceJobsToStart, terminate, jobStore)
continue
# Found a new job that needs to schedule its services.
for serviceJob in jobGraph.services[0]:
serviceToJobGraph[serviceJob] = jobGraph
servicesRemainingToStartForJob[jobGraph] = len(jobGraph.services[0])
# Issue the service jobs all at once.
for serviceJob in jobGraph.services[0]:
logger.debug("Service manager is starting service job: %s, start ID: %s", serviceJob, serviceJob.startJobStoreID)
serviceJobsToStart.put(serviceJob)
# We should now start to monitor these services to see if
# they've started yet.
servicesThatAreStarting.update(jobGraph.services[0])
except Empty:
# No new jobs that need services scheduled.
pass
for serviceJob in list(servicesThatAreStarting):
if not jobStore.fileExists(serviceJob.startJobStoreID):
# Service has started!
servicesThatAreStarting.remove(serviceJob)
parentJob = serviceToJobGraph[serviceJob]
servicesRemainingToStartForJob[parentJob] -= 1
assert servicesRemainingToStartForJob[parentJob] >= 0
del serviceToJobGraph[serviceJob]
# Find if any jobGraphs have had *all* their services started.
jobGraphsToRemove = set()
for jobGraph, remainingServices in servicesRemainingToStartForJob.items():
if remainingServices == 0:
jobGraphsWithServicesThatHaveStarted.put(jobGraph)
jobGraphsToRemove.add(jobGraph)
for jobGraph in jobGraphsToRemove:
del servicesRemainingToStartForJob[jobGraph] | [
"def",
"_startServices",
"(",
"jobGraphsWithServicesToStart",
",",
"jobGraphsWithServicesThatHaveStarted",
",",
"serviceJobsToStart",
",",
"terminate",
",",
"jobStore",
")",
":",
"servicesThatAreStarting",
"=",
"set",
"(",
")",
"servicesRemainingToStartForJob",
"=",
"{",
... | Thread used to schedule services. | [
"Thread",
"used",
"to",
"schedule",
"services",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/serviceManager.py#L168-L223 |
225,127 | DataBiosphere/toil | src/toil/provisioners/aws/__init__.py | optimize_spot_bid | def optimize_spot_bid(ctx, instance_type, spot_bid):
"""
Check whether the bid is sane and makes an effort to place the instance in a sensible zone.
"""
spot_history = _get_spot_history(ctx, instance_type)
if spot_history:
_check_spot_bid(spot_bid, spot_history)
zones = ctx.ec2.get_all_zones()
most_stable_zone = choose_spot_zone(zones, spot_bid, spot_history)
logger.debug("Placing spot instances in zone %s.", most_stable_zone)
return most_stable_zone | python | def optimize_spot_bid(ctx, instance_type, spot_bid):
spot_history = _get_spot_history(ctx, instance_type)
if spot_history:
_check_spot_bid(spot_bid, spot_history)
zones = ctx.ec2.get_all_zones()
most_stable_zone = choose_spot_zone(zones, spot_bid, spot_history)
logger.debug("Placing spot instances in zone %s.", most_stable_zone)
return most_stable_zone | [
"def",
"optimize_spot_bid",
"(",
"ctx",
",",
"instance_type",
",",
"spot_bid",
")",
":",
"spot_history",
"=",
"_get_spot_history",
"(",
"ctx",
",",
"instance_type",
")",
"if",
"spot_history",
":",
"_check_spot_bid",
"(",
"spot_bid",
",",
"spot_history",
")",
"zo... | Check whether the bid is sane and makes an effort to place the instance in a sensible zone. | [
"Check",
"whether",
"the",
"bid",
"is",
"sane",
"and",
"makes",
"an",
"effort",
"to",
"place",
"the",
"instance",
"in",
"a",
"sensible",
"zone",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/aws/__init__.py#L128-L138 |
225,128 | DataBiosphere/toil | src/toil/provisioners/aws/__init__.py | _check_spot_bid | def _check_spot_bid(spot_bid, spot_history):
"""
Prevents users from potentially over-paying for instances
Note: this checks over the whole region, not a particular zone
:param spot_bid: float
:type spot_history: list[SpotPriceHistory]
:raises UserError: if bid is > 2X the spot price's average
>>> from collections import namedtuple
>>> FauxHistory = namedtuple( "FauxHistory", [ "price", "availability_zone" ] )
>>> spot_data = [ FauxHistory( 0.1, "us-west-2a" ), \
FauxHistory( 0.2, "us-west-2a" ), \
FauxHistory( 0.3, "us-west-2b" ), \
FauxHistory( 0.6, "us-west-2b" ) ]
>>> # noinspection PyProtectedMember
>>> _check_spot_bid( 0.1, spot_data )
>>> # noinspection PyProtectedMember
# >>> Box._check_spot_bid( 2, spot_data )
Traceback (most recent call last):
...
UserError: Your bid $ 2.000000 is more than double this instance type's average spot price ($ 0.300000) over the last week
"""
average = mean([datum.price for datum in spot_history])
if spot_bid > average * 2:
logger.warn("Your bid $ %f is more than double this instance type's average "
"spot price ($ %f) over the last week", spot_bid, average) | python | def _check_spot_bid(spot_bid, spot_history):
average = mean([datum.price for datum in spot_history])
if spot_bid > average * 2:
logger.warn("Your bid $ %f is more than double this instance type's average "
"spot price ($ %f) over the last week", spot_bid, average) | [
"def",
"_check_spot_bid",
"(",
"spot_bid",
",",
"spot_history",
")",
":",
"average",
"=",
"mean",
"(",
"[",
"datum",
".",
"price",
"for",
"datum",
"in",
"spot_history",
"]",
")",
"if",
"spot_bid",
">",
"average",
"*",
"2",
":",
"logger",
".",
"warn",
"... | Prevents users from potentially over-paying for instances
Note: this checks over the whole region, not a particular zone
:param spot_bid: float
:type spot_history: list[SpotPriceHistory]
:raises UserError: if bid is > 2X the spot price's average
>>> from collections import namedtuple
>>> FauxHistory = namedtuple( "FauxHistory", [ "price", "availability_zone" ] )
>>> spot_data = [ FauxHistory( 0.1, "us-west-2a" ), \
FauxHistory( 0.2, "us-west-2a" ), \
FauxHistory( 0.3, "us-west-2b" ), \
FauxHistory( 0.6, "us-west-2b" ) ]
>>> # noinspection PyProtectedMember
>>> _check_spot_bid( 0.1, spot_data )
>>> # noinspection PyProtectedMember
# >>> Box._check_spot_bid( 2, spot_data )
Traceback (most recent call last):
...
UserError: Your bid $ 2.000000 is more than double this instance type's average spot price ($ 0.300000) over the last week | [
"Prevents",
"users",
"from",
"potentially",
"over",
"-",
"paying",
"for",
"instances"
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/aws/__init__.py#L141-L171 |
225,129 | DataBiosphere/toil | src/toil/provisioners/aws/__init__.py | checkValidNodeTypes | def checkValidNodeTypes(provisioner, nodeTypes):
"""
Raises if an invalid nodeType is specified for aws, azure, or gce.
:param str provisioner: 'aws', 'gce', or 'azure' to specify which cloud provisioner used.
:param nodeTypes: A list of node types. Example: ['t2.micro', 't2.medium']
:return: Nothing. Raises if invalid nodeType.
"""
if not nodeTypes:
return
if not isinstance(nodeTypes, list):
nodeTypes = [nodeTypes]
if not isinstance(nodeTypes[0], string_types):
return
# check if a valid node type for aws
from toil.lib.generatedEC2Lists import E2Instances, regionDict
if provisioner == 'aws':
from toil.provisioners.aws import getCurrentAWSZone
currentZone = getCurrentAWSZone()
if not currentZone:
currentZone = 'us-west-2'
else:
currentZone = currentZone[:-1] # adds something like 'a' or 'b' to the end
# check if instance type exists in this region
for nodeType in nodeTypes:
if nodeType and ':' in nodeType:
nodeType = nodeType.split(':')[0]
if nodeType not in regionDict[currentZone]:
# They probably misspelled it and can't tell.
close = get_close_matches(nodeType, regionDict[currentZone], 1)
if len(close) > 0:
helpText = ' Did you mean ' + close[0] + '?'
else:
helpText = ''
raise RuntimeError('Invalid nodeType (%s) specified for AWS in region: %s.%s'
'' % (nodeType, currentZone, helpText))
# Only checks if aws nodeType specified for gce/azure atm.
if provisioner == 'gce' or provisioner == 'azure':
for nodeType in nodeTypes:
if nodeType and ':' in nodeType:
nodeType = nodeType.split(':')[0]
try:
E2Instances[nodeType]
raise RuntimeError("It looks like you've specified an AWS nodeType with the "
"{} provisioner. Please specify an {} nodeType."
"".format(provisioner, provisioner))
except KeyError:
pass | python | def checkValidNodeTypes(provisioner, nodeTypes):
if not nodeTypes:
return
if not isinstance(nodeTypes, list):
nodeTypes = [nodeTypes]
if not isinstance(nodeTypes[0], string_types):
return
# check if a valid node type for aws
from toil.lib.generatedEC2Lists import E2Instances, regionDict
if provisioner == 'aws':
from toil.provisioners.aws import getCurrentAWSZone
currentZone = getCurrentAWSZone()
if not currentZone:
currentZone = 'us-west-2'
else:
currentZone = currentZone[:-1] # adds something like 'a' or 'b' to the end
# check if instance type exists in this region
for nodeType in nodeTypes:
if nodeType and ':' in nodeType:
nodeType = nodeType.split(':')[0]
if nodeType not in regionDict[currentZone]:
# They probably misspelled it and can't tell.
close = get_close_matches(nodeType, regionDict[currentZone], 1)
if len(close) > 0:
helpText = ' Did you mean ' + close[0] + '?'
else:
helpText = ''
raise RuntimeError('Invalid nodeType (%s) specified for AWS in region: %s.%s'
'' % (nodeType, currentZone, helpText))
# Only checks if aws nodeType specified for gce/azure atm.
if provisioner == 'gce' or provisioner == 'azure':
for nodeType in nodeTypes:
if nodeType and ':' in nodeType:
nodeType = nodeType.split(':')[0]
try:
E2Instances[nodeType]
raise RuntimeError("It looks like you've specified an AWS nodeType with the "
"{} provisioner. Please specify an {} nodeType."
"".format(provisioner, provisioner))
except KeyError:
pass | [
"def",
"checkValidNodeTypes",
"(",
"provisioner",
",",
"nodeTypes",
")",
":",
"if",
"not",
"nodeTypes",
":",
"return",
"if",
"not",
"isinstance",
"(",
"nodeTypes",
",",
"list",
")",
":",
"nodeTypes",
"=",
"[",
"nodeTypes",
"]",
"if",
"not",
"isinstance",
"... | Raises if an invalid nodeType is specified for aws, azure, or gce.
:param str provisioner: 'aws', 'gce', or 'azure' to specify which cloud provisioner used.
:param nodeTypes: A list of node types. Example: ['t2.micro', 't2.medium']
:return: Nothing. Raises if invalid nodeType. | [
"Raises",
"if",
"an",
"invalid",
"nodeType",
"is",
"specified",
"for",
"aws",
"azure",
"or",
"gce",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/aws/__init__.py#L202-L249 |
225,130 | DataBiosphere/toil | src/toil/utils/toilStats.py | padStr | def padStr(s, field=None):
""" Pad the begining of a string with spaces, if necessary.
"""
if field is None:
return s
else:
if len(s) >= field:
return s
else:
return " " * (field - len(s)) + s | python | def padStr(s, field=None):
if field is None:
return s
else:
if len(s) >= field:
return s
else:
return " " * (field - len(s)) + s | [
"def",
"padStr",
"(",
"s",
",",
"field",
"=",
"None",
")",
":",
"if",
"field",
"is",
"None",
":",
"return",
"s",
"else",
":",
"if",
"len",
"(",
"s",
")",
">=",
"field",
":",
"return",
"s",
"else",
":",
"return",
"\" \"",
"*",
"(",
"field",
"-",... | Pad the begining of a string with spaces, if necessary. | [
"Pad",
"the",
"begining",
"of",
"a",
"string",
"with",
"spaces",
"if",
"necessary",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/utils/toilStats.py#L126-L135 |
225,131 | DataBiosphere/toil | src/toil/utils/toilStats.py | prettyMemory | def prettyMemory(k, field=None, isBytes=False):
""" Given input k as kilobytes, return a nicely formatted string.
"""
if isBytes:
k /= 1024
if k < 1024:
return padStr("%gK" % k, field)
if k < (1024 * 1024):
return padStr("%.1fM" % (old_div(k, 1024.0)), field)
if k < (1024 * 1024 * 1024):
return padStr("%.1fG" % (k / 1024.0 / 1024.0), field)
if k < (1024 * 1024 * 1024 * 1024):
return padStr("%.1fT" % (k / 1024.0 / 1024.0 / 1024.0), field)
if k < (1024 * 1024 * 1024 * 1024 * 1024):
return padStr("%.1fP" % (k / 1024.0 / 1024.0 / 1024.0 / 1024.0), field) | python | def prettyMemory(k, field=None, isBytes=False):
if isBytes:
k /= 1024
if k < 1024:
return padStr("%gK" % k, field)
if k < (1024 * 1024):
return padStr("%.1fM" % (old_div(k, 1024.0)), field)
if k < (1024 * 1024 * 1024):
return padStr("%.1fG" % (k / 1024.0 / 1024.0), field)
if k < (1024 * 1024 * 1024 * 1024):
return padStr("%.1fT" % (k / 1024.0 / 1024.0 / 1024.0), field)
if k < (1024 * 1024 * 1024 * 1024 * 1024):
return padStr("%.1fP" % (k / 1024.0 / 1024.0 / 1024.0 / 1024.0), field) | [
"def",
"prettyMemory",
"(",
"k",
",",
"field",
"=",
"None",
",",
"isBytes",
"=",
"False",
")",
":",
"if",
"isBytes",
":",
"k",
"/=",
"1024",
"if",
"k",
"<",
"1024",
":",
"return",
"padStr",
"(",
"\"%gK\"",
"%",
"k",
",",
"field",
")",
"if",
"k",
... | Given input k as kilobytes, return a nicely formatted string. | [
"Given",
"input",
"k",
"as",
"kilobytes",
"return",
"a",
"nicely",
"formatted",
"string",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/utils/toilStats.py#L137-L151 |
225,132 | DataBiosphere/toil | src/toil/utils/toilStats.py | prettyTime | def prettyTime(t, field=None):
""" Given input t as seconds, return a nicely formatted string.
"""
from math import floor
pluralDict = {True: "s", False: ""}
if t < 120:
return padStr("%ds" % t, field)
if t < 120 * 60:
m = floor(old_div(t, 60.))
s = t % 60
return padStr("%dm%ds" % (m, s), field)
if t < 25 * 60 * 60:
h = floor(t / 60. / 60.)
m = floor(old_div((t - (h * 60. * 60.)), 60.))
s = t % 60
return padStr("%dh%gm%ds" % (h, m, s), field)
if t < 7 * 24 * 60 * 60:
d = floor(t / 24. / 60. / 60.)
h = floor((t - (d * 24. * 60. * 60.)) / 60. / 60.)
m = floor(old_div((t
- (d * 24. * 60. * 60.)
- (h * 60. * 60.)), 60.))
s = t % 60
dPlural = pluralDict[d > 1]
return padStr("%dday%s%dh%dm%ds" % (d, dPlural, h, m, s), field)
w = floor(t / 7. / 24. / 60. / 60.)
d = floor((t - (w * 7 * 24 * 60 * 60)) / 24. / 60. / 60.)
h = floor((t
- (w * 7. * 24. * 60. * 60.)
- (d * 24. * 60. * 60.))
/ 60. / 60.)
m = floor(old_div((t
- (w * 7. * 24. * 60. * 60.)
- (d * 24. * 60. * 60.)
- (h * 60. * 60.)), 60.))
s = t % 60
wPlural = pluralDict[w > 1]
dPlural = pluralDict[d > 1]
return padStr("%dweek%s%dday%s%dh%dm%ds" % (w, wPlural, d,
dPlural, h, m, s), field) | python | def prettyTime(t, field=None):
from math import floor
pluralDict = {True: "s", False: ""}
if t < 120:
return padStr("%ds" % t, field)
if t < 120 * 60:
m = floor(old_div(t, 60.))
s = t % 60
return padStr("%dm%ds" % (m, s), field)
if t < 25 * 60 * 60:
h = floor(t / 60. / 60.)
m = floor(old_div((t - (h * 60. * 60.)), 60.))
s = t % 60
return padStr("%dh%gm%ds" % (h, m, s), field)
if t < 7 * 24 * 60 * 60:
d = floor(t / 24. / 60. / 60.)
h = floor((t - (d * 24. * 60. * 60.)) / 60. / 60.)
m = floor(old_div((t
- (d * 24. * 60. * 60.)
- (h * 60. * 60.)), 60.))
s = t % 60
dPlural = pluralDict[d > 1]
return padStr("%dday%s%dh%dm%ds" % (d, dPlural, h, m, s), field)
w = floor(t / 7. / 24. / 60. / 60.)
d = floor((t - (w * 7 * 24 * 60 * 60)) / 24. / 60. / 60.)
h = floor((t
- (w * 7. * 24. * 60. * 60.)
- (d * 24. * 60. * 60.))
/ 60. / 60.)
m = floor(old_div((t
- (w * 7. * 24. * 60. * 60.)
- (d * 24. * 60. * 60.)
- (h * 60. * 60.)), 60.))
s = t % 60
wPlural = pluralDict[w > 1]
dPlural = pluralDict[d > 1]
return padStr("%dweek%s%dday%s%dh%dm%ds" % (w, wPlural, d,
dPlural, h, m, s), field) | [
"def",
"prettyTime",
"(",
"t",
",",
"field",
"=",
"None",
")",
":",
"from",
"math",
"import",
"floor",
"pluralDict",
"=",
"{",
"True",
":",
"\"s\"",
",",
"False",
":",
"\"\"",
"}",
"if",
"t",
"<",
"120",
":",
"return",
"padStr",
"(",
"\"%ds\"",
"%"... | Given input t as seconds, return a nicely formatted string. | [
"Given",
"input",
"t",
"as",
"seconds",
"return",
"a",
"nicely",
"formatted",
"string",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/utils/toilStats.py#L153-L192 |
def reportTime(t, options, field=None):
    """ Given t seconds, report back the correct format as string.
    """
    if options.pretty:
        return prettyTime(t, field=field)
    # Plain mode: fixed-point seconds, right-padded to the column width if given.
    if field is None:
        return "%.2f" % t
    return "%*.2f" % (field, t)
return "%.2f" % t | python | def reportTime(t, options, field=None):
if options.pretty:
return prettyTime(t, field=field)
else:
if field is not None:
return "%*.2f" % (field, t)
else:
return "%.2f" % t | [
"def",
"reportTime",
"(",
"t",
",",
"options",
",",
"field",
"=",
"None",
")",
":",
"if",
"options",
".",
"pretty",
":",
"return",
"prettyTime",
"(",
"t",
",",
"field",
"=",
"field",
")",
"else",
":",
"if",
"field",
"is",
"not",
"None",
":",
"retur... | Given t seconds, report back the correct format as string. | [
"Given",
"t",
"seconds",
"report",
"back",
"the",
"correct",
"format",
"as",
"string",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/utils/toilStats.py#L194-L203 |
def reportMemory(k, options, field=None, isBytes=False):
    """ Given k kilobytes, report back the correct format as string.
    """
    if options.pretty:
        return prettyMemory(int(k), field=field, isBytes=isBytes)
    if isBytes:
        k /= 1024.
    if field is None:
        return "%dK" % int(k)
    return "%*dK" % (field - 1, k)  # -1 reserves one column for the "K"
return "%dK" % int(k) | python | def reportMemory(k, options, field=None, isBytes=False):
if options.pretty:
return prettyMemory(int(k), field=field, isBytes=isBytes)
else:
if isBytes:
k /= 1024.
if field is not None:
return "%*dK" % (field - 1, k) # -1 for the "K"
else:
return "%dK" % int(k) | [
"def",
"reportMemory",
"(",
"k",
",",
"options",
",",
"field",
"=",
"None",
",",
"isBytes",
"=",
"False",
")",
":",
"if",
"options",
".",
"pretty",
":",
"return",
"prettyMemory",
"(",
"int",
"(",
"k",
")",
",",
"field",
"=",
"field",
",",
"isBytes",
... | Given k kilobytes, report back the correct format as string. | [
"Given",
"k",
"kilobytes",
"report",
"back",
"the",
"correct",
"format",
"as",
"string",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/utils/toilStats.py#L205-L216 |
def refineData(root, options):
    """ walk down from the root and gather up the important bits.
    """
    # Flatten the per-type subtree into a list of its values (insertion order).
    jobTypes = [root.job_types[typeName] for typeName in root.job_types]
    return root, root.worker, root.jobs, jobTypes
return root, worker, job, jobTypes | python | def refineData(root, options):
worker = root.worker
job = root.jobs
jobTypesTree = root.job_types
jobTypes = []
for childName in jobTypesTree:
jobTypes.append(jobTypesTree[childName])
return root, worker, job, jobTypes | [
"def",
"refineData",
"(",
"root",
",",
"options",
")",
":",
"worker",
"=",
"root",
".",
"worker",
"job",
"=",
"root",
".",
"jobs",
"jobTypesTree",
"=",
"root",
".",
"job_types",
"jobTypes",
"=",
"[",
"]",
"for",
"childName",
"in",
"jobTypesTree",
":",
... | walk down from the root and gather up the important bits. | [
"walk",
"down",
"from",
"the",
"root",
"and",
"gather",
"up",
"the",
"important",
"bits",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/utils/toilStats.py#L226-L235 |
def decorateSubHeader(title, columnWidths, options):
    """ Add a marker to the correct field if the TITLE is sorted on.
    """
    title = title.lower()
    widths = [(fieldName, columnWidths.getWidth(title, fieldName))
              for fieldName in ("min", "med", "ave", "max", "total")]
    if title != options.sortCategory:
        # Not the sorted-on category: plain right-justified field names.
        return "| " + "".join("%*s" % (w, f) for f, w in widths) + " "
    pieces = ["| "]
    for fieldName, width in widths:
        if options.sortField == fieldName:
            # Mark the sorted-on field with a trailing "*" inside its column.
            pieces.append("%*s*" % (width - 1, fieldName))
        else:
            pieces.append("%*s" % (width, fieldName))
    pieces.append(" ")
    return "".join(pieces)
return s | python | def decorateSubHeader(title, columnWidths, options):
title = title.lower()
if title != options.sortCategory:
s = "| %*s%*s%*s%*s%*s " % (
columnWidths.getWidth(title, "min"), "min",
columnWidths.getWidth(title, "med"), "med",
columnWidths.getWidth(title, "ave"), "ave",
columnWidths.getWidth(title, "max"), "max",
columnWidths.getWidth(title, "total"), "total")
return s
else:
s = "| "
for field, width in [("min", columnWidths.getWidth(title, "min")),
("med", columnWidths.getWidth(title, "med")),
("ave", columnWidths.getWidth(title, "ave")),
("max", columnWidths.getWidth(title, "max")),
("total", columnWidths.getWidth(title, "total"))]:
if options.sortField == field:
s += "%*s*" % (width - 1, field)
else:
s += "%*s" % (width, field)
s += " "
return s | [
"def",
"decorateSubHeader",
"(",
"title",
",",
"columnWidths",
",",
"options",
")",
":",
"title",
"=",
"title",
".",
"lower",
"(",
")",
"if",
"title",
"!=",
"options",
".",
"sortCategory",
":",
"s",
"=",
"\"| %*s%*s%*s%*s%*s \"",
"%",
"(",
"columnWidths",
... | Add a marker to the correct field if the TITLE is sorted on. | [
"Add",
"a",
"marker",
"to",
"the",
"correct",
"field",
"if",
"the",
"TITLE",
"is",
"sorted",
"on",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/utils/toilStats.py#L319-L343 |
def get(tree, name):
    """ Return a float value attribute NAME from TREE.

    Returns NaN when the attribute is missing or its value cannot be
    coerced to a float.
    """
    if name not in tree:
        return float("nan")
    try:
        return float(tree[name])
    except (ValueError, TypeError):
        # TypeError covers non-coercible values such as None, which the
        # original let crash; degrade them to NaN like other bad values.
        return float("nan")
return a | python | def get(tree, name):
if name in tree:
value = tree[name]
else:
return float("nan")
try:
a = float(value)
except ValueError:
a = float("nan")
return a | [
"def",
"get",
"(",
"tree",
",",
"name",
")",
":",
"if",
"name",
"in",
"tree",
":",
"value",
"=",
"tree",
"[",
"name",
"]",
"else",
":",
"return",
"float",
"(",
"\"nan\"",
")",
"try",
":",
"a",
"=",
"float",
"(",
"value",
")",
"except",
"ValueErro... | Return a float value attribute NAME from TREE. | [
"Return",
"a",
"float",
"value",
"attribute",
"NAME",
"from",
"TREE",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/utils/toilStats.py#L345-L356 |
def sortJobs(jobTypes, options):
    """ Return a jobTypes all sorted.
    """
    # Map the short CLI field names onto their attribute-name prefixes.
    longforms = {"med": "median",
                 "ave": "average",
                 "min": "min",
                 "total": "total",
                 "max": "max"}
    category = options.sortCategory
    if category in ("time", "clock", "wait", "memory"):
        attrName = "%s_%s" % (longforms[options.sortField], category)
        keyFn = lambda tag: getattr(tag, attrName)
    elif category == "alpha":
        keyFn = lambda tag: tag.name
    elif category == "count":
        keyFn = lambda tag: tag.total_number
    else:
        # Unknown category: mirror the original fall-through (no result).
        return None
    return sorted(jobTypes, key=keyFn, reverse=options.sortReverse)
reverse=options.sortReverse) | python | def sortJobs(jobTypes, options):
longforms = {"med": "median",
"ave": "average",
"min": "min",
"total": "total",
"max": "max",}
sortField = longforms[options.sortField]
if (options.sortCategory == "time" or
options.sortCategory == "clock" or
options.sortCategory == "wait" or
options.sortCategory == "memory"
):
return sorted(
jobTypes,
key=lambda tag: getattr(tag, "%s_%s"
% (sortField, options.sortCategory)),
reverse=options.sortReverse)
elif options.sortCategory == "alpha":
return sorted(
jobTypes, key=lambda tag: tag.name,
reverse=options.sortReverse)
elif options.sortCategory == "count":
return sorted(jobTypes, key=lambda tag: tag.total_number,
reverse=options.sortReverse) | [
"def",
"sortJobs",
"(",
"jobTypes",
",",
"options",
")",
":",
"longforms",
"=",
"{",
"\"med\"",
":",
"\"median\"",
",",
"\"ave\"",
":",
"\"average\"",
",",
"\"min\"",
":",
"\"min\"",
",",
"\"total\"",
":",
"\"total\"",
",",
"\"max\"",
":",
"\"max\"",
",",
... | Return a jobTypes all sorted. | [
"Return",
"a",
"jobTypes",
"all",
"sorted",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/utils/toilStats.py#L358-L383 |
def reportPrettyData(root, worker, job, job_types, options):
    """ print the important bits out.
    """
    lines = ["Batch System: %s\n" % root.batch_system]
    lines.append("Default Cores: %s Default Memory: %s\n"
                 "Max Cores: %s\n" % (
                     reportNumber(get(root, "default_cores"), options),
                     reportMemory(get(root, "default_memory"), options, isBytes=True),
                     reportNumber(get(root, "max_cores"), options),
                 ))
    lines.append("Total Clock: %s Total Runtime: %s\n" % (
        reportTime(get(root, "total_clock"), options),
        reportTime(get(root, "total_run_time"), options),
    ))
    # Column widths are computed over the sorted job types plus the two
    # aggregate rows so every table section lines up.
    job_types = sortJobs(job_types, options)
    columnWidths = computeColumnWidths(job_types, worker, job, options)
    lines.append("Worker\n")
    lines.append(sprintTag("worker", worker, options, columnWidths=columnWidths))
    lines.append("Job\n")
    lines.append(sprintTag("job", job, options, columnWidths=columnWidths))
    for jobType in job_types:
        lines.append(" %s\n" % jobType.name)
        lines.append(sprintTag(jobType.name, jobType, options, columnWidths=columnWidths))
    return "".join(lines)
return out_str | python | def reportPrettyData(root, worker, job, job_types, options):
out_str = "Batch System: %s\n" % root.batch_system
out_str += ("Default Cores: %s Default Memory: %s\n"
"Max Cores: %s\n" % (
reportNumber(get(root, "default_cores"), options),
reportMemory(get(root, "default_memory"), options, isBytes=True),
reportNumber(get(root, "max_cores"), options),
))
out_str += ("Total Clock: %s Total Runtime: %s\n" % (
reportTime(get(root, "total_clock"), options),
reportTime(get(root, "total_run_time"), options),
))
job_types = sortJobs(job_types, options)
columnWidths = computeColumnWidths(job_types, worker, job, options)
out_str += "Worker\n"
out_str += sprintTag("worker", worker, options, columnWidths=columnWidths)
out_str += "Job\n"
out_str += sprintTag("job", job, options, columnWidths=columnWidths)
for t in job_types:
out_str += " %s\n" % t.name
out_str += sprintTag(t.name, t, options, columnWidths=columnWidths)
return out_str | [
"def",
"reportPrettyData",
"(",
"root",
",",
"worker",
",",
"job",
",",
"job_types",
",",
"options",
")",
":",
"out_str",
"=",
"\"Batch System: %s\\n\"",
"%",
"root",
".",
"batch_system",
"out_str",
"+=",
"(",
"\"Default Cores: %s Default Memory: %s\\n\"",
"\"Max C... | print the important bits out. | [
"print",
"the",
"important",
"bits",
"out",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/utils/toilStats.py#L385-L408 |
def updateColumnWidths(tag, cw, options):
    """ Update the column width attributes for this tag's fields.
    """
    longforms = {"med": "median",
                 "ave": "average",
                 "min": "min",
                 "total": "total",
                 "max": "max"}
    for category in ("time", "clock", "wait", "memory"):
        if category not in options.categories:
            continue
        for field in ("min", "med", "ave", "max", "total"):
            value = getattr(tag, "%s_%s" % (longforms[field], category))
            width = cw.getWidth(category, field)
            # Render the value the same way the table will, then see if it fits.
            if category == "memory":
                rendered = reportMemory(value, options, field=width, isBytes=True).strip()
            else:
                rendered = reportTime(value, options, field=width).strip()
            if len(rendered) >= width:
                # Rendered value fills or overflows the column; widen it.
                cw.setWidth(category, field, len(rendered) + 1)
cw.setWidth(category, field, len(s) + 1) | python | def updateColumnWidths(tag, cw, options):
longforms = {"med": "median",
"ave": "average",
"min": "min",
"total": "total",
"max": "max",}
for category in ["time", "clock", "wait", "memory"]:
if category in options.categories:
for field in ["min", "med", "ave", "max", "total"]:
t = getattr(tag, "%s_%s" % (longforms[field], category))
if category in ["time", "clock", "wait"]:
s = reportTime(t, options,
field=cw.getWidth(category, field)).strip()
else:
s = reportMemory(t, options,
field=cw.getWidth(category, field), isBytes=True).strip()
if len(s) >= cw.getWidth(category, field):
# this string is larger than max, width must be increased
cw.setWidth(category, field, len(s) + 1) | [
"def",
"updateColumnWidths",
"(",
"tag",
",",
"cw",
",",
"options",
")",
":",
"longforms",
"=",
"{",
"\"med\"",
":",
"\"median\"",
",",
"\"ave\"",
":",
"\"average\"",
",",
"\"min\"",
":",
"\"min\"",
",",
"\"total\"",
":",
"\"total\"",
",",
"\"max\"",
":",
... | Update the column width attributes for this tag's fields. | [
"Update",
"the",
"column",
"width",
"attributes",
"for",
"this",
"tag",
"s",
"fields",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/utils/toilStats.py#L420-L440 |
225,141 | DataBiosphere/toil | src/toil/utils/toilStats.py | buildElement | def buildElement(element, items, itemName):
    """ Create an element for output.

    Aggregates per-item "time", "clock" and "memory" values into an
    Expando of min/max/median/average/total stats, stores it at
    element[itemName] and returns it.  Note "median" here is the element
    at index len//2 of the sorted list, not an interpolated median for
    even-length input.
    """
    def assertNonnegative(i,name):
        # Stats values are durations/sizes; a negative one indicates a
        # corrupt record, so fail loudly rather than skew the aggregates.
        if i < 0:
            raise RuntimeError("Negative value %s reported for %s" %(i,name) )
        else:
            return float(i)
    itemTimes = []
    itemClocks = []
    itemMemory = []
    for item in items:
        itemTimes.append(assertNonnegative(float(item["time"]), "time"))
        itemClocks.append(assertNonnegative(float(item["clock"]), "clock"))
        itemMemory.append(assertNonnegative(float(item["memory"]), "memory"))
    assert len(itemClocks) == len(itemTimes) == len(itemMemory)
    # Wait time per item is wall-clock time minus CPU clock time.
    itemWaits=[]
    for index in range(0,len(itemTimes)):
        itemWaits.append(itemTimes[index] - itemClocks[index])
    itemWaits.sort()
    itemTimes.sort()
    itemClocks.sort()
    itemMemory.sort()
    if len(itemTimes) == 0:
        # Pad with a single zero so the min/max/median/average expressions
        # below do not divide by zero or index an empty list.
        itemTimes.append(0)
        itemClocks.append(0)
        itemWaits.append(0)
        itemMemory.append(0)
    # total_number reflects the real item count (possibly 0), while the
    # averages divide by the padded list length (always >= 1).
    element[itemName]=Expando(
        total_number=float(len(items)),
        total_time=float(sum(itemTimes)),
        median_time=float(itemTimes[old_div(len(itemTimes),2)]),
        average_time=float(old_div(sum(itemTimes),len(itemTimes))),
        min_time=float(min(itemTimes)),
        max_time=float(max(itemTimes)),
        total_clock=float(sum(itemClocks)),
        median_clock=float(itemClocks[old_div(len(itemClocks),2)]),
        average_clock=float(old_div(sum(itemClocks),len(itemClocks))),
        min_clock=float(min(itemClocks)),
        max_clock=float(max(itemClocks)),
        total_wait=float(sum(itemWaits)),
        median_wait=float(itemWaits[old_div(len(itemWaits),2)]),
        average_wait=float(old_div(sum(itemWaits),len(itemWaits))),
        min_wait=float(min(itemWaits)),
        max_wait=float(max(itemWaits)),
        total_memory=float(sum(itemMemory)),
        median_memory=float(itemMemory[old_div(len(itemMemory),2)]),
        average_memory=float(old_div(sum(itemMemory),len(itemMemory))),
        min_memory=float(min(itemMemory)),
        max_memory=float(max(itemMemory)),
        name=itemName
    )
return element[itemName] | python | def buildElement(element, items, itemName):
def assertNonnegative(i,name):
if i < 0:
raise RuntimeError("Negative value %s reported for %s" %(i,name) )
else:
return float(i)
itemTimes = []
itemClocks = []
itemMemory = []
for item in items:
itemTimes.append(assertNonnegative(float(item["time"]), "time"))
itemClocks.append(assertNonnegative(float(item["clock"]), "clock"))
itemMemory.append(assertNonnegative(float(item["memory"]), "memory"))
assert len(itemClocks) == len(itemTimes) == len(itemMemory)
itemWaits=[]
for index in range(0,len(itemTimes)):
itemWaits.append(itemTimes[index] - itemClocks[index])
itemWaits.sort()
itemTimes.sort()
itemClocks.sort()
itemMemory.sort()
if len(itemTimes) == 0:
itemTimes.append(0)
itemClocks.append(0)
itemWaits.append(0)
itemMemory.append(0)
element[itemName]=Expando(
total_number=float(len(items)),
total_time=float(sum(itemTimes)),
median_time=float(itemTimes[old_div(len(itemTimes),2)]),
average_time=float(old_div(sum(itemTimes),len(itemTimes))),
min_time=float(min(itemTimes)),
max_time=float(max(itemTimes)),
total_clock=float(sum(itemClocks)),
median_clock=float(itemClocks[old_div(len(itemClocks),2)]),
average_clock=float(old_div(sum(itemClocks),len(itemClocks))),
min_clock=float(min(itemClocks)),
max_clock=float(max(itemClocks)),
total_wait=float(sum(itemWaits)),
median_wait=float(itemWaits[old_div(len(itemWaits),2)]),
average_wait=float(old_div(sum(itemWaits),len(itemWaits))),
min_wait=float(min(itemWaits)),
max_wait=float(max(itemWaits)),
total_memory=float(sum(itemMemory)),
median_memory=float(itemMemory[old_div(len(itemMemory),2)]),
average_memory=float(old_div(sum(itemMemory),len(itemMemory))),
min_memory=float(min(itemMemory)),
max_memory=float(max(itemMemory)),
name=itemName
)
return element[itemName] | [
"def",
"buildElement",
"(",
"element",
",",
"items",
",",
"itemName",
")",
":",
"def",
"assertNonnegative",
"(",
"i",
",",
"name",
")",
":",
"if",
"i",
"<",
"0",
":",
"raise",
"RuntimeError",
"(",
"\"Negative value %s reported for %s\"",
"%",
"(",
"i",
","... | Create an element for output. | [
"Create",
"an",
"element",
"for",
"output",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/utils/toilStats.py#L442-L499 |
225,142 | DataBiosphere/toil | src/toil/utils/toilStats.py | getStats | def getStats(jobStore):
    """ Collect and return the stats and config data.

    Reads every stats/logging file in the job store and merges the JSON
    records into one Expando whose values are lists (one entry appended
    per file that contained that key).
    """
    def aggregateStats(fileHandle,aggregateObject):
        try:
            # Expando as object_hook makes nested JSON objects
            # attribute-accessible.
            stats = json.load(fileHandle, object_hook=Expando)
            for key in list(stats.keys()):
                if key in aggregateObject:
                    aggregateObject[key].append(stats[key])
                else:
                    aggregateObject[key]=[stats[key]]
        except ValueError:
            # NOTE(review): %s interpolates the file handle object itself,
            # not a file name — confirm this is the intended log detail.
            logger.critical("File %s contains corrupted json. Skipping file." % fileHandle)
            pass  # The file is corrupted.
    aggregateObject = Expando()
    callBack = partial(aggregateStats, aggregateObject=aggregateObject)
    jobStore.readStatsAndLogging(callBack, readAll=True)
return aggregateObject | python | def getStats(jobStore):
def aggregateStats(fileHandle,aggregateObject):
try:
stats = json.load(fileHandle, object_hook=Expando)
for key in list(stats.keys()):
if key in aggregateObject:
aggregateObject[key].append(stats[key])
else:
aggregateObject[key]=[stats[key]]
except ValueError:
logger.critical("File %s contains corrupted json. Skipping file." % fileHandle)
pass # The file is corrupted.
aggregateObject = Expando()
callBack = partial(aggregateStats, aggregateObject=aggregateObject)
jobStore.readStatsAndLogging(callBack, readAll=True)
return aggregateObject | [
"def",
"getStats",
"(",
"jobStore",
")",
":",
"def",
"aggregateStats",
"(",
"fileHandle",
",",
"aggregateObject",
")",
":",
"try",
":",
"stats",
"=",
"json",
".",
"load",
"(",
"fileHandle",
",",
"object_hook",
"=",
"Expando",
")",
"for",
"key",
"in",
"li... | Collect and return the stats and config data. | [
"Collect",
"and",
"return",
"the",
"stats",
"and",
"config",
"data",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/utils/toilStats.py#L513-L531 |
225,143 | DataBiosphere/toil | src/toil/utils/toilStats.py | processData | def processData(config, stats):
    """
    Collate the stats and report

    Builds the collated stats tree (worker/jobs aggregates plus one
    aggregate per job class name) from the raw per-file stats lists.
    """
    if 'total_time' not in stats or 'total_clock' not in stats:
        # toil job not finished yet
        stats.total_time = [0.0]
        stats.total_clock = [0.0]
    # Collapse the per-file lists into grand totals.
    stats.total_time = sum([float(number) for number in stats.total_time])
    stats.total_clock = sum([float(number) for number in stats.total_clock])
    collatedStatsTag = Expando(total_run_time=stats.total_time,
                               total_clock=stats.total_clock,
                               batch_system=config.batchSystem,
                               default_memory=str(config.defaultMemory),
                               default_cores=str(config.defaultCores),
                               max_cores=str(config.maxCores)
                               )
    # Add worker info
    worker = [_f for _f in getattr(stats, 'workers', []) if _f]
    jobs = [_f for _f in getattr(stats, 'jobs', []) if _f]
    # Each worker contributed a list of jobs; flatten to a single list.
    jobs = [item for sublist in jobs for item in sublist]
    def fn4(job):
        # NOTE(review): this callback ignores its argument and always
        # returns the full flattened job list via the closure; the
        # TypeError branch appears unreachable since `jobs` is a list.
        try:
            return list(jobs)
        except TypeError:
            return []
    buildElement(collatedStatsTag, worker, "worker")
    createSummary(buildElement(collatedStatsTag, jobs, "jobs"),
                  getattr(stats, 'workers', []), "worker", fn4)
    # Get info for each job
    jobNames = set()
    for job in jobs:
        jobNames.add(job.class_name)
    jobTypesTag = Expando()
    collatedStatsTag.job_types = jobTypesTag
    for jobName in jobNames:
        # One aggregate element per distinct job class name.
        jobTypes = [ job for job in jobs if job.class_name == jobName ]
        buildElement(jobTypesTag, jobTypes, jobName)
    collatedStatsTag.name = "collatedStatsTag"
return collatedStatsTag | python | def processData(config, stats):
if 'total_time' not in stats or 'total_clock' not in stats:
# toil job not finished yet
stats.total_time = [0.0]
stats.total_clock = [0.0]
stats.total_time = sum([float(number) for number in stats.total_time])
stats.total_clock = sum([float(number) for number in stats.total_clock])
collatedStatsTag = Expando(total_run_time=stats.total_time,
total_clock=stats.total_clock,
batch_system=config.batchSystem,
default_memory=str(config.defaultMemory),
default_cores=str(config.defaultCores),
max_cores=str(config.maxCores)
)
# Add worker info
worker = [_f for _f in getattr(stats, 'workers', []) if _f]
jobs = [_f for _f in getattr(stats, 'jobs', []) if _f]
jobs = [item for sublist in jobs for item in sublist]
def fn4(job):
try:
return list(jobs)
except TypeError:
return []
buildElement(collatedStatsTag, worker, "worker")
createSummary(buildElement(collatedStatsTag, jobs, "jobs"),
getattr(stats, 'workers', []), "worker", fn4)
# Get info for each job
jobNames = set()
for job in jobs:
jobNames.add(job.class_name)
jobTypesTag = Expando()
collatedStatsTag.job_types = jobTypesTag
for jobName in jobNames:
jobTypes = [ job for job in jobs if job.class_name == jobName ]
buildElement(jobTypesTag, jobTypes, jobName)
collatedStatsTag.name = "collatedStatsTag"
return collatedStatsTag | [
"def",
"processData",
"(",
"config",
",",
"stats",
")",
":",
"if",
"'total_time'",
"not",
"in",
"stats",
"or",
"'total_clock'",
"not",
"in",
"stats",
":",
"# toil job not finished yet",
"stats",
".",
"total_time",
"=",
"[",
"0.0",
"]",
"stats",
".",
"total_c... | Collate the stats and report | [
"Collate",
"the",
"stats",
"and",
"report"
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/utils/toilStats.py#L534-L578 |
def title(self, category):
    """ Return the total printed length of this category item.
    """
    total = 0
    for fieldName in self.fields:
        total += self.getWidth(category, fieldName)
    return total
[self.getWidth(category, x) for x in self.fields]) | python | def title(self, category):
return sum(
[self.getWidth(category, x) for x in self.fields]) | [
"def",
"title",
"(",
"self",
",",
"category",
")",
":",
"return",
"sum",
"(",
"[",
"self",
".",
"getWidth",
"(",
"category",
",",
"x",
")",
"for",
"x",
"in",
"self",
".",
"fields",
"]",
")"
] | Return the total printed length of this category item. | [
"Return",
"the",
"total",
"printed",
"length",
"of",
"this",
"category",
"item",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/utils/toilStats.py#L49-L53 |
225,145 | DataBiosphere/toil | src/toil/worker.py | nextChainableJobGraph | def nextChainableJobGraph(jobGraph, jobStore):
    """Returns the next chainable jobGraph after this jobGraph if one
    exists, or None if the chain must terminate.

    A successor is chainable only when it is the sole next job, fits
    within this worker's current resource allocation (memory, cores,
    disk), matches on preemptability, has a single predecessor, and is
    not a checkpoint job.
    """
    #If no more jobs to run or services not finished, quit
    if len(jobGraph.stack) == 0 or len(jobGraph.services) > 0 or jobGraph.checkpoint != None:
        logger.debug("Stopping running chain of jobs: length of stack: %s, services: %s, checkpoint: %s",
                     len(jobGraph.stack), len(jobGraph.services), jobGraph.checkpoint != None)
        return None
    #Get the next set of jobs to run
    jobs = jobGraph.stack[-1]
    assert len(jobs) > 0
    #If there are 2 or more jobs to run in parallel we quit
    if len(jobs) >= 2:
        logger.debug("No more jobs can run in series by this worker,"
                     " it's got %i children", len(jobs)-1)
        return None
    #We check the requirements of the jobGraph to see if we can run it
    #within the current worker
    successorJobNode = jobs[0]
    if successorJobNode.memory > jobGraph.memory:
        logger.debug("We need more memory for the next job, so finishing")
        return None
    if successorJobNode.cores > jobGraph.cores:
        logger.debug("We need more cores for the next job, so finishing")
        return None
    if successorJobNode.disk > jobGraph.disk:
        logger.debug("We need more disk for the next job, so finishing")
        return None
    if successorJobNode.preemptable != jobGraph.preemptable:
        logger.debug("Preemptability is different for the next job, returning to the leader")
        return None
    if successorJobNode.predecessorNumber > 1:
        logger.debug("The jobGraph has multiple predecessors, we must return to the leader.")
        return None
    # Load the successor jobGraph
    successorJobGraph = jobStore.load(successorJobNode.jobStoreID)
    # Somewhat ugly, but check if job is a checkpoint job and quit if
    # so
    if successorJobGraph.command.startswith("_toil "):
        #Load the job
        successorJob = Job._loadJob(successorJobGraph.command, jobStore)
        # Check it is not a checkpoint
        if successorJob.checkpoint:
            logger.debug("Next job is checkpoint, so finishing")
            return None
    # Made it through! This job is chainable.
return successorJobGraph | python | def nextChainableJobGraph(jobGraph, jobStore):
#If no more jobs to run or services not finished, quit
if len(jobGraph.stack) == 0 or len(jobGraph.services) > 0 or jobGraph.checkpoint != None:
logger.debug("Stopping running chain of jobs: length of stack: %s, services: %s, checkpoint: %s",
len(jobGraph.stack), len(jobGraph.services), jobGraph.checkpoint != None)
return None
#Get the next set of jobs to run
jobs = jobGraph.stack[-1]
assert len(jobs) > 0
#If there are 2 or more jobs to run in parallel we quit
if len(jobs) >= 2:
logger.debug("No more jobs can run in series by this worker,"
" it's got %i children", len(jobs)-1)
return None
#We check the requirements of the jobGraph to see if we can run it
#within the current worker
successorJobNode = jobs[0]
if successorJobNode.memory > jobGraph.memory:
logger.debug("We need more memory for the next job, so finishing")
return None
if successorJobNode.cores > jobGraph.cores:
logger.debug("We need more cores for the next job, so finishing")
return None
if successorJobNode.disk > jobGraph.disk:
logger.debug("We need more disk for the next job, so finishing")
return None
if successorJobNode.preemptable != jobGraph.preemptable:
logger.debug("Preemptability is different for the next job, returning to the leader")
return None
if successorJobNode.predecessorNumber > 1:
logger.debug("The jobGraph has multiple predecessors, we must return to the leader.")
return None
# Load the successor jobGraph
successorJobGraph = jobStore.load(successorJobNode.jobStoreID)
# Somewhat ugly, but check if job is a checkpoint job and quit if
# so
if successorJobGraph.command.startswith("_toil "):
#Load the job
successorJob = Job._loadJob(successorJobGraph.command, jobStore)
# Check it is not a checkpoint
if successorJob.checkpoint:
logger.debug("Next job is checkpoint, so finishing")
return None
# Made it through! This job is chainable.
return successorJobGraph | [
"def",
"nextChainableJobGraph",
"(",
"jobGraph",
",",
"jobStore",
")",
":",
"#If no more jobs to run or services not finished, quit",
"if",
"len",
"(",
"jobGraph",
".",
"stack",
")",
"==",
"0",
"or",
"len",
"(",
"jobGraph",
".",
"services",
")",
">",
"0",
"or",
... | Returns the next chainable jobGraph after this jobGraph if one
exists, or None if the chain must terminate. | [
"Returns",
"the",
"next",
"chainable",
"jobGraph",
"after",
"this",
"jobGraph",
"if",
"one",
"exists",
"or",
"None",
"if",
"the",
"chain",
"must",
"terminate",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/worker.py#L46-L100 |
225,146 | DataBiosphere/toil | src/toil/realtimeLogger.py | LoggingDatagramHandler.handle | def handle(self):
"""
Handle a single message. SocketServer takes care of splitting out the messages.
Messages are JSON-encoded logging module records.
"""
# Unpack the data from the request
data, socket = self.request
try:
# Parse it as JSON
message_attrs = json.loads(data.decode('utf-8'))
# Fluff it up into a proper logging record
record = logging.makeLogRecord(message_attrs)
except:
# Complain someone is sending us bad logging data
logging.error("Malformed log message from {}".format(self.client_address[0]))
else:
# Log level filtering should have been done on the remote end. The handle() method
# skips it on this end.
log.handle(record) | python | def handle(self):
# Unpack the data from the request
data, socket = self.request
try:
# Parse it as JSON
message_attrs = json.loads(data.decode('utf-8'))
# Fluff it up into a proper logging record
record = logging.makeLogRecord(message_attrs)
except:
# Complain someone is sending us bad logging data
logging.error("Malformed log message from {}".format(self.client_address[0]))
else:
# Log level filtering should have been done on the remote end. The handle() method
# skips it on this end.
log.handle(record) | [
"def",
"handle",
"(",
"self",
")",
":",
"# Unpack the data from the request",
"data",
",",
"socket",
"=",
"self",
".",
"request",
"try",
":",
"# Parse it as JSON",
"message_attrs",
"=",
"json",
".",
"loads",
"(",
"data",
".",
"decode",
"(",
"'utf-8'",
")",
"... | Handle a single message. SocketServer takes care of splitting out the messages.
Messages are JSON-encoded logging module records. | [
"Handle",
"a",
"single",
"message",
".",
"SocketServer",
"takes",
"care",
"of",
"splitting",
"out",
"the",
"messages",
".",
"Messages",
"are",
"JSON",
"-",
"encoded",
"logging",
"module",
"records",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/realtimeLogger.py#L46-L66 |
225,147 | DataBiosphere/toil | src/toil/realtimeLogger.py | RealtimeLogger._stopLeader | def _stopLeader(cls):
"""
Stop the server on the leader.
"""
with cls.lock:
assert cls.initialized > 0
cls.initialized -= 1
if cls.initialized == 0:
if cls.loggingServer:
log.info('Stopping real-time logging server.')
cls.loggingServer.shutdown()
cls.loggingServer = None
if cls.serverThread:
log.info('Joining real-time logging server thread.')
cls.serverThread.join()
cls.serverThread = None
for k in list(os.environ.keys()):
if k.startswith(cls.envPrefix):
os.environ.pop(k) | python | def _stopLeader(cls):
with cls.lock:
assert cls.initialized > 0
cls.initialized -= 1
if cls.initialized == 0:
if cls.loggingServer:
log.info('Stopping real-time logging server.')
cls.loggingServer.shutdown()
cls.loggingServer = None
if cls.serverThread:
log.info('Joining real-time logging server thread.')
cls.serverThread.join()
cls.serverThread = None
for k in list(os.environ.keys()):
if k.startswith(cls.envPrefix):
os.environ.pop(k) | [
"def",
"_stopLeader",
"(",
"cls",
")",
":",
"with",
"cls",
".",
"lock",
":",
"assert",
"cls",
".",
"initialized",
">",
"0",
"cls",
".",
"initialized",
"-=",
"1",
"if",
"cls",
".",
"initialized",
"==",
"0",
":",
"if",
"cls",
".",
"loggingServer",
":",... | Stop the server on the leader. | [
"Stop",
"the",
"server",
"on",
"the",
"leader",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/realtimeLogger.py#L162-L180 |
225,148 | DataBiosphere/toil | src/toil/realtimeLogger.py | RealtimeLogger.getLogger | def getLogger(cls):
"""
Get the logger that logs real-time to the leader.
Note that if the returned logger is used on the leader, you will see the message twice,
since it still goes to the normal log handlers, too.
"""
# Only do the setup once, so we don't add a handler every time we log. Use a lock to do
# so safely even if we're being called in different threads. Use double-checked locking
# to reduce the overhead introduced by the lock.
if cls.logger is None:
with cls.lock:
if cls.logger is None:
cls.logger = logging.getLogger('toil-rt')
try:
level = os.environ[cls.envPrefix + 'LEVEL']
except KeyError:
# There is no server running on the leader, so suppress most log messages
# and skip the UDP stuff.
cls.logger.setLevel(logging.CRITICAL)
else:
# Adopt the logging level set on the leader.
toil.lib.bioio.setLogLevel(level, cls.logger)
try:
address = os.environ[cls.envPrefix + 'ADDRESS']
except KeyError:
pass
else:
# We know where to send messages to, so send them.
host, port = address.split(':')
cls.logger.addHandler(JSONDatagramHandler(host, int(port)))
return cls.logger | python | def getLogger(cls):
# Only do the setup once, so we don't add a handler every time we log. Use a lock to do
# so safely even if we're being called in different threads. Use double-checked locking
# to reduce the overhead introduced by the lock.
if cls.logger is None:
with cls.lock:
if cls.logger is None:
cls.logger = logging.getLogger('toil-rt')
try:
level = os.environ[cls.envPrefix + 'LEVEL']
except KeyError:
# There is no server running on the leader, so suppress most log messages
# and skip the UDP stuff.
cls.logger.setLevel(logging.CRITICAL)
else:
# Adopt the logging level set on the leader.
toil.lib.bioio.setLogLevel(level, cls.logger)
try:
address = os.environ[cls.envPrefix + 'ADDRESS']
except KeyError:
pass
else:
# We know where to send messages to, so send them.
host, port = address.split(':')
cls.logger.addHandler(JSONDatagramHandler(host, int(port)))
return cls.logger | [
"def",
"getLogger",
"(",
"cls",
")",
":",
"# Only do the setup once, so we don't add a handler every time we log. Use a lock to do",
"# so safely even if we're being called in different threads. Use double-checked locking",
"# to reduce the overhead introduced by the lock.",
"if",
"cls",
".",
... | Get the logger that logs real-time to the leader.
Note that if the returned logger is used on the leader, you will see the message twice,
since it still goes to the normal log handlers, too. | [
"Get",
"the",
"logger",
"that",
"logs",
"real",
"-",
"time",
"to",
"the",
"leader",
".",
"Note",
"that",
"if",
"the",
"returned",
"logger",
"is",
"used",
"on",
"the",
"leader",
"you",
"will",
"see",
"the",
"message",
"twice",
"since",
"it",
"still",
"g... | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/realtimeLogger.py#L183-L214 |
225,149 | DataBiosphere/toil | src/toil/jobStores/aws/utils.py | uploadFromPath | def uploadFromPath(localFilePath, partSize, bucket, fileID, headers):
"""
Uploads a file to s3, using multipart uploading if applicable
:param str localFilePath: Path of the file to upload to s3
:param int partSize: max size of each part in the multipart upload, in bytes
:param boto.s3.Bucket bucket: the s3 bucket to upload to
:param str fileID: the name of the file to upload to
:param headers: http headers to use when uploading - generally used for encryption purposes
:return: version of the newly uploaded file
"""
file_size, file_time = fileSizeAndTime(localFilePath)
if file_size <= partSize:
key = bucket.new_key(key_name=bytes(fileID))
key.name = fileID
for attempt in retry_s3():
with attempt:
key.set_contents_from_filename(localFilePath, headers=headers)
version = key.version_id
else:
with open(localFilePath, 'rb') as f:
version = chunkedFileUpload(f, bucket, fileID, file_size, headers, partSize)
for attempt in retry_s3():
with attempt:
key = bucket.get_key(bytes(fileID),
headers=headers,
version_id=version)
assert key.size == file_size
# Make reasonably sure that the file wasn't touched during the upload
assert fileSizeAndTime(localFilePath) == (file_size, file_time)
return version | python | def uploadFromPath(localFilePath, partSize, bucket, fileID, headers):
file_size, file_time = fileSizeAndTime(localFilePath)
if file_size <= partSize:
key = bucket.new_key(key_name=bytes(fileID))
key.name = fileID
for attempt in retry_s3():
with attempt:
key.set_contents_from_filename(localFilePath, headers=headers)
version = key.version_id
else:
with open(localFilePath, 'rb') as f:
version = chunkedFileUpload(f, bucket, fileID, file_size, headers, partSize)
for attempt in retry_s3():
with attempt:
key = bucket.get_key(bytes(fileID),
headers=headers,
version_id=version)
assert key.size == file_size
# Make reasonably sure that the file wasn't touched during the upload
assert fileSizeAndTime(localFilePath) == (file_size, file_time)
return version | [
"def",
"uploadFromPath",
"(",
"localFilePath",
",",
"partSize",
",",
"bucket",
",",
"fileID",
",",
"headers",
")",
":",
"file_size",
",",
"file_time",
"=",
"fileSizeAndTime",
"(",
"localFilePath",
")",
"if",
"file_size",
"<=",
"partSize",
":",
"key",
"=",
"b... | Uploads a file to s3, using multipart uploading if applicable
:param str localFilePath: Path of the file to upload to s3
:param int partSize: max size of each part in the multipart upload, in bytes
:param boto.s3.Bucket bucket: the s3 bucket to upload to
:param str fileID: the name of the file to upload to
:param headers: http headers to use when uploading - generally used for encryption purposes
:return: version of the newly uploaded file | [
"Uploads",
"a",
"file",
"to",
"s3",
"using",
"multipart",
"uploading",
"if",
"applicable"
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/jobStores/aws/utils.py#L194-L224 |
225,150 | DataBiosphere/toil | src/toil/jobStores/aws/utils.py | copyKeyMultipart | def copyKeyMultipart(srcBucketName, srcKeyName, srcKeyVersion, dstBucketName, dstKeyName, sseAlgorithm=None, sseKey=None,
copySourceSseAlgorithm=None, copySourceSseKey=None):
"""
Copies a key from a source key to a destination key in multiple parts. Note that if the
destination key exists it will be overwritten implicitly, and if it does not exist a new
key will be created. If the destination bucket does not exist an error will be raised.
:param str srcBucketName: The name of the bucket to be copied from.
:param str srcKeyName: The name of the key to be copied from.
:param str srcKeyVersion: The version of the key to be copied from.
:param str dstBucketName: The name of the destination bucket for the copy.
:param str dstKeyName: The name of the destination key that will be created or overwritten.
:param str sseAlgorithm: Server-side encryption algorithm for the destination.
:param str sseKey: Server-side encryption key for the destination.
:param str copySourceSseAlgorithm: Server-side encryption algorithm for the source.
:param str copySourceSseKey: Server-side encryption key for the source.
:rtype: str
:return: The version of the copied file (or None if versioning is not enabled for dstBucket).
"""
s3 = boto3.resource('s3')
dstBucket = s3.Bucket(oldstr(dstBucketName))
dstObject = dstBucket.Object(oldstr(dstKeyName))
copySource = {'Bucket': oldstr(srcBucketName), 'Key': oldstr(srcKeyName)}
if srcKeyVersion is not None:
copySource['VersionId'] = oldstr(srcKeyVersion)
# The boto3 functions don't allow passing parameters as None to
# indicate they weren't provided. So we have to do a bit of work
# to ensure we only provide the parameters when they are actually
# required.
destEncryptionArgs = {}
if sseKey is not None:
destEncryptionArgs.update({'SSECustomerAlgorithm': sseAlgorithm,
'SSECustomerKey': sseKey})
copyEncryptionArgs = {}
if copySourceSseKey is not None:
copyEncryptionArgs.update({'CopySourceSSECustomerAlgorithm': copySourceSseAlgorithm,
'CopySourceSSECustomerKey': copySourceSseKey})
copyEncryptionArgs.update(destEncryptionArgs)
dstObject.copy(copySource, ExtraArgs=copyEncryptionArgs)
# Unfortunately, boto3's managed copy doesn't return the version
# that it actually copied to. So we have to check immediately
# after, leaving open the possibility that it may have been
# modified again in the few seconds since the copy finished. There
# isn't much we can do about it.
info = boto3.client('s3').head_object(Bucket=dstObject.bucket_name, Key=dstObject.key,
**destEncryptionArgs)
return info.get('VersionId', None) | python | def copyKeyMultipart(srcBucketName, srcKeyName, srcKeyVersion, dstBucketName, dstKeyName, sseAlgorithm=None, sseKey=None,
copySourceSseAlgorithm=None, copySourceSseKey=None):
s3 = boto3.resource('s3')
dstBucket = s3.Bucket(oldstr(dstBucketName))
dstObject = dstBucket.Object(oldstr(dstKeyName))
copySource = {'Bucket': oldstr(srcBucketName), 'Key': oldstr(srcKeyName)}
if srcKeyVersion is not None:
copySource['VersionId'] = oldstr(srcKeyVersion)
# The boto3 functions don't allow passing parameters as None to
# indicate they weren't provided. So we have to do a bit of work
# to ensure we only provide the parameters when they are actually
# required.
destEncryptionArgs = {}
if sseKey is not None:
destEncryptionArgs.update({'SSECustomerAlgorithm': sseAlgorithm,
'SSECustomerKey': sseKey})
copyEncryptionArgs = {}
if copySourceSseKey is not None:
copyEncryptionArgs.update({'CopySourceSSECustomerAlgorithm': copySourceSseAlgorithm,
'CopySourceSSECustomerKey': copySourceSseKey})
copyEncryptionArgs.update(destEncryptionArgs)
dstObject.copy(copySource, ExtraArgs=copyEncryptionArgs)
# Unfortunately, boto3's managed copy doesn't return the version
# that it actually copied to. So we have to check immediately
# after, leaving open the possibility that it may have been
# modified again in the few seconds since the copy finished. There
# isn't much we can do about it.
info = boto3.client('s3').head_object(Bucket=dstObject.bucket_name, Key=dstObject.key,
**destEncryptionArgs)
return info.get('VersionId', None) | [
"def",
"copyKeyMultipart",
"(",
"srcBucketName",
",",
"srcKeyName",
",",
"srcKeyVersion",
",",
"dstBucketName",
",",
"dstKeyName",
",",
"sseAlgorithm",
"=",
"None",
",",
"sseKey",
"=",
"None",
",",
"copySourceSseAlgorithm",
"=",
"None",
",",
"copySourceSseKey",
"=... | Copies a key from a source key to a destination key in multiple parts. Note that if the
destination key exists it will be overwritten implicitly, and if it does not exist a new
key will be created. If the destination bucket does not exist an error will be raised.
:param str srcBucketName: The name of the bucket to be copied from.
:param str srcKeyName: The name of the key to be copied from.
:param str srcKeyVersion: The version of the key to be copied from.
:param str dstBucketName: The name of the destination bucket for the copy.
:param str dstKeyName: The name of the destination key that will be created or overwritten.
:param str sseAlgorithm: Server-side encryption algorithm for the destination.
:param str sseKey: Server-side encryption key for the destination.
:param str copySourceSseAlgorithm: Server-side encryption algorithm for the source.
:param str copySourceSseKey: Server-side encryption key for the source.
:rtype: str
:return: The version of the copied file (or None if versioning is not enabled for dstBucket). | [
"Copies",
"a",
"key",
"from",
"a",
"source",
"key",
"to",
"a",
"destination",
"key",
"in",
"multiple",
"parts",
".",
"Note",
"that",
"if",
"the",
"destination",
"key",
"exists",
"it",
"will",
"be",
"overwritten",
"implicitly",
"and",
"if",
"it",
"does",
... | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/jobStores/aws/utils.py#L259-L309 |
225,151 | DataBiosphere/toil | src/toil/jobStores/aws/utils.py | _put_attributes_using_post | def _put_attributes_using_post(self, domain_or_name, item_name, attributes,
replace=True, expected_value=None):
"""
Monkey-patched version of SDBConnection.put_attributes that uses POST instead of GET
The GET version is subject to the URL length limit which kicks in before the 256 x 1024 limit
for attribute values. Using POST prevents that.
https://github.com/BD2KGenomics/toil/issues/502
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName': domain_name,
'ItemName': item_name}
self._build_name_value_list(params, attributes, replace)
if expected_value:
self._build_expected_value(params, expected_value)
# The addition of the verb keyword argument is the only difference to put_attributes (Hannes)
return self.get_status('PutAttributes', params, verb='POST') | python | def _put_attributes_using_post(self, domain_or_name, item_name, attributes,
replace=True, expected_value=None):
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName': domain_name,
'ItemName': item_name}
self._build_name_value_list(params, attributes, replace)
if expected_value:
self._build_expected_value(params, expected_value)
# The addition of the verb keyword argument is the only difference to put_attributes (Hannes)
return self.get_status('PutAttributes', params, verb='POST') | [
"def",
"_put_attributes_using_post",
"(",
"self",
",",
"domain_or_name",
",",
"item_name",
",",
"attributes",
",",
"replace",
"=",
"True",
",",
"expected_value",
"=",
"None",
")",
":",
"domain",
",",
"domain_name",
"=",
"self",
".",
"get_domain_and_name",
"(",
... | Monkey-patched version of SDBConnection.put_attributes that uses POST instead of GET
The GET version is subject to the URL length limit which kicks in before the 256 x 1024 limit
for attribute values. Using POST prevents that.
https://github.com/BD2KGenomics/toil/issues/502 | [
"Monkey",
"-",
"patched",
"version",
"of",
"SDBConnection",
".",
"put_attributes",
"that",
"uses",
"POST",
"instead",
"of",
"GET"
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/jobStores/aws/utils.py#L311-L328 |
225,152 | DataBiosphere/toil | src/toil/lib/bioio.py | logFile | def logFile(fileName, printFunction=logger.info):
"""Writes out a formatted version of the given log file
"""
printFunction("Reporting file: %s" % fileName)
shortName = fileName.split("/")[-1]
fileHandle = open(fileName, 'r')
line = fileHandle.readline()
while line != '':
if line[-1] == '\n':
line = line[:-1]
printFunction("%s:\t%s" % (shortName, line))
line = fileHandle.readline()
fileHandle.close() | python | def logFile(fileName, printFunction=logger.info):
printFunction("Reporting file: %s" % fileName)
shortName = fileName.split("/")[-1]
fileHandle = open(fileName, 'r')
line = fileHandle.readline()
while line != '':
if line[-1] == '\n':
line = line[:-1]
printFunction("%s:\t%s" % (shortName, line))
line = fileHandle.readline()
fileHandle.close() | [
"def",
"logFile",
"(",
"fileName",
",",
"printFunction",
"=",
"logger",
".",
"info",
")",
":",
"printFunction",
"(",
"\"Reporting file: %s\"",
"%",
"fileName",
")",
"shortName",
"=",
"fileName",
".",
"split",
"(",
"\"/\"",
")",
"[",
"-",
"1",
"]",
"fileHan... | Writes out a formatted version of the given log file | [
"Writes",
"out",
"a",
"formatted",
"version",
"of",
"the",
"given",
"log",
"file"
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/lib/bioio.py#L82-L94 |
225,153 | DataBiosphere/toil | src/toil/lib/bioio.py | logStream | def logStream(fileHandle, shortName, printFunction=logger.info):
"""Writes out a formatted version of the given log stream.
"""
printFunction("Reporting file: %s" % shortName)
line = fileHandle.readline()
while line != '':
if line[-1] == '\n':
line = line[:-1]
printFunction("%s:\t%s" % (shortName, line))
line = fileHandle.readline()
fileHandle.close() | python | def logStream(fileHandle, shortName, printFunction=logger.info):
printFunction("Reporting file: %s" % shortName)
line = fileHandle.readline()
while line != '':
if line[-1] == '\n':
line = line[:-1]
printFunction("%s:\t%s" % (shortName, line))
line = fileHandle.readline()
fileHandle.close() | [
"def",
"logStream",
"(",
"fileHandle",
",",
"shortName",
",",
"printFunction",
"=",
"logger",
".",
"info",
")",
":",
"printFunction",
"(",
"\"Reporting file: %s\"",
"%",
"shortName",
")",
"line",
"=",
"fileHandle",
".",
"readline",
"(",
")",
"while",
"line",
... | Writes out a formatted version of the given log stream. | [
"Writes",
"out",
"a",
"formatted",
"version",
"of",
"the",
"given",
"log",
"stream",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/lib/bioio.py#L96-L106 |
225,154 | DataBiosphere/toil | src/toil/lib/bioio.py | system | def system(command):
"""
A convenience wrapper around subprocess.check_call that logs the command before passing it
on. The command can be either a string or a sequence of strings. If it is a string shell=True
will be passed to subprocess.check_call.
:type command: str | sequence[string]
"""
logger.debug('Running: %r', command)
subprocess.check_call(command, shell=isinstance(command, string_types), bufsize=-1) | python | def system(command):
logger.debug('Running: %r', command)
subprocess.check_call(command, shell=isinstance(command, string_types), bufsize=-1) | [
"def",
"system",
"(",
"command",
")",
":",
"logger",
".",
"debug",
"(",
"'Running: %r'",
",",
"command",
")",
"subprocess",
".",
"check_call",
"(",
"command",
",",
"shell",
"=",
"isinstance",
"(",
"command",
",",
"string_types",
")",
",",
"bufsize",
"=",
... | A convenience wrapper around subprocess.check_call that logs the command before passing it
on. The command can be either a string or a sequence of strings. If it is a string shell=True
will be passed to subprocess.check_call.
:type command: str | sequence[string] | [
"A",
"convenience",
"wrapper",
"around",
"subprocess",
".",
"check_call",
"that",
"logs",
"the",
"command",
"before",
"passing",
"it",
"on",
".",
"The",
"command",
"can",
"be",
"either",
"a",
"string",
"or",
"a",
"sequence",
"of",
"strings",
".",
"If",
"it... | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/lib/bioio.py#L167-L176 |
225,155 | DataBiosphere/toil | src/toil/lib/bioio.py | absSymPath | def absSymPath(path):
"""like os.path.abspath except it doesn't dereference symlinks
"""
curr_path = os.getcwd()
return os.path.normpath(os.path.join(curr_path, path)) | python | def absSymPath(path):
curr_path = os.getcwd()
return os.path.normpath(os.path.join(curr_path, path)) | [
"def",
"absSymPath",
"(",
"path",
")",
":",
"curr_path",
"=",
"os",
".",
"getcwd",
"(",
")",
"return",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"curr_path",
",",
"path",
")",
")"
] | like os.path.abspath except it doesn't dereference symlinks | [
"like",
"os",
".",
"path",
".",
"abspath",
"except",
"it",
"doesn",
"t",
"dereference",
"symlinks"
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/lib/bioio.py#L199-L203 |
225,156 | DataBiosphere/toil | src/toil/lib/bioio.py | makePublicDir | def makePublicDir(dirName):
"""Makes a given subdirectory if it doesn't already exist, making sure it is public.
"""
if not os.path.exists(dirName):
os.mkdir(dirName)
os.chmod(dirName, 0o777)
return dirName | python | def makePublicDir(dirName):
if not os.path.exists(dirName):
os.mkdir(dirName)
os.chmod(dirName, 0o777)
return dirName | [
"def",
"makePublicDir",
"(",
"dirName",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dirName",
")",
":",
"os",
".",
"mkdir",
"(",
"dirName",
")",
"os",
".",
"chmod",
"(",
"dirName",
",",
"0o777",
")",
"return",
"dirName"
] | Makes a given subdirectory if it doesn't already exist, making sure it is public. | [
"Makes",
"a",
"given",
"subdirectory",
"if",
"it",
"doesn",
"t",
"already",
"exist",
"making",
"sure",
"it",
"is",
"public",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/lib/bioio.py#L297-L303 |
225,157 | DataBiosphere/toil | src/toil/wdl/wdl_synthesis.py | write_AST | def write_AST(wdl_file, outdir=None):
'''
Writes a file with the AST for a wdl file in the outdir.
'''
if outdir is None:
outdir = os.getcwd()
with open(os.path.join(outdir, 'AST.out'), 'w') as f:
with open(wdl_file, 'r') as wdl:
wdl_string = wdl.read()
ast = wdl_parser.parse(wdl_string).ast()
f.write(ast.dumps(indent=2)) | python | def write_AST(wdl_file, outdir=None):
'''
Writes a file with the AST for a wdl file in the outdir.
'''
if outdir is None:
outdir = os.getcwd()
with open(os.path.join(outdir, 'AST.out'), 'w') as f:
with open(wdl_file, 'r') as wdl:
wdl_string = wdl.read()
ast = wdl_parser.parse(wdl_string).ast()
f.write(ast.dumps(indent=2)) | [
"def",
"write_AST",
"(",
"wdl_file",
",",
"outdir",
"=",
"None",
")",
":",
"if",
"outdir",
"is",
"None",
":",
"outdir",
"=",
"os",
".",
"getcwd",
"(",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"outdir",
",",
"'AST.out'",
")",
... | Writes a file with the AST for a wdl file in the outdir. | [
"Writes",
"a",
"file",
"with",
"the",
"AST",
"for",
"a",
"wdl",
"file",
"in",
"the",
"outdir",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/wdl/wdl_synthesis.py#L976-L986 |
225,158 | DataBiosphere/toil | src/toil/wdl/wdl_synthesis.py | SynthesizeWDL.write_main | def write_main(self):
'''
Writes out a huge string representing the main section of the python
compiled toil script.
Currently looks at and writes 5 sections:
1. JSON Variables (includes importing and preparing files as tuples)
2. TSV Variables (includes importing and preparing files as tuples)
3. CSV Variables (includes importing and preparing files as tuples)
4. Wrapping each WDL "task" function as a toil job
5. List out children and encapsulated jobs by priority, then start job0.
This should create variable declarations necessary for function calls.
Map file paths appropriately and store them in the toil fileStore so
that they are persistent from job to job. Create job wrappers for toil.
And finally write out, and run the jobs in order of priority using the
addChild and encapsulate commands provided by toil.
:return: giant string containing the main def for the toil script.
'''
main_section = ''
# write out the main header
main_header = self.write_main_header()
main_section = main_section + main_header
# write out the workflow declarations
main_section = main_section + ' # WF Declarations\n'
wf_declarations_to_write = self.write_main_wfdeclarations()
main_section = main_section + wf_declarations_to_write
# write toil job wrappers with input vars
jobs_to_write = self.write_main_jobwrappers()
main_section = main_section + jobs_to_write
# loop to export all outputs to a cloud bucket
if self.destBucket:
main_destbucket = self.write_main_destbucket()
main_section = main_section + main_destbucket
return main_section | python | def write_main(self):
'''
Writes out a huge string representing the main section of the python
compiled toil script.
Currently looks at and writes 5 sections:
1. JSON Variables (includes importing and preparing files as tuples)
2. TSV Variables (includes importing and preparing files as tuples)
3. CSV Variables (includes importing and preparing files as tuples)
4. Wrapping each WDL "task" function as a toil job
5. List out children and encapsulated jobs by priority, then start job0.
This should create variable declarations necessary for function calls.
Map file paths appropriately and store them in the toil fileStore so
that they are persistent from job to job. Create job wrappers for toil.
And finally write out, and run the jobs in order of priority using the
addChild and encapsulate commands provided by toil.
:return: giant string containing the main def for the toil script.
'''
main_section = ''
# write out the main header
main_header = self.write_main_header()
main_section = main_section + main_header
# write out the workflow declarations
main_section = main_section + ' # WF Declarations\n'
wf_declarations_to_write = self.write_main_wfdeclarations()
main_section = main_section + wf_declarations_to_write
# write toil job wrappers with input vars
jobs_to_write = self.write_main_jobwrappers()
main_section = main_section + jobs_to_write
# loop to export all outputs to a cloud bucket
if self.destBucket:
main_destbucket = self.write_main_destbucket()
main_section = main_section + main_destbucket
return main_section | [
"def",
"write_main",
"(",
"self",
")",
":",
"main_section",
"=",
"''",
"# write out the main header",
"main_header",
"=",
"self",
".",
"write_main_header",
"(",
")",
"main_section",
"=",
"main_section",
"+",
"main_header",
"# write out the workflow declarations",
"main_... | Writes out a huge string representing the main section of the python
compiled toil script.
Currently looks at and writes 5 sections:
1. JSON Variables (includes importing and preparing files as tuples)
2. TSV Variables (includes importing and preparing files as tuples)
3. CSV Variables (includes importing and preparing files as tuples)
4. Wrapping each WDL "task" function as a toil job
5. List out children and encapsulated jobs by priority, then start job0.
This should create variable declarations necessary for function calls.
Map file paths appropriately and store them in the toil fileStore so
that they are persistent from job to job. Create job wrappers for toil.
And finally write out, and run the jobs in order of priority using the
addChild and encapsulate commands provided by toil.
:return: giant string containing the main def for the toil script. | [
"Writes",
"out",
"a",
"huge",
"string",
"representing",
"the",
"main",
"section",
"of",
"the",
"python",
"compiled",
"toil",
"script",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/wdl/wdl_synthesis.py#L123-L164 |
225,159 | DataBiosphere/toil | src/toil/wdl/wdl_synthesis.py | SynthesizeWDL.write_main_jobwrappers | def write_main_jobwrappers(self):
'''
Writes out 'jobs' as wrapped toil objects in preparation for calling.
:return: A string representing this.
'''
main_section = ''
# toil cannot technically start with multiple jobs, so an empty
# 'initialize_jobs' function is always called first to get around this
main_section = main_section + '\n job0 = Job.wrapJobFn(initialize_jobs)\n'
# declare each job in main as a wrapped toil function in order of priority
for wf in self.workflows_dictionary:
for assignment in self.workflows_dictionary[wf]:
if assignment.startswith('call'):
main_section += ' job0 = job0.encapsulate()\n'
main_section += self.write_main_jobwrappers_call(self.workflows_dictionary[wf][assignment])
if assignment.startswith('scatter'):
main_section += ' job0 = job0.encapsulate()\n'
main_section += self.write_main_jobwrappers_scatter(self.workflows_dictionary[wf][assignment],
assignment)
if assignment.startswith('if'):
main_section += ' if {}:\n'.format(self.workflows_dictionary[wf][assignment]['expression'])
main_section += self.write_main_jobwrappers_if(self.workflows_dictionary[wf][assignment]['body'])
main_section += '\n fileStore.start(job0)\n'
return main_section | python | def write_main_jobwrappers(self):
'''
Writes out 'jobs' as wrapped toil objects in preparation for calling.
:return: A string representing this.
'''
main_section = ''
# toil cannot technically start with multiple jobs, so an empty
# 'initialize_jobs' function is always called first to get around this
main_section = main_section + '\n job0 = Job.wrapJobFn(initialize_jobs)\n'
# declare each job in main as a wrapped toil function in order of priority
for wf in self.workflows_dictionary:
for assignment in self.workflows_dictionary[wf]:
if assignment.startswith('call'):
main_section += ' job0 = job0.encapsulate()\n'
main_section += self.write_main_jobwrappers_call(self.workflows_dictionary[wf][assignment])
if assignment.startswith('scatter'):
main_section += ' job0 = job0.encapsulate()\n'
main_section += self.write_main_jobwrappers_scatter(self.workflows_dictionary[wf][assignment],
assignment)
if assignment.startswith('if'):
main_section += ' if {}:\n'.format(self.workflows_dictionary[wf][assignment]['expression'])
main_section += self.write_main_jobwrappers_if(self.workflows_dictionary[wf][assignment]['body'])
main_section += '\n fileStore.start(job0)\n'
return main_section | [
"def",
"write_main_jobwrappers",
"(",
"self",
")",
":",
"main_section",
"=",
"''",
"# toil cannot technically start with multiple jobs, so an empty",
"# 'initialize_jobs' function is always called first to get around this",
"main_section",
"=",
"main_section",
"+",
"'\\n job0 = ... | Writes out 'jobs' as wrapped toil objects in preparation for calling.
:return: A string representing this. | [
"Writes",
"out",
"jobs",
"as",
"wrapped",
"toil",
"objects",
"in",
"preparation",
"for",
"calling",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/wdl/wdl_synthesis.py#L201-L229 |
225,160 | DataBiosphere/toil | src/toil/wdl/wdl_synthesis.py | SynthesizeWDL.write_scatterfunction | def write_scatterfunction(self, job, scattername):
'''
Writes out a python function for each WDL "scatter" object.
'''
scatter_outputs = self.fetch_scatter_outputs(job)
# write the function header
fn_section = self.write_scatterfunction_header(scattername)
# write the scatter definitions
fn_section += self.write_scatterfunction_lists(scatter_outputs)
# write
fn_section += self.write_scatterfunction_loop(job, scatter_outputs)
# write the outputs for the task to return
fn_section += self.write_scatterfunction_outputreturn(scatter_outputs)
return fn_section | python | def write_scatterfunction(self, job, scattername):
'''
Writes out a python function for each WDL "scatter" object.
'''
scatter_outputs = self.fetch_scatter_outputs(job)
# write the function header
fn_section = self.write_scatterfunction_header(scattername)
# write the scatter definitions
fn_section += self.write_scatterfunction_lists(scatter_outputs)
# write
fn_section += self.write_scatterfunction_loop(job, scatter_outputs)
# write the outputs for the task to return
fn_section += self.write_scatterfunction_outputreturn(scatter_outputs)
return fn_section | [
"def",
"write_scatterfunction",
"(",
"self",
",",
"job",
",",
"scattername",
")",
":",
"scatter_outputs",
"=",
"self",
".",
"fetch_scatter_outputs",
"(",
"job",
")",
"# write the function header",
"fn_section",
"=",
"self",
".",
"write_scatterfunction_header",
"(",
... | Writes out a python function for each WDL "scatter" object. | [
"Writes",
"out",
"a",
"python",
"function",
"for",
"each",
"WDL",
"scatter",
"object",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/wdl/wdl_synthesis.py#L424-L443 |
225,161 | DataBiosphere/toil | src/toil/wdl/wdl_synthesis.py | SynthesizeWDL.write_function_bashscriptline | def write_function_bashscriptline(self, job):
'''
Writes a function to create a bashscript for injection into the docker
container.
:param job_task_reference: The job referenced in WDL's Task section.
:param job_alias: The actual job name to be written.
:return: A string writing all of this.
'''
fn_section = " generate_docker_bashscript_file(temp_dir=tempDir, docker_dir=tempDir, globs=["
# TODO: Add glob
# if 'outputs' in self.tasks_dictionary[job]:
# for output in self.tasks_dictionary[job]['outputs']:
# fn_section += '({}), '.format(output[2])
if fn_section.endswith(', '):
fn_section = fn_section[:-2]
fn_section += "], cmd=cmd, job_name='{}')\n\n".format(str(job))
return fn_section | python | def write_function_bashscriptline(self, job):
'''
Writes a function to create a bashscript for injection into the docker
container.
:param job_task_reference: The job referenced in WDL's Task section.
:param job_alias: The actual job name to be written.
:return: A string writing all of this.
'''
fn_section = " generate_docker_bashscript_file(temp_dir=tempDir, docker_dir=tempDir, globs=["
# TODO: Add glob
# if 'outputs' in self.tasks_dictionary[job]:
# for output in self.tasks_dictionary[job]['outputs']:
# fn_section += '({}), '.format(output[2])
if fn_section.endswith(', '):
fn_section = fn_section[:-2]
fn_section += "], cmd=cmd, job_name='{}')\n\n".format(str(job))
return fn_section | [
"def",
"write_function_bashscriptline",
"(",
"self",
",",
"job",
")",
":",
"fn_section",
"=",
"\" generate_docker_bashscript_file(temp_dir=tempDir, docker_dir=tempDir, globs=[\"",
"# TODO: Add glob",
"# if 'outputs' in self.tasks_dictionary[job]:",
"# for output in self.tasks_di... | Writes a function to create a bashscript for injection into the docker
container.
:param job_task_reference: The job referenced in WDL's Task section.
:param job_alias: The actual job name to be written.
:return: A string writing all of this. | [
"Writes",
"a",
"function",
"to",
"create",
"a",
"bashscript",
"for",
"injection",
"into",
"the",
"docker",
"container",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/wdl/wdl_synthesis.py#L709-L727 |
225,162 | DataBiosphere/toil | src/toil/wdl/wdl_synthesis.py | SynthesizeWDL.write_python_file | def write_python_file(self,
module_section,
fn_section,
main_section,
output_file):
'''
Just takes three strings and writes them to output_file.
:param module_section: A string of 'import modules'.
:param fn_section: A string of python 'def functions()'.
:param main_section: A string declaring toil options and main's header.
:param job_section: A string import files into toil and declaring jobs.
:param output_file: The file to write the compiled toil script to.
'''
with open(output_file, 'w') as file:
file.write(module_section)
file.write(fn_section)
file.write(main_section) | python | def write_python_file(self,
module_section,
fn_section,
main_section,
output_file):
'''
Just takes three strings and writes them to output_file.
:param module_section: A string of 'import modules'.
:param fn_section: A string of python 'def functions()'.
:param main_section: A string declaring toil options and main's header.
:param job_section: A string import files into toil and declaring jobs.
:param output_file: The file to write the compiled toil script to.
'''
with open(output_file, 'w') as file:
file.write(module_section)
file.write(fn_section)
file.write(main_section) | [
"def",
"write_python_file",
"(",
"self",
",",
"module_section",
",",
"fn_section",
",",
"main_section",
",",
"output_file",
")",
":",
"with",
"open",
"(",
"output_file",
",",
"'w'",
")",
"as",
"file",
":",
"file",
".",
"write",
"(",
"module_section",
")",
... | Just takes three strings and writes them to output_file.
:param module_section: A string of 'import modules'.
:param fn_section: A string of python 'def functions()'.
:param main_section: A string declaring toil options and main's header.
:param job_section: A string import files into toil and declaring jobs.
:param output_file: The file to write the compiled toil script to. | [
"Just",
"takes",
"three",
"strings",
"and",
"writes",
"them",
"to",
"output_file",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/wdl/wdl_synthesis.py#L879-L896 |
225,163 | DataBiosphere/toil | src/toil/batchSystems/mesos/batchSystem.py | MesosBatchSystem._buildExecutor | def _buildExecutor(self):
"""
Creates and returns an ExecutorInfo-shaped object representing our executor implementation.
"""
# The executor program is installed as a setuptools entry point by setup.py
info = addict.Dict()
info.name = "toil"
info.command.value = resolveEntryPoint('_toil_mesos_executor')
info.executor_id.value = "toil-%i" % os.getpid()
info.source = pwd.getpwuid(os.getuid()).pw_name
return info | python | def _buildExecutor(self):
# The executor program is installed as a setuptools entry point by setup.py
info = addict.Dict()
info.name = "toil"
info.command.value = resolveEntryPoint('_toil_mesos_executor')
info.executor_id.value = "toil-%i" % os.getpid()
info.source = pwd.getpwuid(os.getuid()).pw_name
return info | [
"def",
"_buildExecutor",
"(",
"self",
")",
":",
"# The executor program is installed as a setuptools entry point by setup.py",
"info",
"=",
"addict",
".",
"Dict",
"(",
")",
"info",
".",
"name",
"=",
"\"toil\"",
"info",
".",
"command",
".",
"value",
"=",
"resolveEntr... | Creates and returns an ExecutorInfo-shaped object representing our executor implementation. | [
"Creates",
"and",
"returns",
"an",
"ExecutorInfo",
"-",
"shaped",
"object",
"representing",
"our",
"executor",
"implementation",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/batchSystems/mesos/batchSystem.py#L299-L309 |
225,164 | DataBiosphere/toil | src/toil/batchSystems/mesos/batchSystem.py | MesosBatchSystem._startDriver | def _startDriver(self):
"""
The Mesos driver thread which handles the scheduler's communication with the Mesos master
"""
framework = addict.Dict()
framework.user = getpass.getuser() # We must determine the user name ourselves with pymesos
framework.name = "toil"
framework.principal = framework.name
# Make the driver which implements most of the scheduler logic and calls back to us for the user-defined parts.
# Make sure it will call us with nice namespace-y addicts
self.driver = MesosSchedulerDriver(self, framework,
self._resolveAddress(self.mesosMasterAddress),
use_addict=True, implicit_acknowledgements=True)
self.driver.start() | python | def _startDriver(self):
framework = addict.Dict()
framework.user = getpass.getuser() # We must determine the user name ourselves with pymesos
framework.name = "toil"
framework.principal = framework.name
# Make the driver which implements most of the scheduler logic and calls back to us for the user-defined parts.
# Make sure it will call us with nice namespace-y addicts
self.driver = MesosSchedulerDriver(self, framework,
self._resolveAddress(self.mesosMasterAddress),
use_addict=True, implicit_acknowledgements=True)
self.driver.start() | [
"def",
"_startDriver",
"(",
"self",
")",
":",
"framework",
"=",
"addict",
".",
"Dict",
"(",
")",
"framework",
".",
"user",
"=",
"getpass",
".",
"getuser",
"(",
")",
"# We must determine the user name ourselves with pymesos",
"framework",
".",
"name",
"=",
"\"toi... | The Mesos driver thread which handles the scheduler's communication with the Mesos master | [
"The",
"Mesos",
"driver",
"thread",
"which",
"handles",
"the",
"scheduler",
"s",
"communication",
"with",
"the",
"Mesos",
"master"
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/batchSystems/mesos/batchSystem.py#L311-L324 |
225,165 | DataBiosphere/toil | src/toil/batchSystems/mesos/batchSystem.py | MesosBatchSystem.registered | def registered(self, driver, frameworkId, masterInfo):
"""
Invoked when the scheduler successfully registers with a Mesos master
"""
log.debug("Registered with framework ID %s", frameworkId.value)
# Save the framework ID
self.frameworkId = frameworkId.value | python | def registered(self, driver, frameworkId, masterInfo):
log.debug("Registered with framework ID %s", frameworkId.value)
# Save the framework ID
self.frameworkId = frameworkId.value | [
"def",
"registered",
"(",
"self",
",",
"driver",
",",
"frameworkId",
",",
"masterInfo",
")",
":",
"log",
".",
"debug",
"(",
"\"Registered with framework ID %s\"",
",",
"frameworkId",
".",
"value",
")",
"# Save the framework ID",
"self",
".",
"frameworkId",
"=",
... | Invoked when the scheduler successfully registers with a Mesos master | [
"Invoked",
"when",
"the",
"scheduler",
"successfully",
"registers",
"with",
"a",
"Mesos",
"master"
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/batchSystems/mesos/batchSystem.py#L360-L366 |
225,166 | DataBiosphere/toil | src/toil/batchSystems/mesos/batchSystem.py | MesosBatchSystem._newMesosTask | def _newMesosTask(self, job, offer):
"""
Build the Mesos task object for a given the Toil job and Mesos offer
"""
task = addict.Dict()
task.task_id.value = str(job.jobID)
task.agent_id.value = offer.agent_id.value
task.name = job.name
task.data = encode_data(pickle.dumps(job))
task.executor = addict.Dict(self.executor)
task.resources = []
task.resources.append(addict.Dict())
cpus = task.resources[-1]
cpus.name = 'cpus'
cpus.type = 'SCALAR'
cpus.scalar.value = job.resources.cores
task.resources.append(addict.Dict())
disk = task.resources[-1]
disk.name = 'disk'
disk.type = 'SCALAR'
if toMiB(job.resources.disk) > 1:
disk.scalar.value = toMiB(job.resources.disk)
else:
log.warning("Job %s uses less disk than Mesos requires. Rounding %s up to 1 MiB.",
job.jobID, job.resources.disk)
disk.scalar.value = 1
task.resources.append(addict.Dict())
mem = task.resources[-1]
mem.name = 'mem'
mem.type = 'SCALAR'
if toMiB(job.resources.memory) > 1:
mem.scalar.value = toMiB(job.resources.memory)
else:
log.warning("Job %s uses less memory than Mesos requires. Rounding %s up to 1 MiB.",
job.jobID, job.resources.memory)
mem.scalar.value = 1
return task | python | def _newMesosTask(self, job, offer):
task = addict.Dict()
task.task_id.value = str(job.jobID)
task.agent_id.value = offer.agent_id.value
task.name = job.name
task.data = encode_data(pickle.dumps(job))
task.executor = addict.Dict(self.executor)
task.resources = []
task.resources.append(addict.Dict())
cpus = task.resources[-1]
cpus.name = 'cpus'
cpus.type = 'SCALAR'
cpus.scalar.value = job.resources.cores
task.resources.append(addict.Dict())
disk = task.resources[-1]
disk.name = 'disk'
disk.type = 'SCALAR'
if toMiB(job.resources.disk) > 1:
disk.scalar.value = toMiB(job.resources.disk)
else:
log.warning("Job %s uses less disk than Mesos requires. Rounding %s up to 1 MiB.",
job.jobID, job.resources.disk)
disk.scalar.value = 1
task.resources.append(addict.Dict())
mem = task.resources[-1]
mem.name = 'mem'
mem.type = 'SCALAR'
if toMiB(job.resources.memory) > 1:
mem.scalar.value = toMiB(job.resources.memory)
else:
log.warning("Job %s uses less memory than Mesos requires. Rounding %s up to 1 MiB.",
job.jobID, job.resources.memory)
mem.scalar.value = 1
return task | [
"def",
"_newMesosTask",
"(",
"self",
",",
"job",
",",
"offer",
")",
":",
"task",
"=",
"addict",
".",
"Dict",
"(",
")",
"task",
".",
"task_id",
".",
"value",
"=",
"str",
"(",
"job",
".",
"jobID",
")",
"task",
".",
"agent_id",
".",
"value",
"=",
"o... | Build the Mesos task object for a given the Toil job and Mesos offer | [
"Build",
"the",
"Mesos",
"task",
"object",
"for",
"a",
"given",
"the",
"Toil",
"job",
"and",
"Mesos",
"offer"
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/batchSystems/mesos/batchSystem.py#L536-L576 |
225,167 | DataBiosphere/toil | src/toil/batchSystems/mesos/batchSystem.py | MesosBatchSystem.frameworkMessage | def frameworkMessage(self, driver, executorId, agentId, message):
"""
Invoked when an executor sends a message.
"""
# Take it out of base 64 encoding from Protobuf
message = decode_data(message)
log.debug('Got framework message from executor %s running on agent %s: %s',
executorId.value, agentId.value, message)
message = ast.literal_eval(message)
assert isinstance(message, dict)
# Handle the mandatory fields of a message
nodeAddress = message.pop('address')
executor = self._registerNode(nodeAddress, agentId.value)
# Handle optional message fields
for k, v in iteritems(message):
if k == 'nodeInfo':
assert isinstance(v, dict)
resources = [taskData for taskData in itervalues(self.runningJobMap)
if taskData.executorID == executorId.value]
requestedCores = sum(taskData.cores for taskData in resources)
requestedMemory = sum(taskData.memory for taskData in resources)
executor.nodeInfo = NodeInfo(requestedCores=requestedCores, requestedMemory=requestedMemory, **v)
self.executors[nodeAddress] = executor
else:
raise RuntimeError("Unknown message field '%s'." % k) | python | def frameworkMessage(self, driver, executorId, agentId, message):
# Take it out of base 64 encoding from Protobuf
message = decode_data(message)
log.debug('Got framework message from executor %s running on agent %s: %s',
executorId.value, agentId.value, message)
message = ast.literal_eval(message)
assert isinstance(message, dict)
# Handle the mandatory fields of a message
nodeAddress = message.pop('address')
executor = self._registerNode(nodeAddress, agentId.value)
# Handle optional message fields
for k, v in iteritems(message):
if k == 'nodeInfo':
assert isinstance(v, dict)
resources = [taskData for taskData in itervalues(self.runningJobMap)
if taskData.executorID == executorId.value]
requestedCores = sum(taskData.cores for taskData in resources)
requestedMemory = sum(taskData.memory for taskData in resources)
executor.nodeInfo = NodeInfo(requestedCores=requestedCores, requestedMemory=requestedMemory, **v)
self.executors[nodeAddress] = executor
else:
raise RuntimeError("Unknown message field '%s'." % k) | [
"def",
"frameworkMessage",
"(",
"self",
",",
"driver",
",",
"executorId",
",",
"agentId",
",",
"message",
")",
":",
"# Take it out of base 64 encoding from Protobuf",
"message",
"=",
"decode_data",
"(",
"message",
")",
"log",
".",
"debug",
"(",
"'Got framework messa... | Invoked when an executor sends a message. | [
"Invoked",
"when",
"an",
"executor",
"sends",
"a",
"message",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/batchSystems/mesos/batchSystem.py#L651-L677 |
225,168 | DataBiosphere/toil | src/toil/batchSystems/mesos/batchSystem.py | MesosBatchSystem._registerNode | def _registerNode(self, nodeAddress, agentId, nodePort=5051):
"""
Called when we get communication from an agent. Remembers the
information about the agent by address, and the agent address by agent
ID.
"""
executor = self.executors.get(nodeAddress)
if executor is None or executor.agentId != agentId:
executor = self.ExecutorInfo(nodeAddress=nodeAddress,
agentId=agentId,
nodeInfo=None,
lastSeen=time.time())
self.executors[nodeAddress] = executor
else:
executor.lastSeen = time.time()
# Record the IP under the agent id
self.agentsByID[agentId] = nodeAddress
return executor | python | def _registerNode(self, nodeAddress, agentId, nodePort=5051):
executor = self.executors.get(nodeAddress)
if executor is None or executor.agentId != agentId:
executor = self.ExecutorInfo(nodeAddress=nodeAddress,
agentId=agentId,
nodeInfo=None,
lastSeen=time.time())
self.executors[nodeAddress] = executor
else:
executor.lastSeen = time.time()
# Record the IP under the agent id
self.agentsByID[agentId] = nodeAddress
return executor | [
"def",
"_registerNode",
"(",
"self",
",",
"nodeAddress",
",",
"agentId",
",",
"nodePort",
"=",
"5051",
")",
":",
"executor",
"=",
"self",
".",
"executors",
".",
"get",
"(",
"nodeAddress",
")",
"if",
"executor",
"is",
"None",
"or",
"executor",
".",
"agent... | Called when we get communication from an agent. Remembers the
information about the agent by address, and the agent address by agent
ID. | [
"Called",
"when",
"we",
"get",
"communication",
"from",
"an",
"agent",
".",
"Remembers",
"the",
"information",
"about",
"the",
"agent",
"by",
"address",
"and",
"the",
"agent",
"address",
"by",
"agent",
"ID",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/batchSystems/mesos/batchSystem.py#L679-L698 |
225,169 | DataBiosphere/toil | src/toil/provisioners/node.py | Node.remainingBillingInterval | def remainingBillingInterval(self):
"""
If the node has a launch time, this function returns a floating point value
between 0 and 1.0 representing how far we are into the
current billing cycle for the given instance. If the return value is .25, we are one
quarter into the billing cycle, with three quarters remaining before we will be charged
again for that instance.
Assumes a billing cycle of one hour.
:return: Float from 0 -> 1.0 representing percentage of pre-paid time left in cycle.
"""
if self.launchTime:
now = datetime.datetime.utcnow()
delta = now - parse_iso_utc(self.launchTime)
return 1 - delta.total_seconds() / 3600.0 % 1.0
else:
return 1 | python | def remainingBillingInterval(self):
if self.launchTime:
now = datetime.datetime.utcnow()
delta = now - parse_iso_utc(self.launchTime)
return 1 - delta.total_seconds() / 3600.0 % 1.0
else:
return 1 | [
"def",
"remainingBillingInterval",
"(",
"self",
")",
":",
"if",
"self",
".",
"launchTime",
":",
"now",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"delta",
"=",
"now",
"-",
"parse_iso_utc",
"(",
"self",
".",
"launchTime",
")",
"return",
"1... | If the node has a launch time, this function returns a floating point value
between 0 and 1.0 representing how far we are into the
current billing cycle for the given instance. If the return value is .25, we are one
quarter into the billing cycle, with three quarters remaining before we will be charged
again for that instance.
Assumes a billing cycle of one hour.
:return: Float from 0 -> 1.0 representing percentage of pre-paid time left in cycle. | [
"If",
"the",
"node",
"has",
"a",
"launch",
"time",
"this",
"function",
"returns",
"a",
"floating",
"point",
"value",
"between",
"0",
"and",
"1",
".",
"0",
"representing",
"how",
"far",
"we",
"are",
"into",
"the",
"current",
"billing",
"cycle",
"for",
"th... | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/node.py#L52-L69 |
225,170 | DataBiosphere/toil | src/toil/provisioners/node.py | Node.copySshKeys | def copySshKeys(self, keyName):
""" Copy authorized_keys file to the core user from the keyName user."""
if keyName == 'core':
return # No point.
# Make sure that keys are there.
self._waitForSSHKeys(keyName=keyName)
# copy keys to core user so that the ssh calls will work
# - normal mechanism failed unless public key was in the google-ssh format
# - even so, the key wasn't copied correctly to the core account
keyFile = '/home/%s/.ssh/authorized_keys' % keyName
self.sshInstance('/usr/bin/sudo', '/usr/bin/cp', keyFile, '/home/core/.ssh', user=keyName)
self.sshInstance('/usr/bin/sudo', '/usr/bin/chown', 'core',
'/home/core/.ssh/authorized_keys', user=keyName) | python | def copySshKeys(self, keyName):
if keyName == 'core':
return # No point.
# Make sure that keys are there.
self._waitForSSHKeys(keyName=keyName)
# copy keys to core user so that the ssh calls will work
# - normal mechanism failed unless public key was in the google-ssh format
# - even so, the key wasn't copied correctly to the core account
keyFile = '/home/%s/.ssh/authorized_keys' % keyName
self.sshInstance('/usr/bin/sudo', '/usr/bin/cp', keyFile, '/home/core/.ssh', user=keyName)
self.sshInstance('/usr/bin/sudo', '/usr/bin/chown', 'core',
'/home/core/.ssh/authorized_keys', user=keyName) | [
"def",
"copySshKeys",
"(",
"self",
",",
"keyName",
")",
":",
"if",
"keyName",
"==",
"'core'",
":",
"return",
"# No point.",
"# Make sure that keys are there.",
"self",
".",
"_waitForSSHKeys",
"(",
"keyName",
"=",
"keyName",
")",
"# copy keys to core user so that the s... | Copy authorized_keys file to the core user from the keyName user. | [
"Copy",
"authorized_keys",
"file",
"to",
"the",
"core",
"user",
"from",
"the",
"keyName",
"user",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/node.py#L78-L92 |
225,171 | DataBiosphere/toil | src/toil/provisioners/node.py | Node.injectFile | def injectFile(self, fromFile, toFile, role):
"""
rysnc a file to the vm with the given role
"""
maxRetries = 10
for retry in range(maxRetries):
try:
self.coreRsync([fromFile, ":" + toFile], applianceName=role)
return True
except Exception as e:
logger.debug("Rsync to new node failed, trying again. Error message: %s" % e)
time.sleep(10*retry)
raise RuntimeError("Failed to inject file %s to %s with ip %s" % (fromFile, role, self.effectiveIP) ) | python | def injectFile(self, fromFile, toFile, role):
maxRetries = 10
for retry in range(maxRetries):
try:
self.coreRsync([fromFile, ":" + toFile], applianceName=role)
return True
except Exception as e:
logger.debug("Rsync to new node failed, trying again. Error message: %s" % e)
time.sleep(10*retry)
raise RuntimeError("Failed to inject file %s to %s with ip %s" % (fromFile, role, self.effectiveIP) ) | [
"def",
"injectFile",
"(",
"self",
",",
"fromFile",
",",
"toFile",
",",
"role",
")",
":",
"maxRetries",
"=",
"10",
"for",
"retry",
"in",
"range",
"(",
"maxRetries",
")",
":",
"try",
":",
"self",
".",
"coreRsync",
"(",
"[",
"fromFile",
",",
"\":\"",
"+... | rysnc a file to the vm with the given role | [
"rysnc",
"a",
"file",
"to",
"the",
"vm",
"with",
"the",
"given",
"role"
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/node.py#L94-L106 |
225,172 | DataBiosphere/toil | src/toil/provisioners/node.py | Node._waitForSSHPort | def _waitForSSHPort(self):
"""
Wait until the instance represented by this box is accessible via SSH.
:return: the number of unsuccessful attempts to connect to the port before a the first
success
"""
logger.debug('Waiting for ssh port to open...')
for i in count():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.settimeout(a_short_time)
s.connect((self.effectiveIP, 22))
logger.debug('...ssh port open')
return i
except socket.error:
pass
finally:
s.close() | python | def _waitForSSHPort(self):
logger.debug('Waiting for ssh port to open...')
for i in count():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.settimeout(a_short_time)
s.connect((self.effectiveIP, 22))
logger.debug('...ssh port open')
return i
except socket.error:
pass
finally:
s.close() | [
"def",
"_waitForSSHPort",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"'Waiting for ssh port to open...'",
")",
"for",
"i",
"in",
"count",
"(",
")",
":",
"s",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_... | Wait until the instance represented by this box is accessible via SSH.
:return: the number of unsuccessful attempts to connect to the port before a the first
success | [
"Wait",
"until",
"the",
"instance",
"represented",
"by",
"this",
"box",
"is",
"accessible",
"via",
"SSH",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/node.py#L174-L192 |
225,173 | DataBiosphere/toil | src/toil/provisioners/node.py | Node.sshInstance | def sshInstance(self, *args, **kwargs):
"""
Run a command on the instance.
Returns the binary output of the command.
"""
kwargs['collectStdout'] = True
return self.coreSSH(*args, **kwargs) | python | def sshInstance(self, *args, **kwargs):
kwargs['collectStdout'] = True
return self.coreSSH(*args, **kwargs) | [
"def",
"sshInstance",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'collectStdout'",
"]",
"=",
"True",
"return",
"self",
".",
"coreSSH",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Run a command on the instance.
Returns the binary output of the command. | [
"Run",
"a",
"command",
"on",
"the",
"instance",
".",
"Returns",
"the",
"binary",
"output",
"of",
"the",
"command",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/node.py#L204-L210 |
225,174 | DataBiosphere/toil | src/toil/provisioners/clusterScaler.py | adjustEndingReservationForJob | def adjustEndingReservationForJob(reservation, jobShape, wallTime):
"""
Add a job to an ending reservation that ends at wallTime, splitting
the reservation if the job doesn't fill the entire timeslice.
"""
if jobShape.wallTime - wallTime < reservation.shape.wallTime:
# This job only partially fills one of the slices. Create a new slice.
reservation.shape, nS = split(reservation.shape, jobShape, jobShape.wallTime - wallTime)
nS.nReservation = reservation.nReservation
reservation.nReservation = nS
else:
# This job perfectly fits within the boundaries of the slices.
reservation.subtract(jobShape) | python | def adjustEndingReservationForJob(reservation, jobShape, wallTime):
if jobShape.wallTime - wallTime < reservation.shape.wallTime:
# This job only partially fills one of the slices. Create a new slice.
reservation.shape, nS = split(reservation.shape, jobShape, jobShape.wallTime - wallTime)
nS.nReservation = reservation.nReservation
reservation.nReservation = nS
else:
# This job perfectly fits within the boundaries of the slices.
reservation.subtract(jobShape) | [
"def",
"adjustEndingReservationForJob",
"(",
"reservation",
",",
"jobShape",
",",
"wallTime",
")",
":",
"if",
"jobShape",
".",
"wallTime",
"-",
"wallTime",
"<",
"reservation",
".",
"shape",
".",
"wallTime",
":",
"# This job only partially fills one of the slices. Create... | Add a job to an ending reservation that ends at wallTime, splitting
the reservation if the job doesn't fill the entire timeslice. | [
"Add",
"a",
"job",
"to",
"an",
"ending",
"reservation",
"that",
"ends",
"at",
"wallTime",
"splitting",
"the",
"reservation",
"if",
"the",
"job",
"doesn",
"t",
"fill",
"the",
"entire",
"timeslice",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/clusterScaler.py#L247-L259 |
225,175 | DataBiosphere/toil | src/toil/provisioners/clusterScaler.py | split | def split(nodeShape, jobShape, wallTime):
"""
Partition a node allocation into two to fit the job, returning the
modified shape of the node and a new node reservation for
the extra time that the job didn't fill.
"""
return (Shape(wallTime,
nodeShape.memory - jobShape.memory,
nodeShape.cores - jobShape.cores,
nodeShape.disk - jobShape.disk,
nodeShape.preemptable),
NodeReservation(Shape(nodeShape.wallTime - wallTime,
nodeShape.memory,
nodeShape.cores,
nodeShape.disk,
nodeShape.preemptable))) | python | def split(nodeShape, jobShape, wallTime):
return (Shape(wallTime,
nodeShape.memory - jobShape.memory,
nodeShape.cores - jobShape.cores,
nodeShape.disk - jobShape.disk,
nodeShape.preemptable),
NodeReservation(Shape(nodeShape.wallTime - wallTime,
nodeShape.memory,
nodeShape.cores,
nodeShape.disk,
nodeShape.preemptable))) | [
"def",
"split",
"(",
"nodeShape",
",",
"jobShape",
",",
"wallTime",
")",
":",
"return",
"(",
"Shape",
"(",
"wallTime",
",",
"nodeShape",
".",
"memory",
"-",
"jobShape",
".",
"memory",
",",
"nodeShape",
".",
"cores",
"-",
"jobShape",
".",
"cores",
",",
... | Partition a node allocation into two to fit the job, returning the
modified shape of the node and a new node reservation for
the extra time that the job didn't fill. | [
"Partition",
"a",
"node",
"allocation",
"into",
"two",
"to",
"fit",
"the",
"job",
"returning",
"the",
"modified",
"shape",
"of",
"the",
"node",
"and",
"a",
"new",
"node",
"reservation",
"for",
"the",
"extra",
"time",
"that",
"the",
"job",
"didn",
"t",
"f... | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/clusterScaler.py#L261-L276 |
225,176 | DataBiosphere/toil | src/toil/provisioners/clusterScaler.py | BinPackedFit.binPack | def binPack(self, jobShapes):
"""Pack a list of jobShapes into the fewest nodes reasonable. Can be run multiple times."""
# TODO: Check for redundancy with batchsystems.mesos.JobQueue() sorting
logger.debug('Running bin packing for node shapes %s and %s job(s).',
self.nodeShapes, len(jobShapes))
# Sort in descending order from largest to smallest. The FFD like-strategy will pack the
# jobs in order from longest to shortest.
jobShapes.sort()
jobShapes.reverse()
assert len(jobShapes) == 0 or jobShapes[0] >= jobShapes[-1]
for jS in jobShapes:
self.addJobShape(jS) | python | def binPack(self, jobShapes):
# TODO: Check for redundancy with batchsystems.mesos.JobQueue() sorting
logger.debug('Running bin packing for node shapes %s and %s job(s).',
self.nodeShapes, len(jobShapes))
# Sort in descending order from largest to smallest. The FFD like-strategy will pack the
# jobs in order from longest to shortest.
jobShapes.sort()
jobShapes.reverse()
assert len(jobShapes) == 0 or jobShapes[0] >= jobShapes[-1]
for jS in jobShapes:
self.addJobShape(jS) | [
"def",
"binPack",
"(",
"self",
",",
"jobShapes",
")",
":",
"# TODO: Check for redundancy with batchsystems.mesos.JobQueue() sorting",
"logger",
".",
"debug",
"(",
"'Running bin packing for node shapes %s and %s job(s).'",
",",
"self",
".",
"nodeShapes",
",",
"len",
"(",
"jo... | Pack a list of jobShapes into the fewest nodes reasonable. Can be run multiple times. | [
"Pack",
"a",
"list",
"of",
"jobShapes",
"into",
"the",
"fewest",
"nodes",
"reasonable",
".",
"Can",
"be",
"run",
"multiple",
"times",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/clusterScaler.py#L65-L76 |
225,177 | DataBiosphere/toil | src/toil/provisioners/clusterScaler.py | BinPackedFit.getRequiredNodes | def getRequiredNodes(self):
"""
Returns a dict from node shape to number of nodes required to run the packed jobs.
"""
return {nodeShape:len(self.nodeReservations[nodeShape]) for nodeShape in self.nodeShapes} | python | def getRequiredNodes(self):
return {nodeShape:len(self.nodeReservations[nodeShape]) for nodeShape in self.nodeShapes} | [
"def",
"getRequiredNodes",
"(",
"self",
")",
":",
"return",
"{",
"nodeShape",
":",
"len",
"(",
"self",
".",
"nodeReservations",
"[",
"nodeShape",
"]",
")",
"for",
"nodeShape",
"in",
"self",
".",
"nodeShapes",
"}"
] | Returns a dict from node shape to number of nodes required to run the packed jobs. | [
"Returns",
"a",
"dict",
"from",
"node",
"shape",
"to",
"number",
"of",
"nodes",
"required",
"to",
"run",
"the",
"packed",
"jobs",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/clusterScaler.py#L114-L118 |
225,178 | DataBiosphere/toil | src/toil/provisioners/clusterScaler.py | NodeReservation.fits | def fits(self, jobShape):
"""Check if a job shape's resource requirements will fit within this allocation."""
return jobShape.memory <= self.shape.memory and \
jobShape.cores <= self.shape.cores and \
jobShape.disk <= self.shape.disk and \
(jobShape.preemptable or not self.shape.preemptable) | python | def fits(self, jobShape):
return jobShape.memory <= self.shape.memory and \
jobShape.cores <= self.shape.cores and \
jobShape.disk <= self.shape.disk and \
(jobShape.preemptable or not self.shape.preemptable) | [
"def",
"fits",
"(",
"self",
",",
"jobShape",
")",
":",
"return",
"jobShape",
".",
"memory",
"<=",
"self",
".",
"shape",
".",
"memory",
"and",
"jobShape",
".",
"cores",
"<=",
"self",
".",
"shape",
".",
"cores",
"and",
"jobShape",
".",
"disk",
"<=",
"s... | Check if a job shape's resource requirements will fit within this allocation. | [
"Check",
"if",
"a",
"job",
"shape",
"s",
"resource",
"requirements",
"will",
"fit",
"within",
"this",
"allocation",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/clusterScaler.py#L164-L169 |
225,179 | DataBiosphere/toil | src/toil/provisioners/clusterScaler.py | NodeReservation.shapes | def shapes(self):
"""Get all time-slice shapes, in order, from this reservation on."""
shapes = []
curRes = self
while curRes is not None:
shapes.append(curRes.shape)
curRes = curRes.nReservation
return shapes | python | def shapes(self):
shapes = []
curRes = self
while curRes is not None:
shapes.append(curRes.shape)
curRes = curRes.nReservation
return shapes | [
"def",
"shapes",
"(",
"self",
")",
":",
"shapes",
"=",
"[",
"]",
"curRes",
"=",
"self",
"while",
"curRes",
"is",
"not",
"None",
":",
"shapes",
".",
"append",
"(",
"curRes",
".",
"shape",
")",
"curRes",
"=",
"curRes",
".",
"nReservation",
"return",
"s... | Get all time-slice shapes, in order, from this reservation on. | [
"Get",
"all",
"time",
"-",
"slice",
"shapes",
"in",
"order",
"from",
"this",
"reservation",
"on",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/clusterScaler.py#L171-L178 |
225,180 | DataBiosphere/toil | src/toil/provisioners/clusterScaler.py | NodeReservation.subtract | def subtract(self, jobShape):
"""
Subtracts the resources necessary to run a jobShape from the reservation.
"""
self.shape = Shape(self.shape.wallTime,
self.shape.memory - jobShape.memory,
self.shape.cores - jobShape.cores,
self.shape.disk - jobShape.disk,
self.shape.preemptable) | python | def subtract(self, jobShape):
self.shape = Shape(self.shape.wallTime,
self.shape.memory - jobShape.memory,
self.shape.cores - jobShape.cores,
self.shape.disk - jobShape.disk,
self.shape.preemptable) | [
"def",
"subtract",
"(",
"self",
",",
"jobShape",
")",
":",
"self",
".",
"shape",
"=",
"Shape",
"(",
"self",
".",
"shape",
".",
"wallTime",
",",
"self",
".",
"shape",
".",
"memory",
"-",
"jobShape",
".",
"memory",
",",
"self",
".",
"shape",
".",
"co... | Subtracts the resources necessary to run a jobShape from the reservation. | [
"Subtracts",
"the",
"resources",
"necessary",
"to",
"run",
"a",
"jobShape",
"from",
"the",
"reservation",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/clusterScaler.py#L180-L188 |
225,181 | DataBiosphere/toil | src/toil/provisioners/clusterScaler.py | ClusterScaler.setStaticNodes | def setStaticNodes(self, nodes, preemptable):
"""
Used to track statically provisioned nodes. This method must be called
before any auto-scaled nodes are provisioned.
These nodes are treated differently than auto-scaled nodes in that they should
not be automatically terminated.
:param nodes: list of Node objects
"""
prefix = 'non-' if not preemptable else ''
logger.debug("Adding %s to %spreemptable static nodes", nodes, prefix)
if nodes is not None:
self.static[preemptable] = {node.privateIP : node for node in nodes} | python | def setStaticNodes(self, nodes, preemptable):
prefix = 'non-' if not preemptable else ''
logger.debug("Adding %s to %spreemptable static nodes", nodes, prefix)
if nodes is not None:
self.static[preemptable] = {node.privateIP : node for node in nodes} | [
"def",
"setStaticNodes",
"(",
"self",
",",
"nodes",
",",
"preemptable",
")",
":",
"prefix",
"=",
"'non-'",
"if",
"not",
"preemptable",
"else",
"''",
"logger",
".",
"debug",
"(",
"\"Adding %s to %spreemptable static nodes\"",
",",
"nodes",
",",
"prefix",
")",
"... | Used to track statically provisioned nodes. This method must be called
before any auto-scaled nodes are provisioned.
These nodes are treated differently than auto-scaled nodes in that they should
not be automatically terminated.
:param nodes: list of Node objects | [
"Used",
"to",
"track",
"statically",
"provisioned",
"nodes",
".",
"This",
"method",
"must",
"be",
"called",
"before",
"any",
"auto",
"-",
"scaled",
"nodes",
"are",
"provisioned",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/clusterScaler.py#L439-L452 |
225,182 | DataBiosphere/toil | src/toil/provisioners/clusterScaler.py | ClusterScaler.smoothEstimate | def smoothEstimate(self, nodeShape, estimatedNodeCount):
"""
Smooth out fluctuations in the estimate for this node compared to
previous runs. Returns an integer.
"""
weightedEstimate = (1 - self.betaInertia) * estimatedNodeCount + \
self.betaInertia * self.previousWeightedEstimate[nodeShape]
self.previousWeightedEstimate[nodeShape] = weightedEstimate
return self._round(weightedEstimate) | python | def smoothEstimate(self, nodeShape, estimatedNodeCount):
weightedEstimate = (1 - self.betaInertia) * estimatedNodeCount + \
self.betaInertia * self.previousWeightedEstimate[nodeShape]
self.previousWeightedEstimate[nodeShape] = weightedEstimate
return self._round(weightedEstimate) | [
"def",
"smoothEstimate",
"(",
"self",
",",
"nodeShape",
",",
"estimatedNodeCount",
")",
":",
"weightedEstimate",
"=",
"(",
"1",
"-",
"self",
".",
"betaInertia",
")",
"*",
"estimatedNodeCount",
"+",
"self",
".",
"betaInertia",
"*",
"self",
".",
"previousWeighte... | Smooth out fluctuations in the estimate for this node compared to
previous runs. Returns an integer. | [
"Smooth",
"out",
"fluctuations",
"in",
"the",
"estimate",
"for",
"this",
"node",
"compared",
"to",
"previous",
"runs",
".",
"Returns",
"an",
"integer",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/clusterScaler.py#L463-L471 |
225,183 | DataBiosphere/toil | src/toil/provisioners/clusterScaler.py | ClusterScaler.getEstimatedNodeCounts | def getEstimatedNodeCounts(self, queuedJobShapes, currentNodeCounts):
"""
Given the resource requirements of queued jobs and the current size of the cluster, returns
a dict mapping from nodeShape to the number of nodes we want in the cluster right now.
"""
nodesToRunQueuedJobs = binPacking(jobShapes=queuedJobShapes,
nodeShapes=self.nodeShapes,
goalTime=self.targetTime)
estimatedNodeCounts = {}
for nodeShape in self.nodeShapes:
nodeType = self.nodeShapeToType[nodeShape]
logger.debug("Nodes of type %s to run queued jobs = "
"%s" % (nodeType, nodesToRunQueuedJobs[nodeShape]))
# Actual calculation of the estimated number of nodes required
estimatedNodeCount = 0 if nodesToRunQueuedJobs[nodeShape] == 0 \
else max(1, self._round(nodesToRunQueuedJobs[nodeShape]))
logger.debug("Estimating %i nodes of shape %s" % (estimatedNodeCount, nodeShape))
# Use inertia parameter to smooth out fluctuations according to an exponentially
# weighted moving average.
estimatedNodeCount = self.smoothEstimate(nodeShape, estimatedNodeCount)
# If we're scaling a non-preemptable node type, we need to see if we have a
# deficit of preemptable nodes of this type that we should compensate for.
if not nodeShape.preemptable:
compensation = self.config.preemptableCompensation
assert 0.0 <= compensation <= 1.0
# The number of nodes we provision as compensation for missing preemptable
# nodes is the product of the deficit (the number of preemptable nodes we did
# _not_ allocate) and configuration preference.
compensationNodes = self._round(self.preemptableNodeDeficit[nodeType] * compensation)
if compensationNodes > 0:
logger.debug('Adding %d non-preemptable nodes of type %s to compensate for a '
'deficit of %d preemptable ones.', compensationNodes,
nodeType,
self.preemptableNodeDeficit[nodeType])
estimatedNodeCount += compensationNodes
logger.debug("Currently %i nodes of type %s in cluster" % (currentNodeCounts[nodeShape],
nodeType))
if self.leader.toilMetrics:
self.leader.toilMetrics.logClusterSize(nodeType=nodeType,
currentSize=currentNodeCounts[nodeShape],
desiredSize=estimatedNodeCount)
# Bound number using the max and min node parameters
if estimatedNodeCount > self.maxNodes[nodeShape]:
logger.debug('Limiting the estimated number of necessary %s (%s) to the '
'configured maximum (%s).', nodeType,
estimatedNodeCount,
self.maxNodes[nodeShape])
estimatedNodeCount = self.maxNodes[nodeShape]
elif estimatedNodeCount < self.minNodes[nodeShape]:
logger.debug('Raising the estimated number of necessary %s (%s) to the '
'configured minimum (%s).', nodeType,
estimatedNodeCount,
self.minNodes[nodeShape])
estimatedNodeCount = self.minNodes[nodeShape]
estimatedNodeCounts[nodeShape] = estimatedNodeCount
return estimatedNodeCounts | python | def getEstimatedNodeCounts(self, queuedJobShapes, currentNodeCounts):
nodesToRunQueuedJobs = binPacking(jobShapes=queuedJobShapes,
nodeShapes=self.nodeShapes,
goalTime=self.targetTime)
estimatedNodeCounts = {}
for nodeShape in self.nodeShapes:
nodeType = self.nodeShapeToType[nodeShape]
logger.debug("Nodes of type %s to run queued jobs = "
"%s" % (nodeType, nodesToRunQueuedJobs[nodeShape]))
# Actual calculation of the estimated number of nodes required
estimatedNodeCount = 0 if nodesToRunQueuedJobs[nodeShape] == 0 \
else max(1, self._round(nodesToRunQueuedJobs[nodeShape]))
logger.debug("Estimating %i nodes of shape %s" % (estimatedNodeCount, nodeShape))
# Use inertia parameter to smooth out fluctuations according to an exponentially
# weighted moving average.
estimatedNodeCount = self.smoothEstimate(nodeShape, estimatedNodeCount)
# If we're scaling a non-preemptable node type, we need to see if we have a
# deficit of preemptable nodes of this type that we should compensate for.
if not nodeShape.preemptable:
compensation = self.config.preemptableCompensation
assert 0.0 <= compensation <= 1.0
# The number of nodes we provision as compensation for missing preemptable
# nodes is the product of the deficit (the number of preemptable nodes we did
# _not_ allocate) and configuration preference.
compensationNodes = self._round(self.preemptableNodeDeficit[nodeType] * compensation)
if compensationNodes > 0:
logger.debug('Adding %d non-preemptable nodes of type %s to compensate for a '
'deficit of %d preemptable ones.', compensationNodes,
nodeType,
self.preemptableNodeDeficit[nodeType])
estimatedNodeCount += compensationNodes
logger.debug("Currently %i nodes of type %s in cluster" % (currentNodeCounts[nodeShape],
nodeType))
if self.leader.toilMetrics:
self.leader.toilMetrics.logClusterSize(nodeType=nodeType,
currentSize=currentNodeCounts[nodeShape],
desiredSize=estimatedNodeCount)
# Bound number using the max and min node parameters
if estimatedNodeCount > self.maxNodes[nodeShape]:
logger.debug('Limiting the estimated number of necessary %s (%s) to the '
'configured maximum (%s).', nodeType,
estimatedNodeCount,
self.maxNodes[nodeShape])
estimatedNodeCount = self.maxNodes[nodeShape]
elif estimatedNodeCount < self.minNodes[nodeShape]:
logger.debug('Raising the estimated number of necessary %s (%s) to the '
'configured minimum (%s).', nodeType,
estimatedNodeCount,
self.minNodes[nodeShape])
estimatedNodeCount = self.minNodes[nodeShape]
estimatedNodeCounts[nodeShape] = estimatedNodeCount
return estimatedNodeCounts | [
"def",
"getEstimatedNodeCounts",
"(",
"self",
",",
"queuedJobShapes",
",",
"currentNodeCounts",
")",
":",
"nodesToRunQueuedJobs",
"=",
"binPacking",
"(",
"jobShapes",
"=",
"queuedJobShapes",
",",
"nodeShapes",
"=",
"self",
".",
"nodeShapes",
",",
"goalTime",
"=",
... | Given the resource requirements of queued jobs and the current size of the cluster, returns
a dict mapping from nodeShape to the number of nodes we want in the cluster right now. | [
"Given",
"the",
"resource",
"requirements",
"of",
"queued",
"jobs",
"and",
"the",
"current",
"size",
"of",
"the",
"cluster",
"returns",
"a",
"dict",
"mapping",
"from",
"nodeShape",
"to",
"the",
"number",
"of",
"nodes",
"we",
"want",
"in",
"the",
"cluster",
... | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/clusterScaler.py#L473-L533 |
225,184 | DataBiosphere/toil | src/toil/provisioners/clusterScaler.py | ClusterScaler.setNodeCount | def setNodeCount(self, nodeType, numNodes, preemptable=False, force=False):
"""
Attempt to grow or shrink the number of preemptable or non-preemptable worker nodes in
the cluster to the given value, or as close a value as possible, and, after performing
the necessary additions or removals of worker nodes, return the resulting number of
preemptable or non-preemptable nodes currently in the cluster.
:param str nodeType: The node type to add or remove.
:param int numNodes: Desired size of the cluster
:param bool preemptable: whether the added nodes will be preemptable, i.e. whether they
may be removed spontaneously by the underlying platform at any time.
:param bool force: If False, the provisioner is allowed to deviate from the given number
of nodes. For example, when downsizing a cluster, a provisioner might leave nodes
running if they have active jobs running on them.
:rtype: int :return: the number of worker nodes in the cluster after making the necessary
adjustments. This value should be, but is not guaranteed to be, close or equal to
the `numNodes` argument. It represents the closest possible approximation of the
actual cluster size at the time this method returns.
"""
for attempt in retry(predicate=self.provisioner.retryPredicate):
with attempt:
workerInstances = self.getNodes(preemptable=preemptable)
logger.debug("Cluster contains %i instances" % len(workerInstances))
# Reduce to nodes of the correct type
workerInstances = {node:workerInstances[node] for node in workerInstances if node.nodeType == nodeType}
ignoredNodes = [node for node in workerInstances if node.privateIP in self.ignoredNodes]
numIgnoredNodes = len(ignoredNodes)
numCurrentNodes = len(workerInstances)
logger.debug("Cluster contains %i instances of type %s (%i ignored and draining jobs until "
"they can be safely terminated)" % (numCurrentNodes, nodeType, numIgnoredNodes))
if not force:
delta = numNodes - (numCurrentNodes - numIgnoredNodes)
else:
delta = numNodes - numCurrentNodes
if delta > 0 and numIgnoredNodes > 0:
# We can un-ignore a few nodes to compensate for the additional nodes we want.
numNodesToUnignore = min(delta, numIgnoredNodes)
logger.debug('Unignoring %i nodes because we want to scale back up again.' % numNodesToUnignore)
delta -= numNodesToUnignore
for node in ignoredNodes[:numNodesToUnignore]:
self.ignoredNodes.remove(node.privateIP)
self.leader.batchSystem.unignoreNode(node.privateIP)
if delta > 0:
logger.info('Adding %i %s nodes to get to desired cluster size of %i.',
delta,
'preemptable' if preemptable else 'non-preemptable',
numNodes)
numNodes = numCurrentNodes + self._addNodes(nodeType, numNodes=delta,
preemptable=preemptable)
elif delta < 0:
logger.info('Removing %i %s nodes to get to desired cluster size of %i.', -delta, 'preemptable' if preemptable else 'non-preemptable', numNodes)
numNodes = numCurrentNodes - self._removeNodes(workerInstances,
nodeType = nodeType,
numNodes=-delta,
preemptable=preemptable,
force=force)
else:
if not force:
logger.debug('Cluster (minus ignored nodes) already at desired size of %i. Nothing to do.', numNodes)
else:
logger.debug('Cluster already at desired size of %i. Nothing to do.', numNodes)
return numNodes | python | def setNodeCount(self, nodeType, numNodes, preemptable=False, force=False):
for attempt in retry(predicate=self.provisioner.retryPredicate):
with attempt:
workerInstances = self.getNodes(preemptable=preemptable)
logger.debug("Cluster contains %i instances" % len(workerInstances))
# Reduce to nodes of the correct type
workerInstances = {node:workerInstances[node] for node in workerInstances if node.nodeType == nodeType}
ignoredNodes = [node for node in workerInstances if node.privateIP in self.ignoredNodes]
numIgnoredNodes = len(ignoredNodes)
numCurrentNodes = len(workerInstances)
logger.debug("Cluster contains %i instances of type %s (%i ignored and draining jobs until "
"they can be safely terminated)" % (numCurrentNodes, nodeType, numIgnoredNodes))
if not force:
delta = numNodes - (numCurrentNodes - numIgnoredNodes)
else:
delta = numNodes - numCurrentNodes
if delta > 0 and numIgnoredNodes > 0:
# We can un-ignore a few nodes to compensate for the additional nodes we want.
numNodesToUnignore = min(delta, numIgnoredNodes)
logger.debug('Unignoring %i nodes because we want to scale back up again.' % numNodesToUnignore)
delta -= numNodesToUnignore
for node in ignoredNodes[:numNodesToUnignore]:
self.ignoredNodes.remove(node.privateIP)
self.leader.batchSystem.unignoreNode(node.privateIP)
if delta > 0:
logger.info('Adding %i %s nodes to get to desired cluster size of %i.',
delta,
'preemptable' if preemptable else 'non-preemptable',
numNodes)
numNodes = numCurrentNodes + self._addNodes(nodeType, numNodes=delta,
preemptable=preemptable)
elif delta < 0:
logger.info('Removing %i %s nodes to get to desired cluster size of %i.', -delta, 'preemptable' if preemptable else 'non-preemptable', numNodes)
numNodes = numCurrentNodes - self._removeNodes(workerInstances,
nodeType = nodeType,
numNodes=-delta,
preemptable=preemptable,
force=force)
else:
if not force:
logger.debug('Cluster (minus ignored nodes) already at desired size of %i. Nothing to do.', numNodes)
else:
logger.debug('Cluster already at desired size of %i. Nothing to do.', numNodes)
return numNodes | [
"def",
"setNodeCount",
"(",
"self",
",",
"nodeType",
",",
"numNodes",
",",
"preemptable",
"=",
"False",
",",
"force",
"=",
"False",
")",
":",
"for",
"attempt",
"in",
"retry",
"(",
"predicate",
"=",
"self",
".",
"provisioner",
".",
"retryPredicate",
")",
... | Attempt to grow or shrink the number of preemptable or non-preemptable worker nodes in
the cluster to the given value, or as close a value as possible, and, after performing
the necessary additions or removals of worker nodes, return the resulting number of
preemptable or non-preemptable nodes currently in the cluster.
:param str nodeType: The node type to add or remove.
:param int numNodes: Desired size of the cluster
:param bool preemptable: whether the added nodes will be preemptable, i.e. whether they
may be removed spontaneously by the underlying platform at any time.
:param bool force: If False, the provisioner is allowed to deviate from the given number
of nodes. For example, when downsizing a cluster, a provisioner might leave nodes
running if they have active jobs running on them.
:rtype: int :return: the number of worker nodes in the cluster after making the necessary
adjustments. This value should be, but is not guaranteed to be, close or equal to
the `numNodes` argument. It represents the closest possible approximation of the
actual cluster size at the time this method returns. | [
"Attempt",
"to",
"grow",
"or",
"shrink",
"the",
"number",
"of",
"preemptable",
"or",
"non",
"-",
"preemptable",
"worker",
"nodes",
"in",
"the",
"cluster",
"to",
"the",
"given",
"value",
"or",
"as",
"close",
"a",
"value",
"as",
"possible",
"and",
"after",
... | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/clusterScaler.py#L565-L630 |
225,185 | DataBiosphere/toil | src/toil/provisioners/clusterScaler.py | ClusterScaler.getNodes | def getNodes(self, preemptable):
"""
Returns a dictionary mapping node identifiers of preemptable or non-preemptable nodes to
NodeInfo objects, one for each node.
This method is the definitive source on nodes in cluster, & is responsible for consolidating
cluster state between the provisioner & batch system.
:param bool preemptable: If True (False) only (non-)preemptable nodes will be returned.
If None, all nodes will be returned.
:rtype: dict[Node, NodeInfo]
"""
def _getInfo(allMesosNodes, ip):
info = None
try:
info = allMesosNodes[ip]
except KeyError:
# never seen by mesos - 1 of 3 possibilities:
# 1) node is still launching mesos & will come online soon
# 2) no jobs have been assigned to this worker. This means the executor was never
# launched, so we don't even get an executorInfo back indicating 0 workers running
# 3) mesos crashed before launching, worker will never come online
# In all 3 situations it's safe to fake executor info with 0 workers, since in all
# cases there are no workers running.
info = NodeInfo(coresTotal=1, coresUsed=0, requestedCores=0,
memoryTotal=1, memoryUsed=0, requestedMemory=0,
workers=0)
else:
# Node was tracked but we haven't seen this in the last 10 minutes
inUse = self.leader.batchSystem.nodeInUse(ip)
if not inUse and info:
# The node hasn't reported in the last 10 minutes & last we know
# there weren't any tasks running. We will fake executorInfo with no
# worker to reflect this, since otherwise this node will never
# be considered for termination
info.workers = 0
else:
pass
# despite the node not reporting to mesos jobs may still be running
# so we can't terminate the node
return info
allMesosNodes = self.leader.batchSystem.getNodes(preemptable, timeout=None)
recentMesosNodes = self.leader.batchSystem.getNodes(preemptable)
provisionerNodes = self.provisioner.getProvisionedWorkers(nodeType=None, preemptable=preemptable)
if len(recentMesosNodes) != len(provisionerNodes):
logger.debug("Consolidating state between mesos and provisioner")
nodeToInfo = {}
# fixme: what happens if awsFilterImpairedNodes is used?
# if this assertion is false it means that user-managed nodes are being
# used that are outside the provisioner's control
# this would violate many basic assumptions in autoscaling so it currently not allowed
for node, ip in ((node, node.privateIP) for node in provisionerNodes):
info = None
if ip not in recentMesosNodes:
logger.debug("Worker node at %s is not reporting executor information", ip)
# we don't have up to date information about the node
info = _getInfo(allMesosNodes, ip)
else:
# mesos knows about the ip & we have up to date information - easy!
info = recentMesosNodes[ip]
# add info to dict to return
nodeToInfo[node] = info
return nodeToInfo | python | def getNodes(self, preemptable):
def _getInfo(allMesosNodes, ip):
info = None
try:
info = allMesosNodes[ip]
except KeyError:
# never seen by mesos - 1 of 3 possibilities:
# 1) node is still launching mesos & will come online soon
# 2) no jobs have been assigned to this worker. This means the executor was never
# launched, so we don't even get an executorInfo back indicating 0 workers running
# 3) mesos crashed before launching, worker will never come online
# In all 3 situations it's safe to fake executor info with 0 workers, since in all
# cases there are no workers running.
info = NodeInfo(coresTotal=1, coresUsed=0, requestedCores=0,
memoryTotal=1, memoryUsed=0, requestedMemory=0,
workers=0)
else:
# Node was tracked but we haven't seen this in the last 10 minutes
inUse = self.leader.batchSystem.nodeInUse(ip)
if not inUse and info:
# The node hasn't reported in the last 10 minutes & last we know
# there weren't any tasks running. We will fake executorInfo with no
# worker to reflect this, since otherwise this node will never
# be considered for termination
info.workers = 0
else:
pass
# despite the node not reporting to mesos jobs may still be running
# so we can't terminate the node
return info
allMesosNodes = self.leader.batchSystem.getNodes(preemptable, timeout=None)
recentMesosNodes = self.leader.batchSystem.getNodes(preemptable)
provisionerNodes = self.provisioner.getProvisionedWorkers(nodeType=None, preemptable=preemptable)
if len(recentMesosNodes) != len(provisionerNodes):
logger.debug("Consolidating state between mesos and provisioner")
nodeToInfo = {}
# fixme: what happens if awsFilterImpairedNodes is used?
# if this assertion is false it means that user-managed nodes are being
# used that are outside the provisioner's control
# this would violate many basic assumptions in autoscaling so it currently not allowed
for node, ip in ((node, node.privateIP) for node in provisionerNodes):
info = None
if ip not in recentMesosNodes:
logger.debug("Worker node at %s is not reporting executor information", ip)
# we don't have up to date information about the node
info = _getInfo(allMesosNodes, ip)
else:
# mesos knows about the ip & we have up to date information - easy!
info = recentMesosNodes[ip]
# add info to dict to return
nodeToInfo[node] = info
return nodeToInfo | [
"def",
"getNodes",
"(",
"self",
",",
"preemptable",
")",
":",
"def",
"_getInfo",
"(",
"allMesosNodes",
",",
"ip",
")",
":",
"info",
"=",
"None",
"try",
":",
"info",
"=",
"allMesosNodes",
"[",
"ip",
"]",
"except",
"KeyError",
":",
"# never seen by mesos - 1... | Returns a dictionary mapping node identifiers of preemptable or non-preemptable nodes to
NodeInfo objects, one for each node.
This method is the definitive source on nodes in cluster, & is responsible for consolidating
cluster state between the provisioner & batch system.
:param bool preemptable: If True (False) only (non-)preemptable nodes will be returned.
If None, all nodes will be returned.
:rtype: dict[Node, NodeInfo] | [
"Returns",
"a",
"dictionary",
"mapping",
"node",
"identifiers",
"of",
"preemptable",
"or",
"non",
"-",
"preemptable",
"nodes",
"to",
"NodeInfo",
"objects",
"one",
"for",
"each",
"node",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/clusterScaler.py#L727-L792 |
225,186 | DataBiosphere/toil | src/toil/provisioners/clusterScaler.py | ScalerThread.check | def check(self):
"""
Attempt to join any existing scaler threads that may have died or finished. This insures
any exceptions raised in the threads are propagated in a timely fashion.
"""
try:
self.join(timeout=0)
except Exception as e:
logger.exception(e)
raise | python | def check(self):
try:
self.join(timeout=0)
except Exception as e:
logger.exception(e)
raise | [
"def",
"check",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"join",
"(",
"timeout",
"=",
"0",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"exception",
"(",
"e",
")",
"raise"
] | Attempt to join any existing scaler threads that may have died or finished. This insures
any exceptions raised in the threads are propagated in a timely fashion. | [
"Attempt",
"to",
"join",
"any",
"existing",
"scaler",
"threads",
"that",
"may",
"have",
"died",
"or",
"finished",
".",
"This",
"insures",
"any",
"exceptions",
"raised",
"in",
"the",
"threads",
"are",
"propagated",
"in",
"a",
"timely",
"fashion",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/clusterScaler.py#L834-L843 |
225,187 | DataBiosphere/toil | src/toil/provisioners/clusterScaler.py | ScalerThread.shutdown | def shutdown(self):
"""
Shutdown the cluster.
"""
self.stop = True
if self.stats:
self.stats.shutDownStats()
self.join() | python | def shutdown(self):
self.stop = True
if self.stats:
self.stats.shutDownStats()
self.join() | [
"def",
"shutdown",
"(",
"self",
")",
":",
"self",
".",
"stop",
"=",
"True",
"if",
"self",
".",
"stats",
":",
"self",
".",
"stats",
".",
"shutDownStats",
"(",
")",
"self",
".",
"join",
"(",
")"
] | Shutdown the cluster. | [
"Shutdown",
"the",
"cluster",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/clusterScaler.py#L845-L852 |
225,188 | DataBiosphere/toil | src/toil/jobStores/abstractJobStore.py | AbstractJobStore.initialize | def initialize(self, config):
"""
Create the physical storage for this job store, allocate a workflow ID and persist the
given Toil configuration to the store.
:param toil.common.Config config: the Toil configuration to initialize this job store
with. The given configuration will be updated with the newly allocated workflow ID.
:raises JobStoreExistsException: if the physical storage for this job store already exists
"""
assert config.workflowID is None
config.workflowID = str(uuid4())
logger.debug("The workflow ID is: '%s'" % config.workflowID)
self.__config = config
self.writeConfig() | python | def initialize(self, config):
assert config.workflowID is None
config.workflowID = str(uuid4())
logger.debug("The workflow ID is: '%s'" % config.workflowID)
self.__config = config
self.writeConfig() | [
"def",
"initialize",
"(",
"self",
",",
"config",
")",
":",
"assert",
"config",
".",
"workflowID",
"is",
"None",
"config",
".",
"workflowID",
"=",
"str",
"(",
"uuid4",
"(",
")",
")",
"logger",
".",
"debug",
"(",
"\"The workflow ID is: '%s'\"",
"%",
"config"... | Create the physical storage for this job store, allocate a workflow ID and persist the
given Toil configuration to the store.
:param toil.common.Config config: the Toil configuration to initialize this job store
with. The given configuration will be updated with the newly allocated workflow ID.
:raises JobStoreExistsException: if the physical storage for this job store already exists | [
"Create",
"the",
"physical",
"storage",
"for",
"this",
"job",
"store",
"allocate",
"a",
"workflow",
"ID",
"and",
"persist",
"the",
"given",
"Toil",
"configuration",
"to",
"the",
"store",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/jobStores/abstractJobStore.py#L123-L137 |
225,189 | DataBiosphere/toil | src/toil/jobStores/abstractJobStore.py | AbstractJobStore.setRootJob | def setRootJob(self, rootJobStoreID):
"""
Set the root job of the workflow backed by this job store
:param str rootJobStoreID: The ID of the job to set as root
"""
with self.writeSharedFileStream(self.rootJobStoreIDFileName) as f:
f.write(rootJobStoreID.encode('utf-8')) | python | def setRootJob(self, rootJobStoreID):
with self.writeSharedFileStream(self.rootJobStoreIDFileName) as f:
f.write(rootJobStoreID.encode('utf-8')) | [
"def",
"setRootJob",
"(",
"self",
",",
"rootJobStoreID",
")",
":",
"with",
"self",
".",
"writeSharedFileStream",
"(",
"self",
".",
"rootJobStoreIDFileName",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"rootJobStoreID",
".",
"encode",
"(",
"'utf-8'",
")",
... | Set the root job of the workflow backed by this job store
:param str rootJobStoreID: The ID of the job to set as root | [
"Set",
"the",
"root",
"job",
"of",
"the",
"workflow",
"backed",
"by",
"this",
"job",
"store"
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/jobStores/abstractJobStore.py#L170-L177 |
225,190 | DataBiosphere/toil | src/toil/jobStores/abstractJobStore.py | AbstractJobStore.loadRootJob | def loadRootJob(self):
"""
Loads the root job in the current job store.
:raises toil.job.JobException: If no root job is set or if the root job doesn't exist in
this job store
:return: The root job.
:rtype: toil.jobGraph.JobGraph
"""
try:
with self.readSharedFileStream(self.rootJobStoreIDFileName) as f:
rootJobStoreID = f.read().decode('utf-8')
except NoSuchFileException:
raise JobException('No job has been set as the root in this job store')
if not self.exists(rootJobStoreID):
raise JobException("The root job '%s' doesn't exist. Either the Toil workflow "
"is finished or has never been started" % rootJobStoreID)
return self.load(rootJobStoreID) | python | def loadRootJob(self):
try:
with self.readSharedFileStream(self.rootJobStoreIDFileName) as f:
rootJobStoreID = f.read().decode('utf-8')
except NoSuchFileException:
raise JobException('No job has been set as the root in this job store')
if not self.exists(rootJobStoreID):
raise JobException("The root job '%s' doesn't exist. Either the Toil workflow "
"is finished or has never been started" % rootJobStoreID)
return self.load(rootJobStoreID) | [
"def",
"loadRootJob",
"(",
"self",
")",
":",
"try",
":",
"with",
"self",
".",
"readSharedFileStream",
"(",
"self",
".",
"rootJobStoreIDFileName",
")",
"as",
"f",
":",
"rootJobStoreID",
"=",
"f",
".",
"read",
"(",
")",
".",
"decode",
"(",
"'utf-8'",
")",
... | Loads the root job in the current job store.
:raises toil.job.JobException: If no root job is set or if the root job doesn't exist in
this job store
:return: The root job.
:rtype: toil.jobGraph.JobGraph | [
"Loads",
"the",
"root",
"job",
"in",
"the",
"current",
"job",
"store",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/jobStores/abstractJobStore.py#L179-L196 |
225,191 | DataBiosphere/toil | src/toil/jobStores/abstractJobStore.py | AbstractJobStore.createRootJob | def createRootJob(self, *args, **kwargs):
"""
Create a new job and set it as the root job in this job store
:rtype: toil.jobGraph.JobGraph
"""
rootJob = self.create(*args, **kwargs)
self.setRootJob(rootJob.jobStoreID)
return rootJob | python | def createRootJob(self, *args, **kwargs):
rootJob = self.create(*args, **kwargs)
self.setRootJob(rootJob.jobStoreID)
return rootJob | [
"def",
"createRootJob",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"rootJob",
"=",
"self",
".",
"create",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"setRootJob",
"(",
"rootJob",
".",
"jobStoreID",
")",
"ret... | Create a new job and set it as the root job in this job store
:rtype: toil.jobGraph.JobGraph | [
"Create",
"a",
"new",
"job",
"and",
"set",
"it",
"as",
"the",
"root",
"job",
"in",
"this",
"job",
"store"
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/jobStores/abstractJobStore.py#L200-L208 |
225,192 | DataBiosphere/toil | src/toil/jobStores/abstractJobStore.py | AbstractJobStore._jobStoreClasses | def _jobStoreClasses(self):
"""
A list of concrete AbstractJobStore implementations whose dependencies are installed.
:rtype: list[AbstractJobStore]
"""
jobStoreClassNames = (
"toil.jobStores.azureJobStore.AzureJobStore",
"toil.jobStores.fileJobStore.FileJobStore",
"toil.jobStores.googleJobStore.GoogleJobStore",
"toil.jobStores.aws.jobStore.AWSJobStore",
"toil.jobStores.abstractJobStore.JobStoreSupport")
jobStoreClasses = []
for className in jobStoreClassNames:
moduleName, className = className.rsplit('.', 1)
from importlib import import_module
try:
module = import_module(moduleName)
except ImportError:
logger.debug("Unable to import '%s' as is expected if the corresponding extra was "
"omitted at installation time.", moduleName)
else:
jobStoreClass = getattr(module, className)
jobStoreClasses.append(jobStoreClass)
return jobStoreClasses | python | def _jobStoreClasses(self):
jobStoreClassNames = (
"toil.jobStores.azureJobStore.AzureJobStore",
"toil.jobStores.fileJobStore.FileJobStore",
"toil.jobStores.googleJobStore.GoogleJobStore",
"toil.jobStores.aws.jobStore.AWSJobStore",
"toil.jobStores.abstractJobStore.JobStoreSupport")
jobStoreClasses = []
for className in jobStoreClassNames:
moduleName, className = className.rsplit('.', 1)
from importlib import import_module
try:
module = import_module(moduleName)
except ImportError:
logger.debug("Unable to import '%s' as is expected if the corresponding extra was "
"omitted at installation time.", moduleName)
else:
jobStoreClass = getattr(module, className)
jobStoreClasses.append(jobStoreClass)
return jobStoreClasses | [
"def",
"_jobStoreClasses",
"(",
"self",
")",
":",
"jobStoreClassNames",
"=",
"(",
"\"toil.jobStores.azureJobStore.AzureJobStore\"",
",",
"\"toil.jobStores.fileJobStore.FileJobStore\"",
",",
"\"toil.jobStores.googleJobStore.GoogleJobStore\"",
",",
"\"toil.jobStores.aws.jobStore.AWSJobSt... | A list of concrete AbstractJobStore implementations whose dependencies are installed.
:rtype: list[AbstractJobStore] | [
"A",
"list",
"of",
"concrete",
"AbstractJobStore",
"implementations",
"whose",
"dependencies",
"are",
"installed",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/jobStores/abstractJobStore.py#L222-L246 |
225,193 | DataBiosphere/toil | src/toil/jobStores/abstractJobStore.py | AbstractJobStore._findJobStoreForUrl | def _findJobStoreForUrl(self, url, export=False):
"""
Returns the AbstractJobStore subclass that supports the given URL.
:param urlparse.ParseResult url: The given URL
:param bool export: The URL for
:rtype: toil.jobStore.AbstractJobStore
"""
for jobStoreCls in self._jobStoreClasses:
if jobStoreCls._supportsUrl(url, export):
return jobStoreCls
raise RuntimeError("No job store implementation supports %sporting for URL '%s'" %
('ex' if export else 'im', url.geturl())) | python | def _findJobStoreForUrl(self, url, export=False):
for jobStoreCls in self._jobStoreClasses:
if jobStoreCls._supportsUrl(url, export):
return jobStoreCls
raise RuntimeError("No job store implementation supports %sporting for URL '%s'" %
('ex' if export else 'im', url.geturl())) | [
"def",
"_findJobStoreForUrl",
"(",
"self",
",",
"url",
",",
"export",
"=",
"False",
")",
":",
"for",
"jobStoreCls",
"in",
"self",
".",
"_jobStoreClasses",
":",
"if",
"jobStoreCls",
".",
"_supportsUrl",
"(",
"url",
",",
"export",
")",
":",
"return",
"jobSto... | Returns the AbstractJobStore subclass that supports the given URL.
:param urlparse.ParseResult url: The given URL
:param bool export: The URL for
:rtype: toil.jobStore.AbstractJobStore | [
"Returns",
"the",
"AbstractJobStore",
"subclass",
"that",
"supports",
"the",
"given",
"URL",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/jobStores/abstractJobStore.py#L248-L260 |
225,194 | DataBiosphere/toil | src/toil/jobStores/abstractJobStore.py | AbstractJobStore.importFile | def importFile(self, srcUrl, sharedFileName=None, hardlink=False):
"""
Imports the file at the given URL into job store. The ID of the newly imported file is
returned. If the name of a shared file name is provided, the file will be imported as
such and None is returned.
Currently supported schemes are:
- 's3' for objects in Amazon S3
e.g. s3://bucket/key
- 'wasb' for blobs in Azure Blob Storage
e.g. wasb://container/blob
- 'file' for local files
e.g. file:///local/file/path
- 'http'
e.g. http://someurl.com/path
- 'gs'
e.g. gs://bucket/file
:param str srcUrl: URL that points to a file or object in the storage mechanism of a
supported URL scheme e.g. a blob in an Azure Blob Storage container.
:param str sharedFileName: Optional name to assign to the imported file within the job store
:return: The jobStoreFileId of the imported file or None if sharedFileName was given
:rtype: toil.fileStore.FileID or None
"""
# Note that the helper method _importFile is used to read from the source and write to
# destination (which is the current job store in this case). To implement any
# optimizations that circumvent this, the _importFile method should be overridden by
# subclasses of AbstractJobStore.
srcUrl = urlparse.urlparse(srcUrl)
otherCls = self._findJobStoreForUrl(srcUrl)
return self._importFile(otherCls, srcUrl, sharedFileName=sharedFileName, hardlink=hardlink) | python | def importFile(self, srcUrl, sharedFileName=None, hardlink=False):
# Note that the helper method _importFile is used to read from the source and write to
# destination (which is the current job store in this case). To implement any
# optimizations that circumvent this, the _importFile method should be overridden by
# subclasses of AbstractJobStore.
srcUrl = urlparse.urlparse(srcUrl)
otherCls = self._findJobStoreForUrl(srcUrl)
return self._importFile(otherCls, srcUrl, sharedFileName=sharedFileName, hardlink=hardlink) | [
"def",
"importFile",
"(",
"self",
",",
"srcUrl",
",",
"sharedFileName",
"=",
"None",
",",
"hardlink",
"=",
"False",
")",
":",
"# Note that the helper method _importFile is used to read from the source and write to",
"# destination (which is the current job store in this case). To i... | Imports the file at the given URL into job store. The ID of the newly imported file is
returned. If the name of a shared file name is provided, the file will be imported as
such and None is returned.
Currently supported schemes are:
- 's3' for objects in Amazon S3
e.g. s3://bucket/key
- 'wasb' for blobs in Azure Blob Storage
e.g. wasb://container/blob
- 'file' for local files
e.g. file:///local/file/path
- 'http'
e.g. http://someurl.com/path
- 'gs'
e.g. gs://bucket/file
:param str srcUrl: URL that points to a file or object in the storage mechanism of a
supported URL scheme e.g. a blob in an Azure Blob Storage container.
:param str sharedFileName: Optional name to assign to the imported file within the job store
:return: The jobStoreFileId of the imported file or None if sharedFileName was given
:rtype: toil.fileStore.FileID or None | [
"Imports",
"the",
"file",
"at",
"the",
"given",
"URL",
"into",
"job",
"store",
".",
"The",
"ID",
"of",
"the",
"newly",
"imported",
"file",
"is",
"returned",
".",
"If",
"the",
"name",
"of",
"a",
"shared",
"file",
"name",
"is",
"provided",
"the",
"file",... | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/jobStores/abstractJobStore.py#L262-L299 |
225,195 | DataBiosphere/toil | src/toil/jobStores/abstractJobStore.py | AbstractJobStore._exportFile | def _exportFile(self, otherCls, jobStoreFileID, url):
"""
Refer to exportFile docstring for information about this method.
:param AbstractJobStore otherCls: The concrete subclass of AbstractJobStore that supports
exporting to the given URL. Note that the type annotation here is not completely
accurate. This is not an instance, it's a class, but there is no way to reflect
that in :pep:`484` type hints.
:param str jobStoreFileID: The id of the file that will be exported.
:param urlparse.ParseResult url: The parsed URL of the file to export to.
"""
with self.readFileStream(jobStoreFileID) as readable:
otherCls._writeToUrl(readable, url) | python | def _exportFile(self, otherCls, jobStoreFileID, url):
with self.readFileStream(jobStoreFileID) as readable:
otherCls._writeToUrl(readable, url) | [
"def",
"_exportFile",
"(",
"self",
",",
"otherCls",
",",
"jobStoreFileID",
",",
"url",
")",
":",
"with",
"self",
".",
"readFileStream",
"(",
"jobStoreFileID",
")",
"as",
"readable",
":",
"otherCls",
".",
"_writeToUrl",
"(",
"readable",
",",
"url",
")"
] | Refer to exportFile docstring for information about this method.
:param AbstractJobStore otherCls: The concrete subclass of AbstractJobStore that supports
exporting to the given URL. Note that the type annotation here is not completely
accurate. This is not an instance, it's a class, but there is no way to reflect
that in :pep:`484` type hints.
:param str jobStoreFileID: The id of the file that will be exported.
:param urlparse.ParseResult url: The parsed URL of the file to export to. | [
"Refer",
"to",
"exportFile",
"docstring",
"for",
"information",
"about",
"this",
"method",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/jobStores/abstractJobStore.py#L346-L360 |
225,196 | DataBiosphere/toil | src/toil/lib/ec2nodes.py | parseStorage | def parseStorage(storageData):
"""
Parses EC2 JSON storage param string into a number.
Examples:
"2 x 160 SSD"
"3 x 2000 HDD"
"EBS only"
"1 x 410"
"8 x 1.9 NVMe SSD"
:param str storageData: EC2 JSON storage param string.
:return: Two floats representing: (# of disks), and (disk_capacity in GiB of each disk).
"""
if storageData == "EBS only":
return [0, 0]
else:
specs = storageData.strip().split()
if isNumber(specs[0]) and specs[1] == 'x' and isNumber(specs[2]):
return float(specs[0].replace(',', '')), float(specs[2].replace(',', ''))
else:
raise RuntimeError('EC2 JSON format has likely changed. Error parsing disk specs.') | python | def parseStorage(storageData):
if storageData == "EBS only":
return [0, 0]
else:
specs = storageData.strip().split()
if isNumber(specs[0]) and specs[1] == 'x' and isNumber(specs[2]):
return float(specs[0].replace(',', '')), float(specs[2].replace(',', ''))
else:
raise RuntimeError('EC2 JSON format has likely changed. Error parsing disk specs.') | [
"def",
"parseStorage",
"(",
"storageData",
")",
":",
"if",
"storageData",
"==",
"\"EBS only\"",
":",
"return",
"[",
"0",
",",
"0",
"]",
"else",
":",
"specs",
"=",
"storageData",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"if",
"isNumber",
"(",
"... | Parses EC2 JSON storage param string into a number.
Examples:
"2 x 160 SSD"
"3 x 2000 HDD"
"EBS only"
"1 x 410"
"8 x 1.9 NVMe SSD"
:param str storageData: EC2 JSON storage param string.
:return: Two floats representing: (# of disks), and (disk_capacity in GiB of each disk). | [
"Parses",
"EC2",
"JSON",
"storage",
"param",
"string",
"into",
"a",
"number",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/lib/ec2nodes.py#L100-L121 |
225,197 | DataBiosphere/toil | src/toil/lib/ec2nodes.py | parseMemory | def parseMemory(memAttribute):
"""
Returns EC2 'memory' string as a float.
Format should always be '#' GiB (example: '244 GiB' or '1,952 GiB').
Amazon loves to put commas in their numbers, so we have to accommodate that.
If the syntax ever changes, this will raise.
:param memAttribute: EC2 JSON memory param string.
:return: A float representing memory in GiB.
"""
mem = memAttribute.replace(',', '').split()
if mem[1] == 'GiB':
return float(mem[0])
else:
raise RuntimeError('EC2 JSON format has likely changed. Error parsing memory.') | python | def parseMemory(memAttribute):
mem = memAttribute.replace(',', '').split()
if mem[1] == 'GiB':
return float(mem[0])
else:
raise RuntimeError('EC2 JSON format has likely changed. Error parsing memory.') | [
"def",
"parseMemory",
"(",
"memAttribute",
")",
":",
"mem",
"=",
"memAttribute",
".",
"replace",
"(",
"','",
",",
"''",
")",
".",
"split",
"(",
")",
"if",
"mem",
"[",
"1",
"]",
"==",
"'GiB'",
":",
"return",
"float",
"(",
"mem",
"[",
"0",
"]",
")"... | Returns EC2 'memory' string as a float.
Format should always be '#' GiB (example: '244 GiB' or '1,952 GiB').
Amazon loves to put commas in their numbers, so we have to accommodate that.
If the syntax ever changes, this will raise.
:param memAttribute: EC2 JSON memory param string.
:return: A float representing memory in GiB. | [
"Returns",
"EC2",
"memory",
"string",
"as",
"a",
"float",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/lib/ec2nodes.py#L124-L139 |
225,198 | DataBiosphere/toil | src/toil/lib/ec2nodes.py | fetchEC2InstanceDict | def fetchEC2InstanceDict(regionNickname=None):
"""
Fetches EC2 instances types by region programmatically using the AWS pricing API.
See: https://aws.amazon.com/blogs/aws/new-aws-price-list-api/
:return: A dict of InstanceType objects, where the key is the string:
aws instance name (example: 't2.micro'), and the value is an
InstanceType object representing that aws instance name.
"""
ec2Source = 'https://pricing.us-east-1.amazonaws.com/offers/v1.0/aws/AmazonEC2/current/index.json'
if regionNickname is None:
regionNickname = 'us-west-2'
region = EC2Regions[regionNickname] # JSON uses verbose region names as keys
ec2InstanceList = []
# summon the API to grab the latest instance types/prices/specs
response = requests.get(ec2Source)
if response.ok:
ec2InstanceList = parseEC2Json2List(jsontext=response.text, region=region)
if ec2InstanceList:
return dict((_.name, _) for _ in ec2InstanceList)
else:
from toil.lib import generatedEC2Lists as defaultEC2
return dict((_.name, _) for _ in defaultEC2.ec2InstancesByRegion[regionNickname]) | python | def fetchEC2InstanceDict(regionNickname=None):
ec2Source = 'https://pricing.us-east-1.amazonaws.com/offers/v1.0/aws/AmazonEC2/current/index.json'
if regionNickname is None:
regionNickname = 'us-west-2'
region = EC2Regions[regionNickname] # JSON uses verbose region names as keys
ec2InstanceList = []
# summon the API to grab the latest instance types/prices/specs
response = requests.get(ec2Source)
if response.ok:
ec2InstanceList = parseEC2Json2List(jsontext=response.text, region=region)
if ec2InstanceList:
return dict((_.name, _) for _ in ec2InstanceList)
else:
from toil.lib import generatedEC2Lists as defaultEC2
return dict((_.name, _) for _ in defaultEC2.ec2InstancesByRegion[regionNickname]) | [
"def",
"fetchEC2InstanceDict",
"(",
"regionNickname",
"=",
"None",
")",
":",
"ec2Source",
"=",
"'https://pricing.us-east-1.amazonaws.com/offers/v1.0/aws/AmazonEC2/current/index.json'",
"if",
"regionNickname",
"is",
"None",
":",
"regionNickname",
"=",
"'us-west-2'",
"region",
... | Fetches EC2 instances types by region programmatically using the AWS pricing API.
See: https://aws.amazon.com/blogs/aws/new-aws-price-list-api/
:return: A dict of InstanceType objects, where the key is the string:
aws instance name (example: 't2.micro'), and the value is an
InstanceType object representing that aws instance name. | [
"Fetches",
"EC2",
"instances",
"types",
"by",
"region",
"programmatically",
"using",
"the",
"AWS",
"pricing",
"API",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/lib/ec2nodes.py#L142-L168 |
225,199 | DataBiosphere/toil | src/toil/lib/ec2nodes.py | parseEC2Json2List | def parseEC2Json2List(jsontext, region):
"""
Takes a JSON and returns a list of InstanceType objects representing EC2 instance params.
:param jsontext:
:param region:
:return:
"""
currentList = json.loads(jsontext)
ec2InstanceList = []
for k, v in iteritems(currentList["products"]):
if "location" in v["attributes"] and v["attributes"]["location"] == region:
# 3 tenant types: 'Host' (always $0.00; just a template?)
# 'Dedicated' (toil does not support; these are pricier)
# 'Shared' (AWS default and what toil uses)
if "tenancy" in v["attributes"] and v["attributes"]["tenancy"] == "Shared":
if v["attributes"]["operatingSystem"] == "Linux":
# The same instance can appear with multiple "operation"
# values; "RunInstances" is normal, and
# "RunInstances:<code>" is e.g. Linux with MS SQL Server
# installed.
if v["attributes"]["operation"] == "RunInstances":
disks, disk_capacity = parseStorage(v["attributes"]["storage"])
memory = parseMemory(v["attributes"]["memory"])
instance = InstanceType(name=v["attributes"]["instanceType"],
cores=v["attributes"]["vcpu"],
memory=memory,
disks=disks,
disk_capacity=disk_capacity)
if instance not in ec2InstanceList:
ec2InstanceList.append(instance)
else:
raise RuntimeError('EC2 JSON format has likely changed. '
'Duplicate instance {} found.'.format(instance))
return ec2InstanceList | python | def parseEC2Json2List(jsontext, region):
currentList = json.loads(jsontext)
ec2InstanceList = []
for k, v in iteritems(currentList["products"]):
if "location" in v["attributes"] and v["attributes"]["location"] == region:
# 3 tenant types: 'Host' (always $0.00; just a template?)
# 'Dedicated' (toil does not support; these are pricier)
# 'Shared' (AWS default and what toil uses)
if "tenancy" in v["attributes"] and v["attributes"]["tenancy"] == "Shared":
if v["attributes"]["operatingSystem"] == "Linux":
# The same instance can appear with multiple "operation"
# values; "RunInstances" is normal, and
# "RunInstances:<code>" is e.g. Linux with MS SQL Server
# installed.
if v["attributes"]["operation"] == "RunInstances":
disks, disk_capacity = parseStorage(v["attributes"]["storage"])
memory = parseMemory(v["attributes"]["memory"])
instance = InstanceType(name=v["attributes"]["instanceType"],
cores=v["attributes"]["vcpu"],
memory=memory,
disks=disks,
disk_capacity=disk_capacity)
if instance not in ec2InstanceList:
ec2InstanceList.append(instance)
else:
raise RuntimeError('EC2 JSON format has likely changed. '
'Duplicate instance {} found.'.format(instance))
return ec2InstanceList | [
"def",
"parseEC2Json2List",
"(",
"jsontext",
",",
"region",
")",
":",
"currentList",
"=",
"json",
".",
"loads",
"(",
"jsontext",
")",
"ec2InstanceList",
"=",
"[",
"]",
"for",
"k",
",",
"v",
"in",
"iteritems",
"(",
"currentList",
"[",
"\"products\"",
"]",
... | Takes a JSON and returns a list of InstanceType objects representing EC2 instance params.
:param jsontext:
:param region:
:return: | [
"Takes",
"a",
"JSON",
"and",
"returns",
"a",
"list",
"of",
"InstanceType",
"objects",
"representing",
"EC2",
"instance",
"params",
"."
] | a8252277ff814e7bee0971139c2344f88e44b644 | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/lib/ec2nodes.py#L171-L205 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.