| after_merge (strings, 28-79.6k chars) | before_merge (strings, 20-79.6k chars) | url (strings, 38-71 chars) | full_traceback (strings, 43-922k chars) | traceback_type (555 classes) |
|---|---|---|---|---|
def __enter__(self):
"""
Derive configuration from the command line options, load the job store and, on restart,
consolidate the derived configuration with the one from the previous invocation of the
workflow.
"""
setLoggingFromOptions(self.options)
config = Config()
config.setOptions(self.options)
jobStore = self.getJobStore(config.jobStore)
if not config.restart:
config.workflowAttemptNumber = 0
jobStore.initialize(config)
else:
jobStore.resume()
# Merge configuration from job store with command line options
config = jobStore.config
config.setOptions(self.options)
config.workflowAttemptNumber += 1
jobStore.writeConfig()
self.config = config
self._jobStore = jobStore
self._inContextManager = True
return self
|
def __enter__(self):
"""
Derive configuration from the command line options, load the job store and, on restart,
consolidate the derived configuration with the one from the previous invocation of the
workflow.
"""
setLoggingFromOptions(self.options)
self._inContextManager = True
self.config = Config()
self.config.setOptions(self.options)
self._jobStore = self.loadOrCreateJobStore(
self.config.jobStore, config=None if self.config.restart else self.config
)
if self.config.restart:
# Reload configuration from job store
self.config = self._jobStore.config
self.config.setOptions(self.options)
self.config.workflowAttemptNumber += 1
self._jobStore.writeConfigToStore()
return self
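# A hedged usage sketch of how the context-manager protocol above is driven.
# Job.Runner.getDefaultOptions, toil.start and toil.restart follow the
# documented Toil pattern; treat the exact entry points as assumptions and
# the locator as a placeholder.
from toil.common import Toil
from toil.job import Job

options = Job.Runner.getDefaultOptions("file:my-job-store")
with Toil(options) as toil:      # __enter__: set up config and job store
    if options.restart:
        toil.restart()           # the resume() branch above
    else:
        toil.start(Job())        # the initialize() branch above
# __exit__: the job store is deleted or kept per the clean policy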
|
https://github.com/DataBiosphere/toil/issues/869
|
cmarkello@c1master1:~$ toil clean azure:hgvm:hgvmevaljobstore
2016-05-13 23:08:36,438 INFO:toil.lib.bioio: Logging set at level: INFO
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Parsed arguments
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Checking if we have files for toil
2016-05-13 23:08:36,525 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.table.core.windows.net
2016-05-13 23:08:36,724 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.blob.core.windows.net
Traceback (most recent call last):
File "/usr/local/bin/toil", line 9, in <module>
load_entry_point('toil==3.2.0a2', 'console_scripts', 'toil')()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilMain.py", line 30, in main
module.main()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilClean.py", line 53, in main
jobStore = Toil.loadOrCreateJobStore(options.jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 589, in loadOrCreateJobStore
return AzureJobStore(account, namePrefix, config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 120, in __init__
super(AzureJobStore, self).__init__(config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/abstractJobStore.py", line 141, in __init__
with self.readSharedFileStream("config.pickle") as fileHandle:
File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 318, in readSharedFileStream
raise NoSuchFileException(sharedFileID)
toil.jobStores.abstractJobStore.NoSuchFileException: File 'a77581f6_ad82_5142_acb7_5a6b01eab3fb' does not exist
|
toil.jobStores.abstractJobStore.NoSuchFileException
|
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Clean up after a workflow invocation. Depending on the configuration, delete the job store.
"""
try:
if (
exc_type is not None
and self.config.clean == "onError"
or exc_type is None
and self.config.clean == "onSuccess"
or self.config.clean == "always"
):
logger.info("Attempting to delete the job store")
self._jobStore.destroy()
logger.info("Successfully deleted the job store")
except Exception as e:
if exc_type is None:
raise
else:
logger.exception("The following error was raised during clean up:")
self._inContextManager = False
return False # let exceptions through
|
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Clean up after a workflow invocation. Depending on the configuration, delete the job store.
"""
try:
if (
exc_type is not None
and self.config.clean == "onError"
or exc_type is None
and self.config.clean == "onSuccess"
or self.config.clean == "always"
):
logger.info("Attempting to delete the job store")
self._jobStore.deleteJobStore()
logger.info("Successfully deleted the job store")
except Exception as e:
if exc_type is None:
raise
else:
logger.exception("The following error was raised during clean up:")
self._inContextManager = False
return False # let exceptions through
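# The deletion test above relies on Python's precedence ('and' binds tighter
# than 'or'). A sketch of the same policy with explicit grouping:
def shouldDeleteJobStore(exc_type, clean):
    return ((exc_type is not None and clean == "onError")
            or (exc_type is None and clean == "onSuccess")
            or clean == "always")

assert shouldDeleteJobStore(None, "onSuccess")
assert shouldDeleteJobStore(ValueError, "onError")
assert not shouldDeleteJobStore(None, "never")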
|
https://github.com/DataBiosphere/toil/issues/869
|
cmarkello@c1master1:~$ toil clean azure:hgvm:hgvmevaljobstore
2016-05-13 23:08:36,438 INFO:toil.lib.bioio: Logging set at level: INFO
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Parsed arguments
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Checking if we have files for toil
2016-05-13 23:08:36,525 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.table.core.windows.net
2016-05-13 23:08:36,724 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.blob.core.windows.net
Traceback (most recent call last):
File "/usr/local/bin/toil", line 9, in <module>
load_entry_point('toil==3.2.0a2', 'console_scripts', 'toil')()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilMain.py", line 30, in main
module.main()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilClean.py", line 53, in main
jobStore = Toil.loadOrCreateJobStore(options.jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 589, in loadOrCreateJobStore
return AzureJobStore(account, namePrefix, config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 120, in __init__
super(AzureJobStore, self).__init__(config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/abstractJobStore.py", line 141, in __init__
with self.readSharedFileStream("config.pickle") as fileHandle:
File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 318, in readSharedFileStream
raise NoSuchFileException(sharedFileID)
toil.jobStores.abstractJobStore.NoSuchFileException: File 'a77581f6_ad82_5142_acb7_5a6b01eab3fb' does not exist
|
toil.jobStores.abstractJobStore.NoSuchFileException
|
def __init__(self):
"""
Create an instance of the job store. The instance will not be fully functional until
either :meth:`.initialize` or :meth:`.resume` is invoked. Note that the :meth:`.destroy`
method may be invoked on the object with or without prior invocation of either of these two
methods.
"""
self.__config = None
|
def __init__(self, config=None):
"""
:param toil.common.Config config: If config is not None then the given configuration object will be written
to the shared file "config.pickle" which can later be retrieved using the
readSharedFileStream. See writeConfigToStore. If this file already exists it will be
overwritten. If config is None, the shared file "config.pickle" is assumed to exist
and is retrieved. See loadConfigFromStore.
"""
# Now get on with reading or writing the config
if config is None:
with self.readSharedFileStream("config.pickle") as fileHandle:
config = cPickle.load(fileHandle)
assert config.workflowID is not None
self.__config = config
else:
assert config.workflowID is None
config.workflowID = str(uuid4())
logger.info("The workflow ID is: '%s'" % config.workflowID)
self.__config = config
self.writeConfigToStore()
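# A minimal sketch of the "config.pickle" round trip described above, written
# against the shared-file stream API used in this module; writeConfig and
# loadConfig are illustrative stand-ins, not Toil methods.
import cPickle  # Python 2, as in the surrounding code

def writeConfig(jobStore):
    # Mirrors the write path: overwrite the shared "config.pickle".
    with jobStore.writeSharedFileStream("config.pickle") as fileHandle:
        cPickle.dump(jobStore.config, fileHandle, cPickle.HIGHEST_PROTOCOL)

def loadConfig(jobStore):
    # Mirrors the read path: the shared file must already exist, otherwise
    # NoSuchFileException propagates, as in the traceback below.
    with jobStore.readSharedFileStream("config.pickle") as fileHandle:
        return cPickle.load(fileHandle)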
|
https://github.com/DataBiosphere/toil/issues/869
|
cmarkello@c1master1:~$ toil clean azure:hgvm:hgvmevaljobstore
2016-05-13 23:08:36,438 INFO:toil.lib.bioio: Logging set at level: INFO
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Parsed arguments
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Checking if we have files for toil
2016-05-13 23:08:36,525 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.table.core.windows.net
2016-05-13 23:08:36,724 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.blob.core.windows.net
Traceback (most recent call last):
File "/usr/local/bin/toil", line 9, in <module>
load_entry_point('toil==3.2.0a2', 'console_scripts', 'toil')()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilMain.py", line 30, in main
module.main()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilClean.py", line 53, in main
jobStore = Toil.loadOrCreateJobStore(options.jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 589, in loadOrCreateJobStore
return AzureJobStore(account, namePrefix, config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 120, in __init__
super(AzureJobStore, self).__init__(config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/abstractJobStore.py", line 141, in __init__
with self.readSharedFileStream("config.pickle") as fileHandle:
File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 318, in readSharedFileStream
raise NoSuchFileException(sharedFileID)
toil.jobStores.abstractJobStore.NoSuchFileException: File 'a77581f6_ad82_5142_acb7_5a6b01eab3fb' does not exist
|
toil.jobStores.abstractJobStore.NoSuchFileException
|
def _importFile(self, otherCls, url, sharedFileName=None):
"""
Import the file at the given URL using the given job store class to retrieve that file.
See also :meth:`.importFile`. This method applies a generic approach to importing: it
asks the other job store class for a stream and writes that stream as either a regular or
a shared file.
:param AbstractJobStore otherCls: The concrete subclass of AbstractJobStore that supports
reading from the given URL.
:param urlparse.ParseResult url: The location of the file to import.
:param str sharedFileName: Optional name to assign to the imported file within the job store
:return: The jobStoreFileId of the imported file, or None if sharedFileName was given
:rtype: str|None
"""
if sharedFileName is None:
with self.writeFileStream() as (writable, jobStoreFileID):
otherCls._readFromUrl(url, writable)
return jobStoreFileID
else:
self._requireValidSharedFileName(sharedFileName)
with self.writeSharedFileStream(sharedFileName) as writable:
otherCls._readFromUrl(url, writable)
return None
|
def _importFile(self, otherCls, url, sharedFileName=None):
"""
Import the file at the given URL using the given job store class to retrieve that file.
See also :meth:`importFile`. This method applies a generic approach to importing: it asks
the other job store class for a stream and writes that stream as either a regular or a
shared file.
:param AbstractJobStore otherCls: The concrete subclass of AbstractJobStore that supports
reading from the given URL.
:param urlparse.ParseResult url: The location of the file to import.
:param str sharedFileName: Optional name to assign to the imported file within the job store
:return: The jobStoreFileId of the imported file, or None if sharedFileName was given
:rtype: str|None
"""
if sharedFileName is None:
with self.writeFileStream() as (writable, jobStoreFileID):
otherCls._readFromUrl(url, writable)
return jobStoreFileID
else:
self._requireValidSharedFileName(sharedFileName)
with self.writeSharedFileStream(sharedFileName) as writable:
otherCls._readFromUrl(url, writable)
return None
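# Hedged usage sketch of the two import modes documented above; the URLs are
# placeholders and jobStore is any concrete AbstractJobStore instance.
fileID = jobStore.importFile("s3://example-bucket/inputs/reads.fastq")

# With sharedFileName the file is stored under that name and None is returned.
jobStore.importFile("s3://example-bucket/inputs/reference.fa",
                    sharedFileName="reference.fa")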
|
https://github.com/DataBiosphere/toil/issues/869
|
cmarkello@c1master1:~$ toil clean azure:hgvm:hgvmevaljobstore
2016-05-13 23:08:36,438 INFO:toil.lib.bioio: Logging set at level: INFO
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Parsed arguments
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Checking if we have files for toil
2016-05-13 23:08:36,525 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.table.core.windows.net
2016-05-13 23:08:36,724 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.blob.core.windows.net
Traceback (most recent call last):
File "/usr/local/bin/toil", line 9, in <module>
load_entry_point('toil==3.2.0a2', 'console_scripts', 'toil')()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilMain.py", line 30, in main
module.main()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilClean.py", line 53, in main
jobStore = Toil.loadOrCreateJobStore(options.jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 589, in loadOrCreateJobStore
return AzureJobStore(account, namePrefix, config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 120, in __init__
super(AzureJobStore, self).__init__(config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/abstractJobStore.py", line 141, in __init__
with self.readSharedFileStream("config.pickle") as fileHandle:
File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 318, in readSharedFileStream
raise NoSuchFileException(sharedFileID)
toil.jobStores.abstractJobStore.NoSuchFileException: File 'a77581f6_ad82_5142_acb7_5a6b01eab3fb' does not exist
|
toil.jobStores.abstractJobStore.NoSuchFileException
|
def clean(self, jobCache=None):
"""
Function to clean up the state of a job store after a restart.
Fixes jobs that might have been partially updated. Resets the try counts and removes jobs
that are not successors of the current root job.
:param dict[str,toil.jobWrapper.JobWrapper] jobCache: if given, must be a dict
mapping job ID keys to JobWrapper object values. Jobs will be loaded from the cache
(which can be downloaded from the job store in a batch) instead of piecemeal when
recursed into.
"""
if jobCache is None:
logger.warning("Cleaning jobStore recursively. This may be slow.")
# Functions to get and check the existence of jobs, using the jobCache
# if present
def getJob(jobId):
if jobCache is not None:
try:
return jobCache[jobId]
except KeyError:
return self.load(jobId)
else:
return self.load(jobId)
def haveJob(jobId):
if jobCache is not None:
if jobCache.has_key(jobId):
return True
else:
return self.exists(jobId)
else:
return self.exists(jobId)
def getJobs():
if jobCache is not None:
return jobCache.itervalues()
else:
return self.jobs()
# Iterate from the root jobWrapper and collate all jobs that are reachable from it
# All other jobs returned by self.jobs() are orphaned and can be removed
reachableFromRoot = set()
def getConnectedJobs(jobWrapper):
if jobWrapper.jobStoreID in reachableFromRoot:
return
reachableFromRoot.add(jobWrapper.jobStoreID)
# Traverse jobs in stack
for jobs in jobWrapper.stack:
for successorJobStoreID in map(lambda x: x[0], jobs):
if successorJobStoreID not in reachableFromRoot and haveJob(
successorJobStoreID
):
getConnectedJobs(getJob(successorJobStoreID))
# Traverse service jobs
for jobs in jobWrapper.services:
for serviceJobStoreID in map(lambda x: x[0], jobs):
if haveJob(serviceJobStoreID):
assert serviceJobStoreID not in reachableFromRoot
reachableFromRoot.add(serviceJobStoreID)
logger.info("Checking job graph connectivity...")
getConnectedJobs(self.loadRootJob())
logger.info("%d jobs reachable from root." % len(reachableFromRoot))
# Cleanup jobs that are not reachable from the root, and therefore orphaned
jobsToDelete = filter(lambda x: x.jobStoreID not in reachableFromRoot, getJobs())
for jobWrapper in jobsToDelete:
# clean up any associated files before deletion
for fileID in jobWrapper.filesToDelete:
# Delete any files that should already be deleted
logger.warn(
"Deleting file '%s'. It is marked for deletion but has not yet been "
"removed.",
fileID,
)
self.deleteFile(fileID)
# Delete the job
self.delete(jobWrapper.jobStoreID)
# Clean up jobs that are reachable from the root
for jobWrapper in (getJob(x) for x in reachableFromRoot):
# jobWrappers here are necessarily reachable from root.
changed = [False] # This is a flag to indicate the jobWrapper state has
# changed
# If the job has files to delete delete them.
if len(jobWrapper.filesToDelete) != 0:
# Delete any files that should already be deleted
for fileID in jobWrapper.filesToDelete:
logger.critical(
"Removing file in job store: %s that was "
"marked for deletion but not previously removed" % fileID
)
self.deleteFile(fileID)
jobWrapper.filesToDelete = []
changed[0] = True
# For a job whose command is already executed, remove jobs from the stack that are
# already deleted. This cleans up the case that the jobWrapper had successors to run,
# but had not been updated to reflect this.
if jobWrapper.command is None:
stackSizeFn = lambda: sum(map(len, jobWrapper.stack))
startStackSize = stackSizeFn()
# Remove deleted jobs
jobWrapper.stack = map(
lambda x: filter(lambda y: self.exists(y[0]), x), jobWrapper.stack
)
# Remove empty stuff from the stack
jobWrapper.stack = filter(lambda x: len(x) > 0, jobWrapper.stack)
# Check if anything got removed
if stackSizeFn() != startStackSize:
changed[0] = True
# Cleanup any services that have already been finished.
# Filter out deleted services and update the flags for services that exist
# If there are services then renew
# the start and terminate flags if they have been removed
def subFlagFile(jobStoreID, jobStoreFileID, flag):
if self.fileExists(jobStoreFileID):
return jobStoreFileID
# Make a new flag
newFlag = self.getEmptyFileStoreID()
# Load the jobWrapper for the service and initialise the link
serviceJobWrapper = getJob(jobStoreID)
if flag == 1:
logger.debug(
"Recreating a start service flag for job: %s, flag: %s",
jobStoreID,
newFlag,
)
serviceJobWrapper.startJobStoreID = newFlag
elif flag == 2:
logger.debug(
"Recreating a terminate service flag for job: %s, flag: %s",
jobStoreID,
newFlag,
)
serviceJobWrapper.terminateJobStoreID = newFlag
else:
logger.debug(
"Recreating a error service flag for job: %s, flag: %s",
jobStoreID,
newFlag,
)
assert flag == 3
serviceJobWrapper.errorJobStoreID = newFlag
# Update the service job on disk
self.update(serviceJobWrapper)
changed[0] = True
return newFlag
servicesSizeFn = lambda: sum(map(len, jobWrapper.services))
startServicesSize = servicesSizeFn()
jobWrapper.services = filter(
lambda z: len(z) > 0,
map(
lambda serviceJobList: map(
lambda x: x[:4]
+ (
subFlagFile(x[0], x[4], 1),
subFlagFile(x[0], x[5], 2),
subFlagFile(x[0], x[6], 3),
),
filter(lambda y: self.exists(y[0]), serviceJobList),
),
jobWrapper.services,
),
)
if servicesSizeFn() != startServicesSize:
changed[0] = True
# Reset the retry count of the jobWrapper
if jobWrapper.remainingRetryCount != self._defaultTryCount():
jobWrapper.remainingRetryCount = self._defaultTryCount()
changed[0] = True
# This cleans the old log file which may
# have been left if the jobWrapper is being retried after a jobWrapper failure.
if jobWrapper.logJobStoreFileID is not None:
self.delete(jobWrapper.logJobStoreFileID)
jobWrapper.logJobStoreFileID = None
changed[0] = True
if changed[0]: # Update, but only if a change has occurred
logger.critical("Repairing job: %s" % jobWrapper.jobStoreID)
self.update(jobWrapper)
# Remove any crufty stats/logging files from the previous run
logger.info("Discarding old statistics and logs...")
self.readStatsAndLogging(lambda x: None)
logger.info("Job store is clean")
# TODO: reloading of the rootJob may be redundant here
return self.loadRootJob()
|
def clean(self, jobCache=None):
"""
Function to clean up the state of a job store after a restart.
Fixes jobs that might have been partially updated. Resets the try counts and removes jobs
that are not successors of the current root job.
:param dict[str,toil.jobWrapper.JobWrapper] jobCache: if given, must be a dict
mapping job ID keys to JobWrapper object values. Jobs will be loaded from the cache
(which can be downloaded from the job store in a batch) instead of piecemeal when
recursed into.
"""
if jobCache is None:
logger.warning("Cleaning jobStore recursively. This may be slow.")
# Functions to get and check the existence of jobs, using the jobCache
# if present
def getJob(jobId):
if jobCache is not None:
try:
return jobCache[jobId]
except KeyError:
return self.load(jobId)
else:
return self.load(jobId)
def haveJob(jobId):
if jobCache is not None:
if jobCache.has_key(jobId):
return True
else:
return self.exists(jobId)
else:
return self.exists(jobId)
def getJobs():
if jobCache is not None:
return jobCache.itervalues()
else:
return self.jobs()
# Iterate from the root jobWrapper and collate all jobs that are reachable from it
# All other jobs returned by self.jobs() are orphaned and can be removed
reachableFromRoot = set()
def getConnectedJobs(jobWrapper):
if jobWrapper.jobStoreID in reachableFromRoot:
return
reachableFromRoot.add(jobWrapper.jobStoreID)
# Traverse jobs in stack
for jobs in jobWrapper.stack:
for successorJobStoreID in map(lambda x: x[0], jobs):
if successorJobStoreID not in reachableFromRoot and haveJob(
successorJobStoreID
):
getConnectedJobs(getJob(successorJobStoreID))
# Traverse service jobs
for jobs in jobWrapper.services:
for serviceJobStoreID in map(lambda x: x[0], jobs):
if haveJob(serviceJobStoreID):
assert serviceJobStoreID not in reachableFromRoot
reachableFromRoot.add(serviceJobStoreID)
logger.info("Checking job graph connectivity...")
getConnectedJobs(self.loadRootJob())
logger.info("%d jobs reachable from root." % len(reachableFromRoot))
# Cleanup jobs that are not reachable from the root, and therefore orphaned
jobsToDelete = filter(lambda x: x.jobStoreID not in reachableFromRoot, getJobs())
for jobWrapper in jobsToDelete:
# clean up any associated files before deletion
for fileID in jobWrapper.filesToDelete:
# Delete any files that should already be deleted
logger.critical(
"Removing file in job store: %s that was marked for deletion but not previously removed"
% fileID
)
self.deleteFile(fileID)
# Delete the job
self.delete(jobWrapper.jobStoreID)
# Clean up jobs that are reachable from the root
for jobWrapper in (getJob(x) for x in reachableFromRoot):
# jobWrappers here are necessarily reachable from root.
changed = [False] # This is a flag to indicate the jobWrapper state has
# changed
# If the job has files to delete delete them.
if len(jobWrapper.filesToDelete) != 0:
# Delete any files that should already be deleted
for fileID in jobWrapper.filesToDelete:
logger.critical(
"Removing file in job store: %s that was "
"marked for deletion but not previously removed" % fileID
)
self.deleteFile(fileID)
jobWrapper.filesToDelete = []
changed[0] = True
# For a job whose command is already executed, remove jobs from the
# stack that are already deleted.
# This cleans up the case that the jobWrapper
# had successors to run, but had not been updated to reflect this
if jobWrapper.command is None:
stackSizeFn = lambda: sum(map(len, jobWrapper.stack))
startStackSize = stackSizeFn()
# Remove deleted jobs
jobWrapper.stack = map(
lambda x: filter(lambda y: self.exists(y[0]), x), jobWrapper.stack
)
# Remove empty stuff from the stack
jobWrapper.stack = filter(lambda x: len(x) > 0, jobWrapper.stack)
# Check if anything got removed
if stackSizeFn() != startStackSize:
changed[0] = True
# Cleanup any services that have already been finished.
# Filter out deleted services and update the flags for services that exist
# If there are services then renew
# the start and terminate flags if they have been removed
def subFlagFile(jobStoreID, jobStoreFileID, flag):
if self.fileExists(jobStoreFileID):
return jobStoreFileID
# Make a new flag
newFlag = self.getEmptyFileStoreID()
# Load the jobWrapper for the service and initialise the link
serviceJobWrapper = getJob(jobStoreID)
if flag == 1:
logger.debug(
"Recreating a start service flag for job: %s, flag: %s",
jobStoreID,
newFlag,
)
serviceJobWrapper.startJobStoreID = newFlag
elif flag == 2:
logger.debug(
"Recreating a terminate service flag for job: %s, flag: %s",
jobStoreID,
newFlag,
)
serviceJobWrapper.terminateJobStoreID = newFlag
else:
logger.debug(
"Recreating a error service flag for job: %s, flag: %s",
jobStoreID,
newFlag,
)
assert flag == 3
serviceJobWrapper.errorJobStoreID = newFlag
# Update the service job on disk
self.update(serviceJobWrapper)
changed[0] = True
return newFlag
servicesSizeFn = lambda: sum(map(len, jobWrapper.services))
startServicesSize = servicesSizeFn()
jobWrapper.services = filter(
lambda z: len(z) > 0,
map(
lambda serviceJobList: map(
lambda x: x[:4]
+ (
subFlagFile(x[0], x[4], 1),
subFlagFile(x[0], x[5], 2),
subFlagFile(x[0], x[6], 3),
),
filter(lambda y: self.exists(y[0]), serviceJobList),
),
jobWrapper.services,
),
)
if servicesSizeFn() != startServicesSize:
changed[0] = True
# Reset the retry count of the jobWrapper
if jobWrapper.remainingRetryCount != self._defaultTryCount():
jobWrapper.remainingRetryCount = self._defaultTryCount()
changed[0] = True
# This cleans the old log file which may
# have been left if the jobWrapper is being retried after a jobWrapper failure.
if jobWrapper.logJobStoreFileID is not None:
self.delete(jobWrapper.logJobStoreFileID)
jobWrapper.logJobStoreFileID = None
changed[0] = True
if changed[0]: # Update, but only if a change has occurred
logger.critical("Repairing job: %s" % jobWrapper.jobStoreID)
self.update(jobWrapper)
# Remove any crufty stats/logging files from the previous run
logger.info("Discarding old statistics and logs...")
self.readStatsAndLogging(lambda x: None)
logger.info("Job store is clean")
# TODO: reloading of the rootJob may be redundant here
return self.loadRootJob()
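# The orphan-removal pass above is a plain reachability traversal over
# successor job IDs. A self-contained sketch (names are illustrative,
# not Toil API):
def reachableFrom(rootID, successors):
    # successors maps a job ID to the list of its successor job IDs.
    seen = set()
    stack = [rootID]
    while stack:
        jobID = stack.pop()
        if jobID not in seen:
            seen.add(jobID)
            stack.extend(successors.get(jobID, ()))
    return seen

graph = {"root": ["a", "b"], "a": ["c"], "orphan": ["c"]}
reachable = reachableFrom("root", graph)
orphans = set(graph) - reachable  # {"orphan"} would be deleted
assert reachable == set(["root", "a", "b", "c"])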
|
https://github.com/DataBiosphere/toil/issues/869
|
cmarkello@c1master1:~$ toil clean azure:hgvm:hgvmevaljobstore
2016-05-13 23:08:36,438 INFO:toil.lib.bioio: Logging set at level: INFO
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Parsed arguments
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Checking if we have files for toil
2016-05-13 23:08:36,525 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.table.core.windows.net
2016-05-13 23:08:36,724 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.blob.core.windows.net
Traceback (most recent call last):
File "/usr/local/bin/toil", line 9, in <module>
load_entry_point('toil==3.2.0a2', 'console_scripts', 'toil')()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilMain.py", line 30, in main
module.main()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilClean.py", line 53, in main
jobStore = Toil.loadOrCreateJobStore(options.jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 589, in loadOrCreateJobStore
return AzureJobStore(account, namePrefix, config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 120, in __init__
super(AzureJobStore, self).__init__(config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/abstractJobStore.py", line 141, in __init__
with self.readSharedFileStream("config.pickle") as fileHandle:
File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 318, in readSharedFileStream
raise NoSuchFileException(sharedFileID)
toil.jobStores.abstractJobStore.NoSuchFileException: File 'a77581f6_ad82_5142_acb7_5a6b01eab3fb' does not exist
|
toil.jobStores.abstractJobStore.NoSuchFileException
|
def _getKeyForUrl(url, existing=None):
"""
Extracts a key from a given s3:// URL. On return, but not on exceptions, this method
leaks an S3Connection object. The caller is responsible for closing it by calling
key.bucket.connection.close().
:param bool existing: If True, key is expected to exist. If False, key is expected not to
exist and it will be created. If None, the key will be created if it doesn't exist.
:rtype: Key
"""
# Get the bucket's region to avoid a redirect per request
try:
with closing(boto.connect_s3()) as s3:
location = s3.get_bucket(url.netloc).get_location()
region = bucket_location_to_region(location)
except S3ResponseError as e:
if e.error_code == "AccessDenied":
log.warn(
"Could not determine location of bucket hosting URL '%s', reverting "
"to generic S3 endpoint.",
url.geturl(),
)
s3 = boto.connect_s3()
else:
raise
else:
# Note that caller is responsible for closing the connection
s3 = boto.s3.connect_to_region(region)
try:
keyName = url.path[1:]
bucketName = url.netloc
bucket = s3.get_bucket(bucketName)
key = bucket.get_key(keyName)
if existing is True:
if key is None:
raise RuntimeError(
"Key '%s' does not exist in bucket '%s'." % (keyName, bucketName)
)
elif existing is False:
if key is not None:
raise RuntimeError(
"Key '%s' exists in bucket '%s'." % (keyName, bucketName)
)
elif existing is None:
pass
else:
assert False
if key is None:
key = bucket.new_key(keyName)
except:
with panic():
s3.close()
else:
return key
|
def _getKeyForUrl(url, existing=None):
"""
Extracts a key from a given s3:// URL. On return, but not on exceptions, this method
leaks an S3Connection object. The caller is responsible for closing it by calling
key.bucket.connection.close().
:param bool existing: If True, key is expected to exist. If False, key is expected not to
exist and it will be created. If None, the key will be created if it doesn't exist.
:rtype: Key
"""
# Get the bucket's region to avoid a redirect per request
try:
with closing(boto.connect_s3()) as s3:
region = bucket_location_to_region(s3.get_bucket(url.netloc).get_location())
except S3ResponseError as e:
if e.error_code == "AccessDenied":
log.warn(
"Could not determine location of bucket hosting URL '%s', reverting "
"to generic S3 endpoint.",
url.geturl(),
)
s3 = boto.connect_s3()
else:
raise
else:
# Note that caller is responsible for closing the connection
s3 = boto.s3.connect_to_region(region)
try:
keyName = url.path[1:]
bucketName = url.netloc
bucket = s3.get_bucket(bucketName)
key = bucket.get_key(keyName)
if existing is True:
if key is None:
raise RuntimeError(
"Key '%s' does not exist in bucket '%s'." % (keyName, bucketName)
)
elif existing is False:
if key is not None:
raise RuntimeError(
"Key '%s' exists in bucket '%s'." % (keyName, bucketName)
)
elif existing is None:
pass
else:
assert False
if key is None:
key = bucket.new_key(keyName)
except:
with panic():
s3.close()
else:
return key
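# The 'existing' tri-state documented above, factored into a standalone
# sketch (illustrative helper, not part of the module):
def checkKey(key, keyName, bucketName, existing):
    # existing=True: must exist; existing=False: must not; None: don't care.
    assert existing in (True, False, None)
    if existing is True and key is None:
        raise RuntimeError("Key '%s' does not exist in bucket '%s'."
                           % (keyName, bucketName))
    if existing is False and key is not None:
        raise RuntimeError("Key '%s' exists in bucket '%s'."
                           % (keyName, bucketName))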
|
https://github.com/DataBiosphere/toil/issues/869
|
cmarkello@c1master1:~$ toil clean azure:hgvm:hgvmevaljobstore
2016-05-13 23:08:36,438 INFO:toil.lib.bioio: Logging set at level: INFO
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Parsed arguments
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Checking if we have files for toil
2016-05-13 23:08:36,525 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.table.core.windows.net
2016-05-13 23:08:36,724 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.blob.core.windows.net
Traceback (most recent call last):
File "/usr/local/bin/toil", line 9, in <module>
load_entry_point('toil==3.2.0a2', 'console_scripts', 'toil')()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilMain.py", line 30, in main
module.main()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilClean.py", line 53, in main
jobStore = Toil.loadOrCreateJobStore(options.jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 589, in loadOrCreateJobStore
return AzureJobStore(account, namePrefix, config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 120, in __init__
super(AzureJobStore, self).__init__(config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/abstractJobStore.py", line 141, in __init__
with self.readSharedFileStream("config.pickle") as fileHandle:
File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 318, in readSharedFileStream
raise NoSuchFileException(sharedFileID)
toil.jobStores.abstractJobStore.NoSuchFileException: File 'a77581f6_ad82_5142_acb7_5a6b01eab3fb' does not exist
|
toil.jobStores.abstractJobStore.NoSuchFileException
|
def downloadStream(self):
info = self
class DownloadPipe(ReadablePipe):
def writeTo(self, writable):
if info.content is not None:
writable.write(info.content)
elif info.version:
headers = info._s3EncryptionHeaders()
key = info.outer.filesBucket.get_key(info.fileID, validate=False)
for attempt in retry_s3():
with attempt:
key.get_contents_to_file(
writable, headers=headers, version_id=info.version
)
else:
assert False
with DownloadPipe() as readable:
    yield readable
|
def downloadStream(self):
readable_fh, writable_fh = os.pipe()
with os.fdopen(readable_fh, "r") as readable:
with os.fdopen(writable_fh, "w") as writable:
def writer():
try:
if self.content is not None:
writable.write(self.content)
elif self.version:
headers = self._s3EncryptionHeaders()
key = self.outer.filesBucket.get_key(
self.fileID, validate=False
)
for attempt in retry_s3():
with attempt:
key.get_contents_to_file(
writable, headers=headers, version_id=self.version
)
else:
assert False
finally:
# This close() will send EOF to the reading end and ultimately cause
# the yield to return. It also makes the implicit .close() done by the
# enclosing "with" context redundant, but that should be ok since
# .close() on file objects is idempotent.
writable.close()
thread = ExceptionalThread(target=writer)
thread.start()
yield readable
thread.join()
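# The pipe-plus-writer-thread shape used above, reduced to a self-contained
# standard-library sketch; plain threading.Thread stands in for
# ExceptionalThread.
import os
import threading

def producerPipe(produce):
    readable_fh, writable_fh = os.pipe()
    readable = os.fdopen(readable_fh, "rb")
    writable = os.fdopen(writable_fh, "wb")

    def writer():
        try:
            produce(writable)
        finally:
            writable.close()  # sends EOF to the reading end

    thread = threading.Thread(target=writer)
    thread.start()
    return readable, thread

readable, thread = producerPipe(lambda w: w.write(b"hello"))
assert readable.read() == b"hello"
thread.join()
readable.close()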
|
https://github.com/DataBiosphere/toil/issues/869
|
cmarkello@c1master1:~$ toil clean azure:hgvm:hgvmevaljobstore
2016-05-13 23:08:36,438 INFO:toil.lib.bioio: Logging set at level: INFO
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Parsed arguments
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Checking if we have files for toil
2016-05-13 23:08:36,525 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.table.core.windows.net
2016-05-13 23:08:36,724 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.blob.core.windows.net
Traceback (most recent call last):
File "/usr/local/bin/toil", line 9, in <module>
load_entry_point('toil==3.2.0a2', 'console_scripts', 'toil')()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilMain.py", line 30, in main
module.main()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilClean.py", line 53, in main
jobStore = Toil.loadOrCreateJobStore(options.jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 589, in loadOrCreateJobStore
return AzureJobStore(account, namePrefix, config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 120, in __init__
super(AzureJobStore, self).__init__(config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/abstractJobStore.py", line 141, in __init__
with self.readSharedFileStream("config.pickle") as fileHandle:
File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 318, in readSharedFileStream
raise NoSuchFileException(sharedFileID)
toil.jobStores.abstractJobStore.NoSuchFileException: File 'a77581f6_ad82_5142_acb7_5a6b01eab3fb' does not exist
|
toil.jobStores.abstractJobStore.NoSuchFileException
|
def __init__(self, locator, jobChunkSize=maxAzureTablePropertySize):
super(AzureJobStore, self).__init__()
accountName, namePrefix = locator.split(":", 1)
if "--" in namePrefix:
raise ValueError(
"Invalid name prefix '%s'. Name prefixes may not contain %s."
% (namePrefix, self.nameSeparator)
)
if not self.containerNameRe.match(namePrefix):
raise ValueError(
"Invalid name prefix '%s'. Name prefixes must contain only digits, "
"hyphens or lower-case letters and must not start or end in a "
"hyphen." % namePrefix
)
# Reserve 13 for separator and suffix
if len(namePrefix) > self.maxContainerNameLen - self.maxNameLen - len(
self.nameSeparator
):
raise ValueError(
(
"Invalid name prefix '%s'. Name prefixes may not be longer than 50 "
"characters." % namePrefix
)
)
if "--" in namePrefix:
raise ValueError(
"Invalid name prefix '%s'. Name prefixes may not contain "
"%s." % (namePrefix, self.nameSeparator)
)
self.locator = locator
self.jobChunkSize = jobChunkSize
self.accountKey = _fetchAzureAccountKey(accountName)
self.accountName = accountName
# Table names have strict requirements in Azure
self.namePrefix = self._sanitizeTableName(namePrefix)
# These are the main API entry points.
self.tableService = TableService(
account_key=self.accountKey, account_name=accountName
)
self.blobService = BlobService(
account_key=self.accountKey, account_name=accountName
)
# Serialized jobs table
self.jobItems = None
# Job<->file mapping table
self.jobFileIDs = None
# Container for all shared and unshared files
self.files = None
# Stats and logging strings
self.statsFiles = None
# File IDs that contain stats and logging strings
self.statsFileIDs = None
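# A sketch collecting the prefix rules the checks above enforce; the regex
# mirrors the error messages and the length limit is illustrative.
import re

containerNameRe = re.compile(r"^[a-z0-9](-?[a-z0-9]+)*$")

def validateNamePrefix(namePrefix, maxLen=50):
    if "--" in namePrefix:
        raise ValueError("Name prefixes may not contain '--'.")
    if not containerNameRe.match(namePrefix):
        raise ValueError("Name prefixes must contain only digits, hyphens or "
                         "lower-case letters and must not start or end in a "
                         "hyphen.")
    if len(namePrefix) > maxLen:
        raise ValueError("Name prefixes may not be longer than %d characters."
                         % maxLen)

validateNamePrefix("hgvmevaljobstore")  # passes, as in the example run below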
|
def __init__(
self, accountName, namePrefix, config=None, jobChunkSize=maxAzureTablePropertySize
):
self.jobChunkSize = jobChunkSize
self.keyPath = None
self.account_key = _fetchAzureAccountKey(accountName)
self.accountName = accountName
# Table names have strict requirements in Azure
self.namePrefix = self._sanitizeTableName(namePrefix)
logger.debug("Creating job store with name prefix '%s'" % self.namePrefix)
# These are the main API entrypoints.
self.tableService = TableService(
account_key=self.account_key, account_name=accountName
)
self.blobService = BlobService(
account_key=self.account_key, account_name=accountName
)
exists = self._jobStoreExists()
self._checkJobStoreCreation(
config is not None, exists, accountName + ":" + self.namePrefix
)
# Serialized jobs table
self.jobItems = self._getOrCreateTable(self.qualify("jobs"))
# Job<->file mapping table
self.jobFileIDs = self._getOrCreateTable(self.qualify("jobFileIDs"))
# Container for all shared and unshared files
self.files = self._getOrCreateBlobContainer(self.qualify("files"))
# Stats and logging strings
self.statsFiles = self._getOrCreateBlobContainer(self.qualify("statsfiles"))
# File IDs that contain stats and logging strings
self.statsFileIDs = self._getOrCreateTable(self.qualify("statsFileIDs"))
super(AzureJobStore, self).__init__(config=config)
if self.config.cseKey is not None:
self.keyPath = self.config.cseKey
|
https://github.com/DataBiosphere/toil/issues/869
|
cmarkello@c1master1:~$ toil clean azure:hgvm:hgvmevaljobstore
2016-05-13 23:08:36,438 INFO:toil.lib.bioio: Logging set at level: INFO
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Parsed arguments
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Checking if we have files for toil
2016-05-13 23:08:36,525 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.table.core.windows.net
2016-05-13 23:08:36,724 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.blob.core.windows.net
Traceback (most recent call last):
File "/usr/local/bin/toil", line 9, in <module>
load_entry_point('toil==3.2.0a2', 'console_scripts', 'toil')()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilMain.py", line 30, in main
module.main()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilClean.py", line 53, in main
jobStore = Toil.loadOrCreateJobStore(options.jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 589, in loadOrCreateJobStore
return AzureJobStore(account, namePrefix, config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 120, in __init__
super(AzureJobStore, self).__init__(config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/abstractJobStore.py", line 141, in __init__
with self.readSharedFileStream("config.pickle") as fileHandle:
File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 318, in readSharedFileStream
raise NoSuchFileException(sharedFileID)
toil.jobStores.abstractJobStore.NoSuchFileException: File 'a77581f6_ad82_5142_acb7_5a6b01eab3fb' does not exist
|
toil.jobStores.abstractJobStore.NoSuchFileException
|
def _jobStoreExists(self):
"""
Checks if job store exists by querying the existence of the statsFileIDs table. Note that
this is the last component that is deleted in :meth:`.destroy`.
"""
for attempt in retry_azure():
with attempt:
try:
table = self.tableService.query_tables(
table_name=self._qualify("statsFileIDs")
)
except AzureMissingResourceHttpError as e:
if e.status_code == 404:
return False
else:
raise
else:
return table is not None
|
def _jobStoreExists(self):
"""
Checks if job store exists by querying the existence of the statsFileIDs table. Note that
this is the last component that is deleted in deleteJobStore.
"""
for attempt in retry_azure():
with attempt:
try:
table = self.tableService.query_tables(
table_name=self.qualify("statsFileIDs")
)
return table is not None
except AzureMissingResourceHttpError as e:
if e.status_code == 404:
return False
else:
raise
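# The probe above follows a common shape: retry transient errors, map a 404
# on the sentinel table to "job store absent", and re-raise everything else.
# A generic sketch with an illustrative exception type:
class MissingResource(Exception):
    def __init__(self, status_code):
        self.status_code = status_code

def tableExists(queryTables, name):
    # queryTables stands in for tableService.query_tables.
    try:
        return queryTables(table_name=name) is not None
    except MissingResource as e:
        if e.status_code == 404:
            return False
        raise

assert tableExists(lambda table_name: [table_name], "statsFileIDs")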
|
https://github.com/DataBiosphere/toil/issues/869
|
cmarkello@c1master1:~$ toil clean azure:hgvm:hgvmevaljobstore
2016-05-13 23:08:36,438 INFO:toil.lib.bioio: Logging set at level: INFO
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Parsed arguments
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Checking if we have files for toil
2016-05-13 23:08:36,525 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.table.core.windows.net
2016-05-13 23:08:36,724 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.blob.core.windows.net
Traceback (most recent call last):
File "/usr/local/bin/toil", line 9, in <module>
load_entry_point('toil==3.2.0a2', 'console_scripts', 'toil')()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilMain.py", line 30, in main
module.main()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilClean.py", line 53, in main
jobStore = Toil.loadOrCreateJobStore(options.jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 589, in loadOrCreateJobStore
return AzureJobStore(account, namePrefix, config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 120, in __init__
super(AzureJobStore, self).__init__(config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/abstractJobStore.py", line 141, in __init__
with self.readSharedFileStream("config.pickle") as fileHandle:
File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 318, in readSharedFileStream
raise NoSuchFileException(sharedFileID)
toil.jobStores.abstractJobStore.NoSuchFileException: File 'a77581f6_ad82_5142_acb7_5a6b01eab3fb' does not exist
|
toil.jobStores.abstractJobStore.NoSuchFileException
|
def getEnv(self):
return dict(AZURE_ACCOUNT_KEY=self.accountKey)
|
def getEnv(self):
return dict(AZURE_ACCOUNT_KEY=self.account_key)
|
https://github.com/DataBiosphere/toil/issues/869
|
cmarkello@c1master1:~$ toil clean azure:hgvm:hgvmevaljobstore
2016-05-13 23:08:36,438 INFO:toil.lib.bioio: Logging set at level: INFO
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Parsed arguments
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Checking if we have files for toil
2016-05-13 23:08:36,525 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.table.core.windows.net
2016-05-13 23:08:36,724 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.blob.core.windows.net
Traceback (most recent call last):
File "/usr/local/bin/toil", line 9, in <module>
load_entry_point('toil==3.2.0a2', 'console_scripts', 'toil')()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilMain.py", line 30, in main
module.main()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilClean.py", line 53, in main
jobStore = Toil.loadOrCreateJobStore(options.jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 589, in loadOrCreateJobStore
return AzureJobStore(account, namePrefix, config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 120, in __init__
super(AzureJobStore, self).__init__(config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/abstractJobStore.py", line 141, in __init__
with self.readSharedFileStream("config.pickle") as fileHandle:
File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 318, in readSharedFileStream
raise NoSuchFileException(sharedFileID)
toil.jobStores.abstractJobStore.NoSuchFileException: File 'a77581f6_ad82_5142_acb7_5a6b01eab3fb' does not exist
|
toil.jobStores.abstractJobStore.NoSuchFileException
|
def _readFromUrl(cls, url, writable):
blob = cls._parseWasbUrl(url)
blob.service.get_blob_to_file(
container_name=blob.container, blob_name=blob.name, stream=writable
)
|
def _readFromUrl(cls, url, writable):
blobService, containerName, blobName = cls._extractBlobInfoFromUrl(url)
blobService.get_blob_to_file(containerName, blobName, writable)
|
https://github.com/DataBiosphere/toil/issues/869
|
cmarkello@c1master1:~$ toil clean azure:hgvm:hgvmevaljobstore
2016-05-13 23:08:36,438 INFO:toil.lib.bioio: Logging set at level: INFO
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Parsed arguments
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Checking if we have files for toil
2016-05-13 23:08:36,525 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.table.core.windows.net
2016-05-13 23:08:36,724 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.blob.core.windows.net
Traceback (most recent call last):
File "/usr/local/bin/toil", line 9, in <module>
load_entry_point('toil==3.2.0a2', 'console_scripts', 'toil')()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilMain.py", line 30, in main
module.main()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilClean.py", line 53, in main
jobStore = Toil.loadOrCreateJobStore(options.jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 589, in loadOrCreateJobStore
return AzureJobStore(account, namePrefix, config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 120, in __init__
super(AzureJobStore, self).__init__(config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/abstractJobStore.py", line 141, in __init__
with self.readSharedFileStream("config.pickle") as fileHandle:
File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 318, in readSharedFileStream
raise NoSuchFileException(sharedFileID)
toil.jobStores.abstractJobStore.NoSuchFileException: File 'a77581f6_ad82_5142_acb7_5a6b01eab3fb' does not exist
|
toil.jobStores.abstractJobStore.NoSuchFileException
|
def _writeToUrl(cls, readable, url):
blob = cls._parseWasbUrl(url)
blob.service.put_block_blob_from_file(
container_name=blob.container, blob_name=blob.name, stream=readable
)
|
def _writeToUrl(cls, readable, url):
blobService, containerName, blobName = cls._extractBlobInfoFromUrl(url)
blobService.put_block_blob_from_file(containerName, blobName, readable)
blobService.get_blob(containerName, blobName)
|
https://github.com/DataBiosphere/toil/issues/869
|
cmarkello@c1master1:~$ toil clean azure:hgvm:hgvmevaljobstore
2016-05-13 23:08:36,438 INFO:toil.lib.bioio: Logging set at level: INFO
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Parsed arguments
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Checking if we have files for toil
2016-05-13 23:08:36,525 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.table.core.windows.net
2016-05-13 23:08:36,724 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.blob.core.windows.net
Traceback (most recent call last):
File "/usr/local/bin/toil", line 9, in <module>
load_entry_point('toil==3.2.0a2', 'console_scripts', 'toil')()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilMain.py", line 30, in main
module.main()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilClean.py", line 53, in main
jobStore = Toil.loadOrCreateJobStore(options.jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 589, in loadOrCreateJobStore
return AzureJobStore(account, namePrefix, config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 120, in __init__
super(AzureJobStore, self).__init__(config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/abstractJobStore.py", line 141, in __init__
with self.readSharedFileStream("config.pickle") as fileHandle:
File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 318, in readSharedFileStream
raise NoSuchFileException(sharedFileID)
toil.jobStores.abstractJobStore.NoSuchFileException: File 'a77581f6_ad82_5142_acb7_5a6b01eab3fb' does not exist
|
toil.jobStores.abstractJobStore.NoSuchFileException
|
def _supportsUrl(cls, url, export=False):
return url.scheme.lower() in ("wasb", "wasbs")
|
def _supportsUrl(cls, url, export=False):
return url.scheme.lower() == "wasb" or url.scheme.lower() == "wasbs"
|
https://github.com/DataBiosphere/toil/issues/869
|
cmarkello@c1master1:~$ toil clean azure:hgvm:hgvmevaljobstore
2016-05-13 23:08:36,438 INFO:toil.lib.bioio: Logging set at level: INFO
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Parsed arguments
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Checking if we have files for toil
2016-05-13 23:08:36,525 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.table.core.windows.net
2016-05-13 23:08:36,724 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.blob.core.windows.net
Traceback (most recent call last):
File "/usr/local/bin/toil", line 9, in <module>
load_entry_point('toil==3.2.0a2', 'console_scripts', 'toil')()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilMain.py", line 30, in main
module.main()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilClean.py", line 53, in main
jobStore = Toil.loadOrCreateJobStore(options.jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 589, in loadOrCreateJobStore
return AzureJobStore(account, namePrefix, config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 120, in __init__
super(AzureJobStore, self).__init__(config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/abstractJobStore.py", line 141, in __init__
with self.readSharedFileStream("config.pickle") as fileHandle:
File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 318, in readSharedFileStream
raise NoSuchFileException(sharedFileID)
toil.jobStores.abstractJobStore.NoSuchFileException: File 'a77581f6_ad82_5142_acb7_5a6b01eab3fb' does not exist
|
toil.jobStores.abstractJobStore.NoSuchFileException
|
def _uploadStream(
self, jobStoreFileID, container, checkForModification=False, encrypted=None
):
"""
:param encrypted: True to enforce encryption (will raise exception unless key is set),
False to prevent encryption or None to encrypt if key is set.
"""
if checkForModification:
try:
expectedVersion = container.get_blob_properties(blob_name=jobStoreFileID)[
"etag"
]
except AzureMissingResourceHttpError:
expectedVersion = None
if encrypted is None:
encrypted = self.keyPath is not None
elif encrypted:
if self.keyPath is None:
raise RuntimeError("Encryption requested but no key was provided")
maxBlockSize = self._maxAzureBlockBytes
if encrypted:
# There is a small overhead for encrypted data.
maxBlockSize -= encryption.overhead
readable_fh, writable_fh = os.pipe()
with os.fdopen(readable_fh, "r") as readable:
with os.fdopen(writable_fh, "w") as writable:
def reader():
blockIDs = []
try:
while True:
buf = readable.read(maxBlockSize)
if len(buf) == 0:
# We're safe to break here even if we never read anything, since
# putting an empty block list creates an empty blob.
break
if encrypted:
buf = encryption.encrypt(buf, self.keyPath)
blockID = self._newFileID()
container.put_block(
blob_name=jobStoreFileID, block=buf, blockid=blockID
)
blockIDs.append(blockID)
except:
with panic(log=logger):
# This is guaranteed to delete any uncommitted blocks.
container.delete_blob(blob_name=jobStoreFileID)
if checkForModification and expectedVersion is not None:
# Acquire a (60-second) write lock,
leaseID = container.lease_blob(
blob_name=jobStoreFileID, x_ms_lease_action="acquire"
)["x-ms-lease-id"]
# check for modification,
blobProperties = container.get_blob_properties(
blob_name=jobStoreFileID
)
if blobProperties["etag"] != expectedVersion:
container.lease_blob(
blob_name=jobStoreFileID,
x_ms_lease_action="release",
x_ms_lease_id=leaseID,
)
raise ConcurrentFileModificationException(jobStoreFileID)
# commit the file,
container.put_block_list(
blob_name=jobStoreFileID,
block_list=blockIDs,
x_ms_lease_id=leaseID,
x_ms_meta_name_values=dict(encrypted=str(encrypted)),
)
# then release the lock.
container.lease_blob(
blob_name=jobStoreFileID,
x_ms_lease_action="release",
x_ms_lease_id=leaseID,
)
else:
# No need to check for modification, just blindly write over whatever
# was there.
container.put_block_list(
blob_name=jobStoreFileID,
block_list=blockIDs,
x_ms_meta_name_values=dict(encrypted=str(encrypted)),
)
thread = ExceptionalThread(target=reader)
thread.start()
yield writable
# The writable is now closed. This will send EOF to the readable and cause the reader
# thread to finish.
thread.join()
|
def _uploadStream(
self, jobStoreFileID, container, checkForModification=False, encrypted=None
):
"""
:param encrypted: True to enforce encryption (will raise exception unless key is set),
False to prevent encryption or None to encrypt if key is set.
"""
if checkForModification:
try:
expectedVersion = container.get_blob_properties(blob_name=jobStoreFileID)[
"etag"
]
except AzureMissingResourceHttpError:
expectedVersion = None
if encrypted is None:
encrypted = self.keyPath is not None
elif encrypted:
if self.keyPath is None:
raise RuntimeError("Encryption requested but no key was provided")
maxBlockSize = self._maxAzureBlockBytes
if encrypted:
# There is a small overhead for encrypted data.
maxBlockSize -= encryption.overhead
readable_fh, writable_fh = os.pipe()
with os.fdopen(readable_fh, "r") as readable:
with os.fdopen(writable_fh, "w") as writable:
def reader():
blockIDs = []
try:
while True:
buf = readable.read(maxBlockSize)
if len(buf) == 0:
# We're safe to break here even if we never read anything, since
# putting an empty block list creates an empty blob.
break
if encrypted:
buf = encryption.encrypt(buf, self.keyPath)
blockID = self._newFileID()
container.put_block(
blob_name=jobStoreFileID, block=buf, blockid=blockID
)
blockIDs.append(blockID)
except:
# This is guaranteed to delete any uncommitted
# blocks.
container.delete_blob(blob_name=jobStoreFileID)
raise
if checkForModification and expectedVersion is not None:
# Acquire a (60-second) write lock,
leaseID = container.lease_blob(
blob_name=jobStoreFileID, x_ms_lease_action="acquire"
)["x-ms-lease-id"]
# check for modification,
blobProperties = container.get_blob_properties(
blob_name=jobStoreFileID
)
if blobProperties["etag"] != expectedVersion:
container.lease_blob(
blob_name=jobStoreFileID,
x_ms_lease_action="release",
x_ms_lease_id=leaseID,
)
raise ConcurrentFileModificationException(jobStoreFileID)
# commit the file,
container.put_block_list(
blob_name=jobStoreFileID,
block_list=blockIDs,
x_ms_lease_id=leaseID,
x_ms_meta_name_values=dict(encrypted=str(encrypted)),
)
# then release the lock.
container.lease_blob(
blob_name=jobStoreFileID,
x_ms_lease_action="release",
x_ms_lease_id=leaseID,
)
else:
# No need to check for modification, just blindly write over whatever
# was there.
container.put_block_list(
blob_name=jobStoreFileID,
block_list=blockIDs,
x_ms_meta_name_values=dict(encrypted=str(encrypted)),
)
thread = ExceptionalThread(target=reader)
thread.start()
yield writable
# The writable is now closed. This will send EOF to the readable and cause that
# thread to finish.
thread.join()
|
https://github.com/DataBiosphere/toil/issues/869
|
cmarkello@c1master1:~$ toil clean azure:hgvm:hgvmevaljobstore
2016-05-13 23:08:36,438 INFO:toil.lib.bioio: Logging set at level: INFO
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Parsed arguments
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Checking if we have files for toil
2016-05-13 23:08:36,525 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.table.core.windows.net
2016-05-13 23:08:36,724 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.blob.core.windows.net
Traceback (most recent call last):
File "/usr/local/bin/toil", line 9, in <module>
load_entry_point('toil==3.2.0a2', 'console_scripts', 'toil')()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilMain.py", line 30, in main
module.main()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilClean.py", line 53, in main
jobStore = Toil.loadOrCreateJobStore(options.jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 589, in loadOrCreateJobStore
return AzureJobStore(account, namePrefix, config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 120, in __init__
super(AzureJobStore, self).__init__(config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/abstractJobStore.py", line 141, in __init__
with self.readSharedFileStream("config.pickle") as fileHandle:
File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 318, in readSharedFileStream
raise NoSuchFileException(sharedFileID)
toil.jobStores.abstractJobStore.NoSuchFileException: File 'a77581f6_ad82_5142_acb7_5a6b01eab3fb' does not exist
|
toil.jobStores.abstractJobStore.NoSuchFileException
|
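Both versions of `_uploadStream` above guard the final `put_block_list` the same way: acquire a short write lease, compare the blob's current etag against the version observed when the upload began, commit only on a match, and raise ConcurrentFileModificationException otherwise. A minimal, storage-agnostic sketch of that compare-and-commit idea; the `Blob` class and its in-memory versioning are hypothetical stand-ins, not the Azure SDK:

class ConcurrentFileModificationException(Exception):
    pass

class Blob(object):
    """Hypothetical in-memory stand-in for a versioned blob; not the Azure SDK."""
    def __init__(self):
        self.etag = 0
        self.data = b''

    def commit(self, data, expected_etag):
        # Compare the version observed when the upload began with the current
        # one; refuse to commit rather than silently overwrite a racing write.
        if self.etag != expected_etag:
            raise ConcurrentFileModificationException('blob was modified')
        self.data = data
        self.etag += 1  # a successful commit bumps the version, like a new etag

blob = Blob()
seen = blob.etag            # record the version before starting the upload
blob.commit(b'v1', seen)    # succeeds: nobody wrote in between
try:
    blob.commit(b'v2', seen)  # fails: the etag moved on after the first commit
except ConcurrentFileModificationException as e:
    print('rejected:', e)
|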
def reader():
blockIDs = []
try:
while True:
buf = readable.read(maxBlockSize)
if len(buf) == 0:
# We're safe to break here even if we never read anything, since
# putting an empty block list creates an empty blob.
break
if encrypted:
buf = encryption.encrypt(buf, self.keyPath)
blockID = self._newFileID()
container.put_block(blob_name=jobStoreFileID, block=buf, blockid=blockID)
blockIDs.append(blockID)
except:
with panic(log=logger):
# This is guaranteed to delete any uncommitted blocks.
container.delete_blob(blob_name=jobStoreFileID)
if checkForModification and expectedVersion is not None:
# Acquire a (60-second) write lock,
leaseID = container.lease_blob(
blob_name=jobStoreFileID, x_ms_lease_action="acquire"
)["x-ms-lease-id"]
# check for modification,
blobProperties = container.get_blob_properties(blob_name=jobStoreFileID)
if blobProperties["etag"] != expectedVersion:
container.lease_blob(
blob_name=jobStoreFileID,
x_ms_lease_action="release",
x_ms_lease_id=leaseID,
)
raise ConcurrentFileModificationException(jobStoreFileID)
# commit the file,
container.put_block_list(
blob_name=jobStoreFileID,
block_list=blockIDs,
x_ms_lease_id=leaseID,
x_ms_meta_name_values=dict(encrypted=str(encrypted)),
)
# then release the lock.
container.lease_blob(
blob_name=jobStoreFileID, x_ms_lease_action="release", x_ms_lease_id=leaseID
)
else:
# No need to check for modification, just blindly write over whatever
# was there.
container.put_block_list(
blob_name=jobStoreFileID,
block_list=blockIDs,
x_ms_meta_name_values=dict(encrypted=str(encrypted)),
)
|
def reader():
blockIDs = []
try:
while True:
buf = readable.read(maxBlockSize)
if len(buf) == 0:
# We're safe to break here even if we never read anything, since
# putting an empty block list creates an empty blob.
break
if encrypted:
buf = encryption.encrypt(buf, self.keyPath)
blockID = self._newFileID()
container.put_block(blob_name=jobStoreFileID, block=buf, blockid=blockID)
blockIDs.append(blockID)
except:
# This is guaranteed to delete any uncommitted
# blocks.
container.delete_blob(blob_name=jobStoreFileID)
raise
if checkForModification and expectedVersion is not None:
# Acquire a (60-second) write lock,
leaseID = container.lease_blob(
blob_name=jobStoreFileID, x_ms_lease_action="acquire"
)["x-ms-lease-id"]
# check for modification,
blobProperties = container.get_blob_properties(blob_name=jobStoreFileID)
if blobProperties["etag"] != expectedVersion:
container.lease_blob(
blob_name=jobStoreFileID,
x_ms_lease_action="release",
x_ms_lease_id=leaseID,
)
raise ConcurrentFileModificationException(jobStoreFileID)
# commit the file,
container.put_block_list(
blob_name=jobStoreFileID,
block_list=blockIDs,
x_ms_lease_id=leaseID,
x_ms_meta_name_values=dict(encrypted=str(encrypted)),
)
# then release the lock.
container.lease_blob(
blob_name=jobStoreFileID, x_ms_lease_action="release", x_ms_lease_id=leaseID
)
else:
# No need to check for modification, just blindly write over whatever
# was there.
container.put_block_list(
blob_name=jobStoreFileID,
block_list=blockIDs,
x_ms_meta_name_values=dict(encrypted=str(encrypted)),
)
|
https://github.com/DataBiosphere/toil/issues/869
|
cmarkello@c1master1:~$ toil clean azure:hgvm:hgvmevaljobstore
2016-05-13 23:08:36,438 INFO:toil.lib.bioio: Logging set at level: INFO
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Parsed arguments
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Checking if we have files for toil
2016-05-13 23:08:36,525 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.table.core.windows.net
2016-05-13 23:08:36,724 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.blob.core.windows.net
Traceback (most recent call last):
File "/usr/local/bin/toil", line 9, in <module>
load_entry_point('toil==3.2.0a2', 'console_scripts', 'toil')()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilMain.py", line 30, in main
module.main()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilClean.py", line 53, in main
jobStore = Toil.loadOrCreateJobStore(options.jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 589, in loadOrCreateJobStore
return AzureJobStore(account, namePrefix, config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 120, in __init__
super(AzureJobStore, self).__init__(config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/abstractJobStore.py", line 141, in __init__
with self.readSharedFileStream("config.pickle") as fileHandle:
File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 318, in readSharedFileStream
raise NoSuchFileException(sharedFileID)
toil.jobStores.abstractJobStore.NoSuchFileException: File 'a77581f6_ad82_5142_acb7_5a6b01eab3fb' does not exist
|
toil.jobStores.abstractJobStore.NoSuchFileException
|
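The only difference between the two `reader` functions above is the cleanup path: the fixed version wraps `container.delete_blob` in `panic(log=logger)`, so a failure while deleting the uncommitted blocks is logged instead of replacing the exception that triggered the cleanup. A minimal Python 3 stand-in for such a helper; the real `panic` comes from toil's support library, and this sketch only illustrates the re-raise semantics:

import logging
import sys
from contextlib import contextmanager

logging.basicConfig()
log = logging.getLogger(__name__)

@contextmanager
def panic(log=None):
    """Run cleanup inside an except block without masking the original error."""
    exc_type, exc_value, exc_tb = sys.exc_info()
    try:
        yield
    except Exception:
        # The cleanup itself failed; record that, but don't let it win.
        if log is not None:
            log.exception('Exception during panic')
    # Re-raise whatever was being handled when the panic block was entered.
    raise exc_value.with_traceback(exc_tb)

try:
    try:
        raise ValueError('original failure')
    except Exception:
        with panic(log=log):
            raise RuntimeError('cleanup also failed')  # logged, then swallowed
except ValueError as e:
    print('re-raised:', e)  # the original failure survives
|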
def __init__(self, path):
"""
:param str path: Path to directory holding the job store
"""
super(FileJobStore, self).__init__()
self.jobStoreDir = absSymPath(path)
logger.info("Path to job store directory is '%s'.", self.jobStoreDir)
# Directory where temporary files go
self.tempFilesDir = os.path.join(self.jobStoreDir, "tmp")
|
def __init__(self, jobStoreDir, config=None):
"""
:param jobStoreDir: Place to create jobStore
:param config: See jobStores.abstractJobStore.AbstractJobStore.__init__
:raise RuntimeError: if config != None and the jobStore already exists or
        config == None and the jobStore does not already exist.
"""
# This is root directory in which everything in the store is kept
self.jobStoreDir = absSymPath(jobStoreDir)
logger.info("Jobstore directory is: %s", self.jobStoreDir)
# Safety checks for existing jobStore
self._checkJobStoreCreation(
create=config is not None,
exists=os.path.exists(self.jobStoreDir),
locator=self.jobStoreDir,
)
# Directory where temporary files go
self.tempFilesDir = os.path.join(self.jobStoreDir, "tmp")
# Creation of jobStore, if necessary
if config is not None:
os.mkdir(self.jobStoreDir)
os.mkdir(self.tempFilesDir)
# Parameters for creating temporary files
self.validDirs = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
self.levels = 2
super(FileJobStore, self).__init__(config=config)
|
https://github.com/DataBiosphere/toil/issues/869
|
cmarkello@c1master1:~$ toil clean azure:hgvm:hgvmevaljobstore
2016-05-13 23:08:36,438 INFO:toil.lib.bioio: Logging set at level: INFO
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Parsed arguments
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Checking if we have files for toil
2016-05-13 23:08:36,525 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.table.core.windows.net
2016-05-13 23:08:36,724 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.blob.core.windows.net
Traceback (most recent call last):
File "/usr/local/bin/toil", line 9, in <module>
load_entry_point('toil==3.2.0a2', 'console_scripts', 'toil')()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilMain.py", line 30, in main
module.main()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilClean.py", line 53, in main
jobStore = Toil.loadOrCreateJobStore(options.jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 589, in loadOrCreateJobStore
return AzureJobStore(account, namePrefix, config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 120, in __init__
super(AzureJobStore, self).__init__(config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/abstractJobStore.py", line 141, in __init__
with self.readSharedFileStream("config.pickle") as fileHandle:
File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 318, in readSharedFileStream
raise NoSuchFileException(sharedFileID)
toil.jobStores.abstractJobStore.NoSuchFileException: File 'a77581f6_ad82_5142_acb7_5a6b01eab3fb' does not exist
|
toil.jobStores.abstractJobStore.NoSuchFileException
|
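In the `FileJobStore` pair above, the older constructor overloads `config` to decide between creating and loading a store and raises `RuntimeError` on a mismatch, while the newer one does no filesystem work at all: creation and attachment move into separate `initialize`/`resume` steps. A rough sketch of that split over a plain directory; the exception names follow the idea rather than toil's exact classes:

import os

class NoSuchJobStoreException(Exception):
    """Raised when resuming a job store that does not exist."""

class JobStoreExistsException(Exception):
    """Raised when initializing a job store that already exists."""

class MiniFileJobStore(object):
    def __init__(self, path):
        # Constructing the object no longer touches the filesystem at all.
        self.job_store_dir = os.path.abspath(path)
        self.config = None

    def initialize(self, config):
        if os.path.exists(self.job_store_dir):
            raise JobStoreExistsException(self.job_store_dir)
        os.makedirs(os.path.join(self.job_store_dir, 'tmp'))
        self.config = config

    def resume(self):
        if not os.path.exists(self.job_store_dir):
            raise NoSuchJobStoreException(self.job_store_dir)
        # A real store would reload its persisted config here instead.
|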
def main():
parser = getBasicOptionParser()
parser.add_argument(
"jobStore",
type=str,
help="The location of the job store to delete. " + jobStoreLocatorHelp,
)
parser.add_argument("--version", action="version", version=version)
options = parseBasicOptions(parser)
logger.info("Attempting to delete the job store")
jobStore = Toil.getJobStore(options.jobStore)
jobStore.destroy()
logger.info("Successfully deleted the job store")
|
def main():
"""Removes the JobStore from a toil run."""
##########################################
# Construct the arguments.
##########################################
parser = getBasicOptionParser()
parser.add_argument(
"jobStore",
type=str,
help=(
"Store in which to place job management files \
and the global accessed temporary files"
"(If this is a file path this needs to be globally accessible "
"by all machines running jobs).\n"
"If the store already exists and restart is false an"
" JobStoreCreationException exception will be thrown."
),
)
parser.add_argument("--version", action="version", version=version)
options = parseBasicOptions(parser)
logger.info("Parsed arguments")
##########################################
# Survey the status of the job and report.
##########################################
logger.info("Checking if we have files for toil")
try:
jobStore = Toil.loadOrCreateJobStore(options.jobStore)
except JobStoreCreationException:
logger.info(
"The specified JobStore does not exist, it may have already been deleted"
)
sys.exit(0)
logger.info("Attempting to delete the job store")
jobStore.deleteJobStore()
logger.info("Successfully deleted the job store")
|
https://github.com/DataBiosphere/toil/issues/869
|
cmarkello@c1master1:~$ toil clean azure:hgvm:hgvmevaljobstore
2016-05-13 23:08:36,438 INFO:toil.lib.bioio: Logging set at level: INFO
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Parsed arguments
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Checking if we have files for toil
2016-05-13 23:08:36,525 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.table.core.windows.net
2016-05-13 23:08:36,724 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.blob.core.windows.net
Traceback (most recent call last):
File "/usr/local/bin/toil", line 9, in <module>
load_entry_point('toil==3.2.0a2', 'console_scripts', 'toil')()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilMain.py", line 30, in main
module.main()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilClean.py", line 53, in main
jobStore = Toil.loadOrCreateJobStore(options.jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 589, in loadOrCreateJobStore
return AzureJobStore(account, namePrefix, config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 120, in __init__
super(AzureJobStore, self).__init__(config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/abstractJobStore.py", line 141, in __init__
with self.readSharedFileStream("config.pickle") as fileHandle:
File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 318, in readSharedFileStream
raise NoSuchFileException(sharedFileID)
toil.jobStores.abstractJobStore.NoSuchFileException: File 'a77581f6_ad82_5142_acb7_5a6b01eab3fb' does not exist
|
toil.jobStores.abstractJobStore.NoSuchFileException
|
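With creation and attachment separated, `toil clean` can resume the store and call `destroy()` on it, and a store that is already gone becomes a clean no-op instead of the `NoSuchFileException` in the traceback above. A simplified, directory-backed sketch of that control flow; this is a hypothetical helper, not the actual utility:

import os
import shutil
import sys

def clean(job_store_dir):
    """Delete a directory-backed job store, tolerating its absence."""
    if not os.path.exists(job_store_dir):
        # Mirrors the utility exiting cleanly when there is nothing to delete.
        print('Job store %s does not exist; it may already have been deleted.'
              % job_store_dir)
        return 0
    shutil.rmtree(job_store_dir)
    print('Successfully deleted the job store at %s.' % job_store_dir)
    return 0

if __name__ == '__main__':
    sys.exit(clean(sys.argv[1]))
|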
def main():
parser = getBasicOptionParser()
parser.add_argument(
"jobStore",
type=str,
help="The location of the job store used by the workflow whose jobs should "
"be killed." + jobStoreLocatorHelp,
)
parser.add_argument("--version", action="version", version=version)
options = parseBasicOptions(parser)
jobStore = Toil.resumeJobStore(options.jobStore)
logger.info(
"Starting routine to kill running jobs in the toil workflow: %s"
% options.jobStore
)
    #### This behaviour is now broken
batchSystem = Toil.createBatchSystem(
jobStore.config
    ) # This should automatically kill the existing jobs... so we're good.
for jobID in batchSystem.getIssuedBatchJobIDs(): # Just in case we do it again.
batchSystem.killBatchJobs(jobID)
logger.info("All jobs SHOULD have been killed")
|
def main():
parser = getBasicOptionParser()
parser.add_argument(
"jobStore",
type=str,
help=(
"Store in which to place job management files \
and the global accessed temporary files"
"(If this is a file path this needs to be globally accessible "
"by all machines running jobs).\n"
"If the store already exists and restart is false an"
" JobStoreCreationException exception will be thrown."
),
)
parser.add_argument("--version", action="version", version=version)
options = parseBasicOptions(parser)
jobStore = Toil.loadOrCreateJobStore(options.jobStore)
logger.info(
"Starting routine to kill running jobs in the toil workflow: %s"
% options.jobStore
)
    #### This behaviour is now broken
batchSystem = Toil.createBatchSystem(
jobStore.config
    ) # This should automatically kill the existing jobs... so we're good.
for jobID in batchSystem.getIssuedBatchJobIDs(): # Just in case we do it again.
batchSystem.killBatchJobs(jobID)
logger.info("All jobs SHOULD have been killed")
|
https://github.com/DataBiosphere/toil/issues/869
|
cmarkello@c1master1:~$ toil clean azure:hgvm:hgvmevaljobstore
2016-05-13 23:08:36,438 INFO:toil.lib.bioio: Logging set at level: INFO
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Parsed arguments
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Checking if we have files for toil
2016-05-13 23:08:36,525 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.table.core.windows.net
2016-05-13 23:08:36,724 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.blob.core.windows.net
Traceback (most recent call last):
File "/usr/local/bin/toil", line 9, in <module>
load_entry_point('toil==3.2.0a2', 'console_scripts', 'toil')()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilMain.py", line 30, in main
module.main()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilClean.py", line 53, in main
jobStore = Toil.loadOrCreateJobStore(options.jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 589, in loadOrCreateJobStore
return AzureJobStore(account, namePrefix, config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 120, in __init__
super(AzureJobStore, self).__init__(config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/abstractJobStore.py", line 141, in __init__
with self.readSharedFileStream("config.pickle") as fileHandle:
File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 318, in readSharedFileStream
raise NoSuchFileException(sharedFileID)
toil.jobStores.abstractJobStore.NoSuchFileException: File 'a77581f6_ad82_5142_acb7_5a6b01eab3fb' does not exist
|
toil.jobStores.abstractJobStore.NoSuchFileException
|
def main():
"""Reports the state of the toil."""
##########################################
# Construct the arguments.
##########################################
parser = getBasicOptionParser()
parser.add_argument(
"jobStore",
type=str,
help="The location of a job store that holds the information about the "
"workflow whose status is to be reported on." + jobStoreLocatorHelp,
)
parser.add_argument(
"--verbose",
dest="verbose",
action="store_true",
help="Print loads of information, particularly all the log files of \
jobs that failed. default=%(default)s",
default=False,
)
parser.add_argument(
"--failIfNotComplete",
dest="failIfNotComplete",
action="store_true",
help="Return exit value of 1 if toil jobs not all completed. default=%(default)s",
default=False,
)
parser.add_argument("--version", action="version", version=version)
options = parseBasicOptions(parser)
logger.info("Parsed arguments")
if len(sys.argv) == 1:
parser.print_help()
sys.exit(0)
##########################################
# Do some checks.
##########################################
logger.info("Checking if we have files for Toil")
assert options.jobStore is not None
##########################################
# Survey the status of the job and report.
##########################################
jobStore = Toil.resumeJobStore(options.jobStore)
try:
rootJob = jobStore.loadRootJob()
except JobException:
print(
"The root job of the job store is absent, the workflow completed successfully.",
file=sys.stderr,
)
sys.exit(0)
toilState = ToilState(jobStore, rootJob)
# The first element of the toilState.updatedJobs tuple is the jobWrapper we want to inspect
totalJobs = set(toilState.successorCounts.keys()) | {
jobTuple[0] for jobTuple in toilState.updatedJobs
}
failedJobs = [job for job in totalJobs if job.remainingRetryCount == 0]
print(
"There are %i active jobs, %i parent jobs with children, and %i totally failed jobs "
"currently in %s."
% (
len(toilState.updatedJobs),
len(toilState.successorCounts),
len(failedJobs),
options.jobStore,
),
file=sys.stderr,
)
if (
options.verbose
): # Verbose currently means outputting the files that have failed.
for job in failedJobs:
if job.logJobStoreFileID is not None:
with job.getLogFileHandle(jobStore) as logFileHandle:
logStream(logFileHandle, job.jobStoreID, logger.warn)
else:
print(
"Log file for job %s is absent." % job.jobStoreID, file=sys.stderr
)
if len(failedJobs) == 0:
print("There are no failed jobs to report.", file=sys.stderr)
if (
len(toilState.updatedJobs) + len(toilState.successorCounts)
) != 0 and options.failIfNotComplete:
sys.exit(1)
|
def main():
"""Reports the state of the toil."""
##########################################
# Construct the arguments.
##########################################
parser = getBasicOptionParser()
parser.add_argument(
"jobStore",
type=str,
help=(
"Store in which to place job management files \
and the global accessed temporary files"
"(If this is a file path this needs to be globally accessible "
"by all machines running jobs).\n"
"If the store already exists and restart is false an"
" JobStoreCreationException exception will be thrown."
),
)
parser.add_argument(
"--verbose",
dest="verbose",
action="store_true",
help="Print loads of information, particularly all the log files of \
jobs that failed. default=%(default)s",
default=False,
)
parser.add_argument(
"--failIfNotComplete",
dest="failIfNotComplete",
action="store_true",
help="Return exit value of 1 if toil jobs not all completed. default=%(default)s",
default=False,
)
parser.add_argument("--version", action="version", version=version)
options = parseBasicOptions(parser)
logger.info("Parsed arguments")
if len(sys.argv) == 1:
parser.print_help()
sys.exit(0)
##########################################
# Do some checks.
##########################################
logger.info("Checking if we have files for Toil")
assert options.jobStore is not None
##########################################
# Survey the status of the job and report.
##########################################
jobStore = Toil.loadOrCreateJobStore(options.jobStore)
try:
rootJob = jobStore.loadRootJob()
except JobException:
print(
"The root job of the job store is absent, the workflow completed successfully.",
file=sys.stderr,
)
sys.exit(0)
toilState = ToilState(jobStore, rootJob)
# The first element of the toilState.updatedJobs tuple is the jobWrapper we want to inspect
totalJobs = set(toilState.successorCounts.keys()) | {
jobTuple[0] for jobTuple in toilState.updatedJobs
}
failedJobs = [job for job in totalJobs if job.remainingRetryCount == 0]
print(
"There are %i active jobs, %i parent jobs with children, and %i totally failed jobs "
"currently in %s."
% (
len(toilState.updatedJobs),
len(toilState.successorCounts),
len(failedJobs),
options.jobStore,
),
file=sys.stderr,
)
if (
options.verbose
): # Verbose currently means outputting the files that have failed.
for job in failedJobs:
if job.logJobStoreFileID is not None:
with job.getLogFileHandle(jobStore) as logFileHandle:
logStream(logFileHandle, job.jobStoreID, logger.warn)
else:
print(
"Log file for job %s is absent." % job.jobStoreID, file=sys.stderr
)
if len(failedJobs) == 0:
print("There are no failed jobs to report.", file=sys.stderr)
if (
len(toilState.updatedJobs) + len(toilState.successorCounts)
) != 0 and options.failIfNotComplete:
sys.exit(1)
|
https://github.com/DataBiosphere/toil/issues/869
|
cmarkello@c1master1:~$ toil clean azure:hgvm:hgvmevaljobstore
2016-05-13 23:08:36,438 INFO:toil.lib.bioio: Logging set at level: INFO
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Parsed arguments
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Checking if we have files for toil
2016-05-13 23:08:36,525 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.table.core.windows.net
2016-05-13 23:08:36,724 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.blob.core.windows.net
Traceback (most recent call last):
File "/usr/local/bin/toil", line 9, in <module>
load_entry_point('toil==3.2.0a2', 'console_scripts', 'toil')()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilMain.py", line 30, in main
module.main()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilClean.py", line 53, in main
jobStore = Toil.loadOrCreateJobStore(options.jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 589, in loadOrCreateJobStore
return AzureJobStore(account, namePrefix, config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 120, in __init__
super(AzureJobStore, self).__init__(config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/abstractJobStore.py", line 141, in __init__
with self.readSharedFileStream("config.pickle") as fileHandle:
File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 318, in readSharedFileStream
raise NoSuchFileException(sharedFileID)
toil.jobStores.abstractJobStore.NoSuchFileException: File 'a77581f6_ad82_5142_acb7_5a6b01eab3fb' does not exist
|
toil.jobStores.abstractJobStore.NoSuchFileException
|
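The status report in both versions above is computed the same way: jobs in `toilState.updatedJobs` plus parents with outstanding successors form the population, and any job whose `remainingRetryCount` has dropped to zero counts as totally failed. A toy sketch of that bookkeeping over plain records; the field names only mimic the job wrappers, and nothing here is toil's API:

from collections import namedtuple

Job = namedtuple('Job', 'job_store_id remaining_retry_count')

def summarize(updated_jobs, successor_counts):
    """Return (active, parents-with-children, totally-failed) counts."""
    total_jobs = set(updated_jobs) | set(successor_counts)
    failed = [job for job in total_jobs if job.remaining_retry_count == 0]
    return len(updated_jobs), len(successor_counts), len(failed)

a = Job('a', 2)
b = Job('b', 0)  # out of retries: counted as totally failed
print(summarize({a, b}, {}))  # prints (2, 0, 1)
|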
def importFile(self, srcUrl, sharedFileName=None):
"""
    Imports the file at the given URL into the job store. The ID of the newly imported
    file is returned. If a shared file name is provided, the file will be imported as a
    shared file and None is returned.
Currently supported schemes are:
- 's3' for objects in Amazon S3
e.g. s3://bucket/key
- 'wasb' for blobs in Azure Blob Storage
e.g. wasb://container/blob
- 'file' for local files
e.g. file:///local/file/path
- 'http'
e.g. http://someurl.com/path
:param str srcUrl: URL that points to a file or object in the storage mechanism of a
supported URL scheme e.g. a blob in an Azure Blob Storage container.
:param str sharedFileName: Optional name to assign to the imported file within the job store
:return The jobStoreFileId of the imported file or None if sharedFileName was given
:rtype: str|None
"""
# Note that the helper method _importFile is used to read from the source and write to
# destination (which is the current job store in this case). To implement any
# optimizations that circumvent this, the _importFile method should be overridden by
# subclasses of AbstractJobStore.
srcUrl = urlparse.urlparse(srcUrl)
otherCls = self._findJobStoreForUrl(srcUrl)
return self._importFile(otherCls, srcUrl, sharedFileName=sharedFileName)
|
def importFile(self, srcUrl, sharedFileName=None):
"""
    Imports the file at the given URL into the job store. The jobStoreFileId of the new
file is returned. If a shared file name is given, the file will be imported as a shared
file and None is returned.
Note that the helper method _importFile is used to read from the source and write to
destination (which is the current job store in this case). To implement any optimizations that
circumvent this, the _importFile method should be overridden by subclasses of AbstractJobStore.
Currently supported schemes are:
- 's3' for objects in Amazon S3
e.g. s3://bucket/key
- 'wasb' for blobs in Azure Blob Storage
e.g. wasb://container/blob
- 'file' for local files
e.g. file:///local/file/path
- 'http'
e.g. http://someurl.com/path
:param str srcUrl: URL that points to a file or object in the storage mechanism of a
supported URL scheme e.g. a blob in an Azure Blob Storage container.
:param str sharedFileName: Optional name to assign to the imported file within the job store
:return The jobStoreFileId of the imported file or None if sharedFileName was given
:rtype: str|None
"""
url = urlparse.urlparse(srcUrl)
otherCls = findJobStoreForUrl(url)
return self._importFile(otherCls, url, sharedFileName=sharedFileName)
|
https://github.com/DataBiosphere/toil/issues/785
|
ubuntu@ip-172-31-24-34:~/common-workflow-language$ toil clean aws:us-west-2:peter-cwltoil-test
2016-04-13 15:36:32,632 INFO:toil.lib.bioio: Logging set at level: INFO
2016-04-13 15:36:32,632 INFO:toil.utils.toilClean: Parsed arguments
2016-04-13 15:36:32,633 INFO:toil.utils.toilClean: Checking if we have files for toil
Traceback (most recent call last):
File "/usr/local/bin/toil", line 9, in <module>
load_entry_point('toil==3.2.0a2.dev134', 'console_scripts', 'toil')()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilMain.py", line 30, in main
module.main()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilClean.py", line 53, in main
jobStore = Toil.loadOrCreateJobStore(options.jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 501, in loadOrCreateJobStore
return AWSJobStore.loadOrCreateJobStore(jobStoreArgs, config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 117, in loadOrCreateJobStore
return cls(region, namePrefix, config=config, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 174, in __init__
self.filesBucket = self._getOrCreateBucket(qualify('files'), versioning=True)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 469, in _getOrCreateBucket
bucket = self.s3.create_bucket(bucket_name, location=self.region)
File "/usr/local/lib/python2.7/dist-packages/boto/s3/connection.py", line 616, in create_bucket
response.status, response.reason, body)
boto.exception.S3CreateError: S3CreateError: 409 Conflict
<?xml version="1.0" encoding="UTF-8"?>
<Error><Code>BucketAlreadyExists</Code><Message>The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.</Message><BucketName>peter-cwltoil-test--files</BucketName><RequestId>DAF5D5E6B138F1AB</RequestId><HostId>t9Fz0/fKNcxA5q7gLBuk7FMu9AxCD4AdyXMIWhWOtJVyNS8ARDxnWDQOBZdeTmpkcVtXa9brMss=</HostId></Error>
|
boto.exception.S3CreateError
|
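`importFile` above reduces to two steps: parse the source URL and ask a lookup (`_findJobStoreForUrl`, moved from a free function onto the class in the newer version) for the job store class that can read that scheme. A minimal sketch of scheme dispatch over `urlparse`, with a hypothetical handler table standing in for the job store classes:

try:
    from urllib.parse import urlparse  # Python 3
except ImportError:
    from urlparse import urlparse      # Python 2, as in the code above

def read_local_file(url):
    with open(url.path, 'rb') as f:
        return f.read()

# Hypothetical scheme -> reader table; the real lookup asks each job store
# class whether it supports the URL (file, s3, wasb, http, ...).
HANDLERS = {'file': read_local_file}

def find_handler(src_url):
    url = urlparse(src_url)
    try:
        return HANDLERS[url.scheme], url
    except KeyError:
        raise RuntimeError('No handler for URL scheme %r' % url.scheme)

handler, url = find_handler('file:///tmp/example.txt')
|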
def exportFile(self, jobStoreFileID, dstUrl):
"""
Exports file to destination pointed at by the destination URL.
Refer to AbstractJobStore.importFile documentation for currently supported URL schemes.
Note that the helper method _exportFile is used to read from the source and write to
destination. To implement any optimizations that circumvent this, the _exportFile method
should be overridden by subclasses of AbstractJobStore.
:param str jobStoreFileID: The id of the file in the job store that should be exported.
:param str dstUrl: URL that points to a file or object in the storage mechanism of a
supported URL scheme e.g. a blob in an Azure Blob Storage container.
"""
dstUrl = urlparse.urlparse(dstUrl)
otherCls = self._findJobStoreForUrl(dstUrl, export=True)
return self._exportFile(otherCls, jobStoreFileID, dstUrl)
|
def exportFile(self, jobStoreFileID, dstUrl):
"""
Exports file to destination pointed at by the destination URL.
Refer to AbstractJobStore.importFile documentation for currently supported URL schemes.
Note that the helper method _exportFile is used to read from the source and write to
destination. To implement any optimizations that circumvent this, the _exportFile method
should be overridden by subclasses of AbstractJobStore.
:param str jobStoreFileID: The id of the file in the job store that should be exported.
:param str dstUrl: URL that points to a file or object in the storage mechanism of a
supported URL scheme e.g. a blob in an Azure Blob Storage container.
"""
url = urlparse.urlparse(dstUrl)
return self._exportFile(findJobStoreForUrl(url, export=True), jobStoreFileID, url)
|
https://github.com/DataBiosphere/toil/issues/785
|
ubuntu@ip-172-31-24-34:~/common-workflow-language$ toil clean aws:us-west-2:peter-cwltoil-test
2016-04-13 15:36:32,632 INFO:toil.lib.bioio: Logging set at level: INFO
2016-04-13 15:36:32,632 INFO:toil.utils.toilClean: Parsed arguments
2016-04-13 15:36:32,633 INFO:toil.utils.toilClean: Checking if we have files for toil
Traceback (most recent call last):
File "/usr/local/bin/toil", line 9, in <module>
load_entry_point('toil==3.2.0a2.dev134', 'console_scripts', 'toil')()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilMain.py", line 30, in main
module.main()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilClean.py", line 53, in main
jobStore = Toil.loadOrCreateJobStore(options.jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 501, in loadOrCreateJobStore
return AWSJobStore.loadOrCreateJobStore(jobStoreArgs, config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 117, in loadOrCreateJobStore
return cls(region, namePrefix, config=config, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 174, in __init__
self.filesBucket = self._getOrCreateBucket(qualify('files'), versioning=True)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 469, in _getOrCreateBucket
bucket = self.s3.create_bucket(bucket_name, location=self.region)
File "/usr/local/lib/python2.7/dist-packages/boto/s3/connection.py", line 616, in create_bucket
response.status, response.reason, body)
boto.exception.S3CreateError: S3CreateError: 409 Conflict
<?xml version="1.0" encoding="UTF-8"?>
<Error><Code>BucketAlreadyExists</Code><Message>The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.</Message><BucketName>peter-cwltoil-test--files</BucketName><RequestId>DAF5D5E6B138F1AB</RequestId><HostId>t9Fz0/fKNcxA5q7gLBuk7FMu9AxCD4AdyXMIWhWOtJVyNS8ARDxnWDQOBZdeTmpkcVtXa9brMss=</HostId></Error>
|
boto.exception.S3CreateError
|
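`exportFile` is the mirror image: resolve a handler for the destination URL and stream the stored file into it through `_exportFile`. The generic default amounts to copying a readable stream into a writable one, which `shutil.copyfileobj` expresses directly; a sketch with in-memory buffers standing in for the two stores:

import io
import shutil

def export_stream(readable, writable):
    # The generic default of _exportFile: read from this store's stream and
    # write to whatever target the destination handler provides.
    shutil.copyfileobj(readable, writable)

src = io.BytesIO(b'job store payload')
dst = io.BytesIO()
export_stream(src, dst)
assert dst.getvalue() == b'job store payload'
|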
def _exportFile(self, otherCls, jobStoreFileID, url):
"""
Refer to exportFile docstring for information about this method.
:param AbstractJobStore otherCls: The concrete subclass of AbstractJobStore that supports
exporting to the given URL. Note that the type annotation here is not completely
accurate. This is not an instance, it's a class, but there is no way to reflect
that in PEP-484 type hints.
:param str jobStoreFileID: The id of the file that will be exported.
:param urlparse.ParseResult url: The parsed URL of the file to export to.
"""
with self.readFileStream(jobStoreFileID) as readable:
otherCls._writeToUrl(readable, url)
|
def _exportFile(self, otherCls, jobStoreFileID, url):
"""
Refer to exportFile docstring for information about this method.
:param type otherCls: The concrete subclass of AbstractJobStore that supports exporting to the given URL.
:param str jobStoreFileID: The id of the file that will be exported.
:param urlparse.ParseResult url: The parsed url given to importFile.
"""
with self.readFileStream(jobStoreFileID) as readable:
otherCls._writeToUrl(readable, url)
|
https://github.com/DataBiosphere/toil/issues/785
|
ubuntu@ip-172-31-24-34:~/common-workflow-language$ toil clean aws:us-west-2:peter-cwltoil-test
2016-04-13 15:36:32,632 INFO:toil.lib.bioio: Logging set at level: INFO
2016-04-13 15:36:32,632 INFO:toil.utils.toilClean: Parsed arguments
2016-04-13 15:36:32,633 INFO:toil.utils.toilClean: Checking if we have files for toil
Traceback (most recent call last):
File "/usr/local/bin/toil", line 9, in <module>
load_entry_point('toil==3.2.0a2.dev134', 'console_scripts', 'toil')()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilMain.py", line 30, in main
module.main()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilClean.py", line 53, in main
jobStore = Toil.loadOrCreateJobStore(options.jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 501, in loadOrCreateJobStore
return AWSJobStore.loadOrCreateJobStore(jobStoreArgs, config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 117, in loadOrCreateJobStore
return cls(region, namePrefix, config=config, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 174, in __init__
self.filesBucket = self._getOrCreateBucket(qualify('files'), versioning=True)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 469, in _getOrCreateBucket
bucket = self.s3.create_bucket(bucket_name, location=self.region)
File "/usr/local/lib/python2.7/dist-packages/boto/s3/connection.py", line 616, in create_bucket
response.status, response.reason, body)
boto.exception.S3CreateError: S3CreateError: 409 Conflict
<?xml version="1.0" encoding="UTF-8"?>
<Error><Code>BucketAlreadyExists</Code><Message>The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.</Message><BucketName>peter-cwltoil-test--files</BucketName><RequestId>DAF5D5E6B138F1AB</RequestId><HostId>t9Fz0/fKNcxA5q7gLBuk7FMu9AxCD4AdyXMIWhWOtJVyNS8ARDxnWDQOBZdeTmpkcVtXa9brMss=</HostId></Error>
|
boto.exception.S3CreateError
|
def getConnectedJobs(jobWrapper):
if jobWrapper.jobStoreID in reachableFromRoot:
return
reachableFromRoot.add(jobWrapper.jobStoreID)
# Traverse jobs in stack
for jobs in jobWrapper.stack:
for successorJobStoreID in map(lambda x: x[0], jobs):
if successorJobStoreID not in reachableFromRoot and haveJob(
successorJobStoreID
):
getConnectedJobs(getJob(successorJobStoreID))
# Traverse service jobs
for jobs in jobWrapper.services:
for serviceJobStoreID in map(lambda x: x[0], jobs):
if haveJob(serviceJobStoreID):
assert serviceJobStoreID not in reachableFromRoot
reachableFromRoot.add(serviceJobStoreID)
|
def getConnectedJobs(jobWrapper):
if jobWrapper.jobStoreID in reachableFromRoot:
return
reachableFromRoot.add(jobWrapper.jobStoreID)
# Traverse jobs in stack
for jobs in jobWrapper.stack:
for successorJobStoreID in map(lambda x: x[0], jobs):
if successorJobStoreID not in reachableFromRoot and haveJob(
successorJobStoreID
):
getConnectedJobs(getJob(successorJobStoreID))
# Traverse service jobs
for jobs in jobWrapper.services:
for serviceJobStoreID in map(lambda x: x[0], jobs):
if haveJob(serviceJobStoreID):
assert serviceJobStoreID not in reachableFromRoot
reachableFromRoot.add(serviceJobStoreID)
|
https://github.com/DataBiosphere/toil/issues/785
|
ubuntu@ip-172-31-24-34:~/common-workflow-language$ toil clean aws:us-west-2:peter-cwltoil-test
2016-04-13 15:36:32,632 INFO:toil.lib.bioio: Logging set at level: INFO
2016-04-13 15:36:32,632 INFO:toil.utils.toilClean: Parsed arguments
2016-04-13 15:36:32,633 INFO:toil.utils.toilClean: Checking if we have files for toil
Traceback (most recent call last):
File "/usr/local/bin/toil", line 9, in <module>
load_entry_point('toil==3.2.0a2.dev134', 'console_scripts', 'toil')()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilMain.py", line 30, in main
module.main()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilClean.py", line 53, in main
jobStore = Toil.loadOrCreateJobStore(options.jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 501, in loadOrCreateJobStore
return AWSJobStore.loadOrCreateJobStore(jobStoreArgs, config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 117, in loadOrCreateJobStore
return cls(region, namePrefix, config=config, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 174, in __init__
self.filesBucket = self._getOrCreateBucket(qualify('files'), versioning=True)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 469, in _getOrCreateBucket
bucket = self.s3.create_bucket(bucket_name, location=self.region)
File "/usr/local/lib/python2.7/dist-packages/boto/s3/connection.py", line 616, in create_bucket
response.status, response.reason, body)
boto.exception.S3CreateError: S3CreateError: 409 Conflict
<?xml version="1.0" encoding="UTF-8"?>
<Error><Code>BucketAlreadyExists</Code><Message>The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.</Message><BucketName>peter-cwltoil-test--files</BucketName><RequestId>DAF5D5E6B138F1AB</RequestId><HostId>t9Fz0/fKNcxA5q7gLBuk7FMu9AxCD4AdyXMIWhWOtJVyNS8ARDxnWDQOBZdeTmpkcVtXa9brMss=</HostId></Error>
|
boto.exception.S3CreateError
|
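`getConnectedJobs` above (unchanged between the two versions) marks every job reachable from the root by recursing through each wrapper's `stack` and `services`. On a sufficiently deep chain of successors that recursion could hit Python's recursion limit; an explicit stack yields the same reachability set iteratively. A sketch over a plain adjacency mapping, which is a hypothetical simplification of the wrapper structure:

def connected_jobs(root, successors):
    """Iterative reachability over a {job_id: [successor_id, ...]} mapping."""
    reachable = set()
    pending = [root]
    while pending:
        job_id = pending.pop()
        if job_id in reachable:
            continue
        reachable.add(job_id)
        pending.extend(successors.get(job_id, ()))
    return reachable

graph = {'root': ['a', 'b'], 'a': ['c'], 'b': ['c'], 'c': []}
print(sorted(connected_jobs('root', graph)))  # ['a', 'b', 'c', 'root']
|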
def uploadStream(self, multipart=True, allowInlining=True):
info = self
store = self.outer
class MultiPartPipe(WritablePipe):
def readFrom(self, readable):
buf = readable.read(store.partSize)
if allowInlining and len(buf) <= info._maxInlinedSize():
info.content = buf
else:
headers = info._s3EncryptionHeaders()
for attempt in retry_s3():
with attempt:
upload = store.filesBucket.initiate_multipart_upload(
key_name=info.fileID, headers=headers
)
try:
for part_num in itertools.count():
# There must be at least one part, even if the file is empty.
if len(buf) == 0 and part_num > 0:
break
for attempt in retry_s3():
with attempt:
upload.upload_part_from_file(
fp=StringIO(buf),
# part numbers are 1-based
part_num=part_num + 1,
headers=headers,
)
if len(buf) == 0:
break
buf = readable.read(info.outer.partSize)
except:
with panic(log=log):
for attempt in retry_s3():
with attempt:
upload.cancel_upload()
else:
for attempt in retry_s3():
with attempt:
info.version = upload.complete_upload().version_id
class SinglePartPipe(WritablePipe):
def readFrom(self, readable):
buf = readable.read()
if allowInlining and len(buf) <= info._maxInlinedSize():
info.content = buf
else:
key = store.filesBucket.new_key(key_name=info.fileID)
buf = StringIO(buf)
headers = info._s3EncryptionHeaders()
for attempt in retry_s3():
with attempt:
assert buf.len == key.set_contents_from_file(
fp=buf, headers=headers
)
info.version = key.version_id
with MultiPartPipe() if multipart else SinglePartPipe() as writable:
yield writable
assert bool(self.version) == (self.content is None)
|
def uploadStream(self, multipart=True, allowInlining=True):
store = self.outer
readable_fh, writable_fh = os.pipe()
with os.fdopen(readable_fh, "r") as readable:
with os.fdopen(writable_fh, "w") as writable:
def multipartReader():
buf = readable.read(store.partSize)
if allowInlining and len(buf) <= self._maxInlinedSize():
self.content = buf
else:
headers = self._s3EncryptionHeaders()
for attempt in retry_s3():
with attempt:
upload = store.filesBucket.initiate_multipart_upload(
key_name=self.fileID, headers=headers
)
try:
for part_num in itertools.count():
# There must be at least one part, even if the file is empty.
if len(buf) == 0 and part_num > 0:
break
for attempt in retry_s3():
with attempt:
upload.upload_part_from_file(
fp=StringIO(buf),
# part numbers are 1-based
part_num=part_num + 1,
headers=headers,
)
if len(buf) == 0:
break
buf = readable.read(self.outer.partSize)
except:
with panic(log=log):
for attempt in retry_s3():
with attempt:
upload.cancel_upload()
else:
for attempt in retry_s3():
with attempt:
self.version = upload.complete_upload().version_id
def reader():
buf = readable.read()
if allowInlining and len(buf) <= self._maxInlinedSize():
self.content = buf
else:
key = store.filesBucket.new_key(key_name=self.fileID)
buf = StringIO(buf)
headers = self._s3EncryptionHeaders()
for attempt in retry_s3():
with attempt:
assert buf.len == key.set_contents_from_file(
fp=buf, headers=headers
)
self.version = key.version_id
thread = ExceptionalThread(target=multipartReader if multipart else reader)
thread.start()
yield writable
# The writable is now closed. This will send EOF to the readable and cause that
# thread to finish.
thread.join()
assert bool(self.version) == (self.content is None)
|
https://github.com/DataBiosphere/toil/issues/785
|
ubuntu@ip-172-31-24-34:~/common-workflow-language$ toil clean aws:us-west-2:peter-cwltoil-test
2016-04-13 15:36:32,632 INFO:toil.lib.bioio: Logging set at level: INFO
2016-04-13 15:36:32,632 INFO:toil.utils.toilClean: Parsed arguments
2016-04-13 15:36:32,633 INFO:toil.utils.toilClean: Checking if we have files for toil
Traceback (most recent call last):
File "/usr/local/bin/toil", line 9, in <module>
load_entry_point('toil==3.2.0a2.dev134', 'console_scripts', 'toil')()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilMain.py", line 30, in main
module.main()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilClean.py", line 53, in main
jobStore = Toil.loadOrCreateJobStore(options.jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 501, in loadOrCreateJobStore
return AWSJobStore.loadOrCreateJobStore(jobStoreArgs, config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 117, in loadOrCreateJobStore
return cls(region, namePrefix, config=config, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 174, in __init__
self.filesBucket = self._getOrCreateBucket(qualify('files'), versioning=True)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 469, in _getOrCreateBucket
bucket = self.s3.create_bucket(bucket_name, location=self.region)
File "/usr/local/lib/python2.7/dist-packages/boto/s3/connection.py", line 616, in create_bucket
response.status, response.reason, body)
boto.exception.S3CreateError: S3CreateError: 409 Conflict
<?xml version="1.0" encoding="UTF-8"?>
<Error><Code>BucketAlreadyExists</Code><Message>The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.</Message><BucketName>peter-cwltoil-test--files</BucketName><RequestId>DAF5D5E6B138F1AB</RequestId><HostId>t9Fz0/fKNcxA5q7gLBuk7FMu9AxCD4AdyXMIWhWOtJVyNS8ARDxnWDQOBZdeTmpkcVtXa9brMss=</HostId></Error>
|
boto.exception.S3CreateError
|
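The `uploadStream` refactor above replaces hand-rolled `os.pipe` plumbing with a `WritablePipe` base class: a subclass implements `readFrom(readable)`, and the base class owns the pipe, the background thread, and the join on exit, so the inline, multipart, and single-part variants differ only in their `readFrom` bodies. A rough sketch of such a base class, assuming plain `threading.Thread`; the real code uses `ExceptionalThread` so that reader exceptions resurface on `join`:

import os
import threading

class WritablePipe(object):
    """Context manager that yields the writable end of a pipe while a
    background thread consumes the readable end via readFrom()."""

    def readFrom(self, readable):
        raise NotImplementedError

    def _run(self, readable):
        try:
            self.readFrom(readable)
        finally:
            readable.close()

    def __enter__(self):
        readable_fd, writable_fd = os.pipe()
        readable = os.fdopen(readable_fd, 'rb')
        self.writable = os.fdopen(writable_fd, 'wb')
        self.thread = threading.Thread(target=self._run, args=(readable,))
        self.thread.start()
        return self.writable

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Closing the writable sends EOF to the reader thread; then wait for it.
        self.writable.close()
        self.thread.join()
        return False

class CollectingPipe(WritablePipe):
    """Toy subclass: buffer everything the writer produces."""
    def readFrom(self, readable):
        self.data = readable.read()

pipe = CollectingPipe()
with pipe as writable:
    writable.write(b'streamed upload body')
print(pipe.data)  # b'streamed upload body'
|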
def _downloadStream(self, jobStoreFileID, container):
    # The reason this is not in the writer is so we catch non-existent blobs early
blobProps = container.get_blob_properties(blob_name=jobStoreFileID)
encrypted = strict_bool(blobProps["x-ms-meta-encrypted"])
if encrypted and self.keyPath is None:
raise AssertionError("Content is encrypted but no key was provided.")
outer_self = self
class DownloadPipe(ReadablePipe):
def writeTo(self, writable):
chunkStart = 0
fileSize = int(blobProps["Content-Length"])
while chunkStart < fileSize:
chunkEnd = chunkStart + outer_self._maxAzureBlockBytes - 1
buf = container.get_blob(
blob_name=jobStoreFileID,
x_ms_range="bytes=%d-%d" % (chunkStart, chunkEnd),
)
if encrypted:
buf = encryption.decrypt(buf, outer_self.keyPath)
writable.write(buf)
chunkStart = chunkEnd + 1
with DownloadPipe() as readable:
yield readable
|
def _downloadStream(self, jobStoreFileID, container):
    # The reason this is not in the writer is so we catch non-existent blobs early
blobProps = container.get_blob_properties(blob_name=jobStoreFileID)
encrypted = strict_bool(blobProps["x-ms-meta-encrypted"])
if encrypted and self.keyPath is None:
raise AssertionError("Content is encrypted but no key was provided.")
readable_fh, writable_fh = os.pipe()
with os.fdopen(readable_fh, "r") as readable:
with os.fdopen(writable_fh, "w") as writable:
def writer():
try:
chunkStartPos = 0
fileSize = int(blobProps["Content-Length"])
while chunkStartPos < fileSize:
chunkEndPos = chunkStartPos + self._maxAzureBlockBytes - 1
buf = container.get_blob(
blob_name=jobStoreFileID,
x_ms_range="bytes=%d-%d" % (chunkStartPos, chunkEndPos),
)
if encrypted:
buf = encryption.decrypt(buf, self.keyPath)
writable.write(buf)
chunkStartPos = chunkEndPos + 1
finally:
# Ensure readers aren't left blocking if this thread crashes.
# This close() will send EOF to the reading end and ultimately cause the
                    # yield to return. It also makes the implicit .close() done by the enclosing
                    # "with" context redundant, but that should be OK since .close() on file
                    # objects is idempotent.
writable.close()
thread = ExceptionalThread(target=writer)
thread.start()
yield readable
thread.join()
|
https://github.com/DataBiosphere/toil/issues/785
|
ubuntu@ip-172-31-24-34:~/common-workflow-language$ toil clean aws:us-west-2:peter-cwltoil-test
2016-04-13 15:36:32,632 INFO:toil.lib.bioio: Logging set at level: INFO
2016-04-13 15:36:32,632 INFO:toil.utils.toilClean: Parsed arguments
2016-04-13 15:36:32,633 INFO:toil.utils.toilClean: Checking if we have files for toil
Traceback (most recent call last):
File "/usr/local/bin/toil", line 9, in <module>
load_entry_point('toil==3.2.0a2.dev134', 'console_scripts', 'toil')()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilMain.py", line 30, in main
module.main()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilClean.py", line 53, in main
jobStore = Toil.loadOrCreateJobStore(options.jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 501, in loadOrCreateJobStore
return AWSJobStore.loadOrCreateJobStore(jobStoreArgs, config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 117, in loadOrCreateJobStore
return cls(region, namePrefix, config=config, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 174, in __init__
self.filesBucket = self._getOrCreateBucket(qualify('files'), versioning=True)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 469, in _getOrCreateBucket
bucket = self.s3.create_bucket(bucket_name, location=self.region)
File "/usr/local/lib/python2.7/dist-packages/boto/s3/connection.py", line 616, in create_bucket
response.status, response.reason, body)
boto.exception.S3CreateError: S3CreateError: 409 Conflict
<?xml version="1.0" encoding="UTF-8"?>
<Error><Code>BucketAlreadyExists</Code><Message>The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.</Message><BucketName>peter-cwltoil-test--files</BucketName><RequestId>DAF5D5E6B138F1AB</RequestId><HostId>t9Fz0/fKNcxA5q7gLBuk7FMu9AxCD4AdyXMIWhWOtJVyNS8ARDxnWDQOBZdeTmpkcVtXa9brMss=</HostId></Error>
|
boto.exception.S3CreateError
|
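On the download side, both versions above fetch the blob in fixed-size ranges via `x_ms_range="bytes=start-end"`, decrypting each chunk if needed, because a single ranged GET is bounded by `_maxAzureBlockBytes`. The chunk arithmetic is easy to get off by one; a minimal sketch over an in-memory buffer, with `get_range` as a stand-in for the ranged GET (inclusive bounds, like an HTTP Range header):

def iter_chunks(get_range, file_size, max_chunk):
    """Yield a blob in inclusive byte ranges, like x_ms_range above."""
    chunk_start = 0
    while chunk_start < file_size:
        chunk_end = min(chunk_start + max_chunk - 1, file_size - 1)
        yield get_range(chunk_start, chunk_end)
        chunk_start = chunk_end + 1

blob = b'0123456789'
get_range = lambda start, end: blob[start:end + 1]  # inclusive, like HTTP Range
assert b''.join(iter_chunks(get_range, len(blob), 4)) == blob
|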
def _uploadStream(self, key, update=False, encrypt=True):
store = self
class UploadPipe(WritablePipe):
def readFrom(self, readable):
headers = store.encryptedHeaders if encrypt else store.headerValues
if update:
try:
key.set_contents_from_stream(readable, headers=headers)
except boto.exception.GSDataError:
if encrypt:
# https://github.com/boto/boto/issues/3518
# see self._writeFile for more
pass
else:
raise
else:
try:
                    # The if_generation argument ensures that the existing key matches the
                    # given generation, i.e. version, before modifying anything. Passing a
                    # generation of 0 ensures that the key does not exist remotely.
key.set_contents_from_stream(
readable, headers=headers, if_generation=0
)
except (
boto.exception.GSResponseError,
boto.exception.GSDataError,
) as e:
if isinstance(e, boto.exception.GSResponseError):
if e.status == 412:
raise ConcurrentFileModificationException(key.name)
else:
raise e
elif encrypt:
# https://github.com/boto/boto/issues/3518
# see self._writeFile for more
pass
else:
raise
with UploadPipe() as writable:
yield writable
|
def _uploadStream(self, key, update=False, encrypt=True):
readable_fh, writable_fh = os.pipe()
with os.fdopen(readable_fh, "r") as readable:
with os.fdopen(writable_fh, "w") as writable:
def writer():
headers = self.encryptedHeaders if encrypt else self.headerValues
if update:
try:
key.set_contents_from_stream(readable, headers=headers)
except boto.exception.GSDataError:
if encrypt:
# https://github.com/boto/boto/issues/3518
# see self._writeFile for more
pass
else:
raise
else:
try:
                        # The if_generation kwarg ensures that the existing key matches the
                        # given generation (version) before modifying anything. Setting
                        # if_generation=0 ensures the key does not exist remotely.
key.set_contents_from_stream(
readable, headers=headers, if_generation=0
)
except (
boto.exception.GSResponseError,
boto.exception.GSDataError,
) as e:
if isinstance(e, boto.exception.GSResponseError):
if e.status == 412:
raise ConcurrentFileModificationException(key.name)
else:
raise e
elif encrypt:
# https://github.com/boto/boto/issues/3518
# see self._writeFile for more
pass
else:
raise
thread = ExceptionalThread(target=writer)
thread.start()
yield writable
thread.join()
|
https://github.com/DataBiosphere/toil/issues/785
|
ubuntu@ip-172-31-24-34:~/common-workflow-language$ toil clean aws:us-west-2:peter-cwltoil-test
2016-04-13 15:36:32,632 INFO:toil.lib.bioio: Logging set at level: INFO
2016-04-13 15:36:32,632 INFO:toil.utils.toilClean: Parsed arguments
2016-04-13 15:36:32,633 INFO:toil.utils.toilClean: Checking if we have files for toil
Traceback (most recent call last):
File "/usr/local/bin/toil", line 9, in <module>
load_entry_point('toil==3.2.0a2.dev134', 'console_scripts', 'toil')()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilMain.py", line 30, in main
module.main()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilClean.py", line 53, in main
jobStore = Toil.loadOrCreateJobStore(options.jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 501, in loadOrCreateJobStore
return AWSJobStore.loadOrCreateJobStore(jobStoreArgs, config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 117, in loadOrCreateJobStore
return cls(region, namePrefix, config=config, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 174, in __init__
self.filesBucket = self._getOrCreateBucket(qualify('files'), versioning=True)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 469, in _getOrCreateBucket
bucket = self.s3.create_bucket(bucket_name, location=self.region)
File "/usr/local/lib/python2.7/dist-packages/boto/s3/connection.py", line 616, in create_bucket
response.status, response.reason, body)
boto.exception.S3CreateError: S3CreateError: 409 Conflict
<?xml version="1.0" encoding="UTF-8"?>
<Error><Code>BucketAlreadyExists</Code><Message>The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.</Message><BucketName>peter-cwltoil-test--files</BucketName><RequestId>DAF5D5E6B138F1AB</RequestId><HostId>t9Fz0/fKNcxA5q7gLBuk7FMu9AxCD4AdyXMIWhWOtJVyNS8ARDxnWDQOBZdeTmpkcVtXa9brMss=</HostId></Error>
|
boto.exception.S3CreateError
|
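The non-update path of the GCS `_uploadStream` above makes creation conditional with `if_generation=0`: the write succeeds only if no object exists yet, and a lost race comes back as HTTP 412, which the store translates into `ConcurrentFileModificationException`. A sketch of that translation around a hypothetical conditional put, where `status` only mimics the attribute on `GSResponseError`:

class ConcurrentFileModificationException(Exception):
    pass

class ResponseError(Exception):
    """Stand-in for boto.exception.GSResponseError."""
    def __init__(self, status):
        super(ResponseError, self).__init__('HTTP %d' % status)
        self.status = status

def create_exclusively(store, name, data):
    """Write `name` only if it does not exist yet (if_generation=0 semantics)."""
    if name in store:
        # The service answers 412 Precondition Failed for a lost race.
        raise ResponseError(412)
    store[name] = data

def safe_create(store, name, data):
    try:
        create_exclusively(store, name, data)
    except ResponseError as e:
        if e.status == 412:
            raise ConcurrentFileModificationException(name)
        raise

store = {}
safe_create(store, 'f1', b'x')
try:
    safe_create(store, 'f1', b'y')
except ConcurrentFileModificationException as e:
    print('conflict on', e)
|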
def _downloadStream(self, key, encrypt=True):
store = self
class DownloadPipe(ReadablePipe):
def writeTo(self, writable):
headers = store.encryptedHeaders if encrypt else store.headerValues
try:
key.get_file(writable, headers=headers)
finally:
writable.close()
with DownloadPipe() as readable:
yield readable
|
def _downloadStream(self, key, encrypt=True):
readable_fh, writable_fh = os.pipe()
with os.fdopen(readable_fh, "r") as readable:
with os.fdopen(writable_fh, "w") as writable:
def writer():
headers = self.encryptedHeaders if encrypt else self.headerValues
try:
key.get_file(writable, headers=headers)
finally:
writable.close()
thread = ExceptionalThread(target=writer)
thread.start()
yield readable
thread.join()
|
https://github.com/DataBiosphere/toil/issues/785
|
ubuntu@ip-172-31-24-34:~/common-workflow-language$ toil clean aws:us-west-2:peter-cwltoil-test
2016-04-13 15:36:32,632 INFO:toil.lib.bioio: Logging set at level: INFO
2016-04-13 15:36:32,632 INFO:toil.utils.toilClean: Parsed arguments
2016-04-13 15:36:32,633 INFO:toil.utils.toilClean: Checking if we have files for toil
Traceback (most recent call last):
File "/usr/local/bin/toil", line 9, in <module>
load_entry_point('toil==3.2.0a2.dev134', 'console_scripts', 'toil')()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilMain.py", line 30, in main
module.main()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilClean.py", line 53, in main
jobStore = Toil.loadOrCreateJobStore(options.jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 501, in loadOrCreateJobStore
return AWSJobStore.loadOrCreateJobStore(jobStoreArgs, config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 117, in loadOrCreateJobStore
return cls(region, namePrefix, config=config, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 174, in __init__
self.filesBucket = self._getOrCreateBucket(qualify('files'), versioning=True)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 469, in _getOrCreateBucket
bucket = self.s3.create_bucket(bucket_name, location=self.region)
File "/usr/local/lib/python2.7/dist-packages/boto/s3/connection.py", line 616, in create_bucket
response.status, response.reason, body)
boto.exception.S3CreateError: S3CreateError: 409 Conflict
<?xml version="1.0" encoding="UTF-8"?>
<Error><Code>BucketAlreadyExists</Code><Message>The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.</Message><BucketName>peter-cwltoil-test--files</BucketName><RequestId>DAF5D5E6B138F1AB</RequestId><HostId>t9Fz0/fKNcxA5q7gLBuk7FMu9AxCD4AdyXMIWhWOtJVyNS8ARDxnWDQOBZdeTmpkcVtXa9brMss=</HostId></Error>
|
boto.exception.S3CreateError
|
def _updatePredecessorStatus(self, jobStoreID):
"""
Update status of a predecessor for finished successor job.
"""
if jobStoreID in self.toilState.serviceJobStoreIDToPredecessorJob:
# Is a service job
predecessorJob = self.toilState.serviceJobStoreIDToPredecessorJob.pop(
jobStoreID
)
self.toilState.servicesIssued[predecessorJob.jobStoreID].pop(jobStoreID)
if (
len(self.toilState.servicesIssued[predecessorJob.jobStoreID]) == 0
): # Predecessor job has
# all its services terminated
self.toilState.servicesIssued.pop(
predecessorJob.jobStoreID
) # The job has no running services
self.toilState.updatedJobs.add((predecessorJob, 0)) # Now we know
# the job is done we can add it to the list of updated job files
logger.debug(
"Job %s services have completed or totally failed, adding to updated jobs"
% predecessorJob.jobStoreID
)
elif jobStoreID not in self.toilState.successorJobStoreIDToPredecessorJobs:
        # We have reached the root job
assert len(self.toilState.updatedJobs) == 0
assert len(self.toilState.successorJobStoreIDToPredecessorJobs) == 0
assert len(self.toilState.successorCounts) == 0
else:
for predecessorJob in self.toilState.successorJobStoreIDToPredecessorJobs.pop(
jobStoreID
):
self.toilState.successorCounts[predecessorJob.jobStoreID] -= 1
assert self.toilState.successorCounts[predecessorJob.jobStoreID] >= 0
if (
self.toilState.successorCounts[predecessorJob.jobStoreID] == 0
): # Job is done
self.toilState.successorCounts.pop(predecessorJob.jobStoreID)
predecessorJob.stack.pop()
logger.debug(
"Job %s has all its non-service successors completed or totally "
"failed",
predecessorJob.jobStoreID,
)
assert predecessorJob not in self.toilState.updatedJobs
self.toilState.updatedJobs.add((predecessorJob, 0)) # Now we know
|
def _updatePredecessorStatus(self, jobStoreID):
"""
    Update the status of a predecessor when one of its successor jobs finishes.
"""
if jobStoreID in self.toilState.serviceJobStoreIDToPredecessorJob:
# Is a service job
predecessorJob = self.toilState.serviceJobStoreIDToPredecessorJob.pop(
jobStoreID
)
self.toilState.servicesIssued[predecessorJob.jobStoreID].pop(jobStoreID)
if (
len(self.toilState.servicesIssued[predecessorJob.jobStoreID]) == 0
): # Predecessor job has
# all its services terminated
self.toilState.servicesIssued.pop(
predecessorJob.jobStoreID
) # The job has no running services
self.toilState.updatedJobs.add((predecessorJob, 0)) # Now we know
# the job is done we can add it to the list of updated job files
logger.debug(
"Job %s services have completed or totally failed, adding to updated jobs"
% predecessorJob.jobStoreID
)
elif jobStoreID not in self.toilState.successorJobStoreIDToPredecessorJobs:
        # We have reached the root job
assert len(self.toilState.updatedJobs) == 0
assert len(self.toilState.successorJobStoreIDToPredecessorJobs) == 0
assert len(self.toilState.successorCounts) == 0
else:
for predecessorJob in self.toilState.successorJobStoreIDToPredecessorJobs.pop(
jobStoreID
):
self.toilState.successorCounts[predecessorJob.jobStoreID] -= 1
assert self.toilState.successorCounts[predecessorJob.jobStoreID] >= 0
if (
self.toilState.successorCounts[predecessorJob.jobStoreID] == 0
): # Job is done
self.toilState.successorCounts.pop(predecessorJob.jobStoreID)
logger.debug(
"Job %s has all its non-service successors completed or totally "
"failed",
predecessorJob.jobStoreID,
)
assert predecessorJob not in self.toilState.updatedJobs
self.toilState.updatedJobs.add((predecessorJob, 0)) # Now we know
|
https://github.com/DataBiosphere/toil/issues/808
|
Repairing job: 1/X/jobPiXIW2
Repairing job: c/C/jobTbIFOx
---TOIL WORKER OUTPUT LOG---
---TOIL WORKER OUTPUT LOG---
Traceback (most recent call last):
File "/cluster/home/jcarmstr/toil/src/toil/worker.py", line 208, in main
jobWrapper = jobStore.load(jobStoreID)
File "/cluster/home/jcarmstr/toil/src/toil/jobStores/fileJobStore.py", line 111, in load
self._checkJobStoreId(jobStoreID)
File "/cluster/home/jcarmstr/toil/src/toil/jobStores/fileJobStore.py", line 329, in _checkJobStoreId
raise NoSuchJobException(jobStoreID)
NoSuchJobException: The job 'c/C/jobTbIFOx' does not exist
Traceback (most recent call last):
File "/cluster/home/jcarmstr/venv/bin/_toil_worker", line 8, in <module>
load_entry_point('toil==3.2.0a2', 'console_scripts', '_toil_worker')()
File "/cluster/home/jcarmstr/toil/src/toil/worker.py", line 482, in main
jobWrapper = jobStore.load(jobStoreID)
File "/cluster/home/jcarmstr/toil/src/toil/jobStores/fileJobStore.py", line 111, in load
self._checkJobStoreId(jobStoreID)
File "/cluster/home/jcarmstr/toil/src/toil/jobStores/fileJobStore.py", line 329, in _checkJobStoreId
raise NoSuchJobException(jobStoreID)
toil.jobStores.abstractJobStore.NoSuchJobException: The job 'c/C/jobTbIFOx' does not exist
---TOIL WORKER OUTPUT LOG---
|
NoSuchJobException
|
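The functional change above is the added predecessorJob.stack.pop(): a frame of successors is now removed from the job's stack only once every successor in it has finished, instead of being discarded earlier, so a leader restarted mid-workflow (the "Repairing job" lines in the log) still sees which successors were outstanding. A toy model of that bookkeeping, with invented names and a plain namedtuple standing in for the job wrapper:

import collections

Job = collections.namedtuple('Job', 'jobStoreID stack')

successorCounts = {}  # predecessor job store ID -> unfinished successors

def issueSuccessors(job):
    # Peek at the top frame (stack[-1]) rather than popping it, so the
    # frame survives in the job store while successors are running.
    frame = job.stack[-1]
    successorCounts[job.jobStoreID] = len(frame)
    return list(frame)

def successorFinished(job):
    successorCounts[job.jobStoreID] -= 1
    assert successorCounts[job.jobStoreID] >= 0
    if successorCounts[job.jobStoreID] == 0:
        del successorCounts[job.jobStoreID]
        job.stack.pop()  # only now is the frame discarded

parent = Job('parent', [['childA', 'childB']])
issueSuccessors(parent)
successorFinished(parent)  # childA done; frame still present
successorFinished(parent)  # childB done; frame popped
assert parent.stack == []
|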
def innerLoop(
jobStore,
config,
batchSystem,
toilState,
jobBatcher,
serviceManager,
statsAndLogging,
):
"""
:param toil.jobStores.abstractJobStore.AbstractJobStore jobStore:
:param toil.common.Config config:
:param toil.batchSystems.abstractBatchSystem.AbstractBatchSystem batchSystem:
:param ToilState toilState:
:param JobBatcher jobBatcher:
:param ServiceManager serviceManager:
:param StatsAndLogging statsAndLogging:
"""
    # Putting this in a separate function for easier reading
# Sets up the timing of the jobWrapper rescuing method
timeSinceJobsLastRescued = time.time()
logger.info("Starting the main loop")
while True:
# Process jobs that are ready to be scheduled/have successors to schedule
if len(toilState.updatedJobs) > 0:
logger.debug(
"Built the jobs list, currently have %i jobs to update and %i jobs issued",
len(toilState.updatedJobs),
jobBatcher.getNumberOfJobsIssued(),
)
updatedJobs = toilState.updatedJobs # The updated jobs to consider below
toilState.updatedJobs = set() # Resetting the list for the next set
for jobWrapper, resultStatus in updatedJobs:
logger.debug(
"Updating status of job: %s with result status: %s",
jobWrapper.jobStoreID,
resultStatus,
)
# This stops a job with services being issued by the serviceManager from
# being considered further in this loop. This catch is necessary because
                # the job's services can fail while being issued, causing the job to be
# added to updated jobs.
if jobWrapper in serviceManager.jobWrappersWithServicesBeingStarted:
logger.debug(
"Got a job to update which is still owned by the service "
"manager: %s",
jobWrapper.jobStoreID,
)
continue
                # If some of the job's successors failed then either fail the job
# or restart it if it has retries left and is a checkpoint job
if jobWrapper.jobStoreID in toilState.hasFailedSuccessors:
                    # If the job has services running, signal for them to be killed;
                    # once they are killed the jobWrapper will be re-added to the
                    # updatedJobs set and then scheduled to be removed
if jobWrapper.jobStoreID in toilState.servicesIssued:
logger.debug(
"Telling job: %s to terminate its services due to successor failure",
jobWrapper.jobStoreID,
)
serviceManager.killServices(
toilState.servicesIssued[jobWrapper.jobStoreID], error=True
)
# If the job has non-service jobs running wait for them to finish
# the job will be re-added to the updated jobs when these jobs are done
elif jobWrapper.jobStoreID in toilState.successorCounts:
logger.debug(
"Job: %s with failed successors still has successor jobs running",
jobWrapper.jobStoreID,
)
continue
# If the job is a checkpoint and has remaining retries then reissue it.
elif (
jobWrapper.checkpoint is not None
and jobWrapper.remainingRetryCount > 0
):
logger.warn(
"Job: %s is being restarted as a checkpoint after the total "
"failure of jobs in its subtree.",
jobWrapper.jobStoreID,
)
jobBatcher.issueJob(
jobWrapper.jobStoreID,
memory=jobWrapper.memory,
cores=jobWrapper.cores,
disk=jobWrapper.disk,
preemptable=jobWrapper.preemptable,
)
else: # Mark it totally failed
logger.debug(
"Job %s is being processed as completely failed",
jobWrapper.jobStoreID,
)
jobBatcher.processTotallyFailedJob(jobWrapper)
# If the jobWrapper has a command it must be run before any successors.
# Similarly, if the job previously failed we rerun it, even if it doesn't have a
# command to run, to eliminate any parts of the stack now completed.
elif jobWrapper.command is not None or resultStatus != 0:
isServiceJob = (
jobWrapper.jobStoreID
in toilState.serviceJobStoreIDToPredecessorJob
)
# If the job has run out of retries or is a service job whose error flag has
# been indicated, fail the job.
if (
jobWrapper.remainingRetryCount == 0
or isServiceJob
and not jobStore.fileExists(jobWrapper.errorJobStoreID)
):
jobBatcher.processTotallyFailedJob(jobWrapper)
logger.warn(
"Job: %s is completely failed", jobWrapper.jobStoreID
)
else:
# Otherwise try the job again
jobBatcher.issueJob(
jobWrapper.jobStoreID,
jobWrapper.memory,
jobWrapper.cores,
jobWrapper.disk,
jobWrapper.preemptable,
)
# If the job has services to run, which have not been started, start them
elif len(jobWrapper.services) > 0:
# Build a map from the service jobs to the job and a map
# of the services created for the job
assert jobWrapper.jobStoreID not in toilState.servicesIssued
toilState.servicesIssued[jobWrapper.jobStoreID] = {}
for serviceJobList in jobWrapper.services:
for serviceTuple in serviceJobList:
serviceID = serviceTuple[0]
assert (
serviceID
not in toilState.serviceJobStoreIDToPredecessorJob
)
toilState.serviceJobStoreIDToPredecessorJob[serviceID] = (
jobWrapper
)
toilState.servicesIssued[jobWrapper.jobStoreID][
serviceID
] = serviceTuple[4:7]
# Use the service manager to start the services
serviceManager.scheduleServices(jobWrapper)
logger.debug(
"Giving job: %s to service manager to schedule its jobs",
jobWrapper.jobStoreID,
)
# There exist successors to run
elif len(jobWrapper.stack) > 0:
assert len(jobWrapper.stack[-1]) > 0
logger.debug(
"Job: %s has %i successors to schedule",
jobWrapper.jobStoreID,
len(jobWrapper.stack[-1]),
)
# Record the number of successors that must be completed before
# the jobWrapper can be considered again
assert jobWrapper.jobStoreID not in toilState.successorCounts
toilState.successorCounts[jobWrapper.jobStoreID] = len(
jobWrapper.stack[-1]
)
# List of successors to schedule
successors = []
# For each successor schedule if all predecessors have been completed
for (
successorJobStoreID,
memory,
cores,
disk,
preemptable,
predecessorID,
) in jobWrapper.stack[-1]:
# Build map from successor to predecessors.
if (
successorJobStoreID
not in toilState.successorJobStoreIDToPredecessorJobs
):
toilState.successorJobStoreIDToPredecessorJobs[
successorJobStoreID
] = []
toilState.successorJobStoreIDToPredecessorJobs[
successorJobStoreID
].append(jobWrapper)
# Case that the jobWrapper has multiple predecessors
if predecessorID is not None:
# Load the wrapped jobWrapper
job2 = jobStore.load(successorJobStoreID)
# Remove the predecessor from the list of predecessors
job2.predecessorsFinished.add(predecessorID)
# Checkpoint
jobStore.update(job2)
                            # If the job's predecessors have not all completed then
# ignore the jobWrapper
assert len(job2.predecessorsFinished) >= 1
assert (
len(job2.predecessorsFinished) <= job2.predecessorNumber
)
if len(job2.predecessorsFinished) < job2.predecessorNumber:
continue
successors.append(
(successorJobStoreID, memory, cores, disk, preemptable)
)
jobBatcher.issueJobs(successors)
elif jobWrapper.jobStoreID in toilState.servicesIssued:
logger.debug(
"Telling job: %s to terminate its due to the successful completion of its successor jobs",
jobWrapper.jobStoreID,
)
serviceManager.killServices(
toilState.servicesIssued[jobWrapper.jobStoreID], error=False
)
# There are no remaining tasks to schedule within the jobWrapper, but
# we schedule it anyway to allow it to be deleted.
                # TODO: An alternative would be simply to delete it here and add it to the
# list of jobs to process, or (better) to create an asynchronous
# process that deletes jobs and then feeds them back into the set
# of jobs to be processed
else:
# Remove the job
if jobWrapper.remainingRetryCount > 0:
jobBatcher.issueJob(
jobWrapper.jobStoreID,
config.defaultMemory,
config.defaultCores,
config.defaultDisk,
True,
) # We allow this cleanup to potentially occur on a preemptable instance
logger.debug(
"Job: %s is empty, we are scheduling to clean it up",
jobWrapper.jobStoreID,
)
else:
jobBatcher.processTotallyFailedJob(jobWrapper)
logger.warn(
"Job: %s is empty but completely failed - something is very wrong",
jobWrapper.jobStoreID,
)
# The exit criterion
if (
len(toilState.updatedJobs) == 0
and jobBatcher.getNumberOfJobsIssued() == 0
and serviceManager.serviceJobsIssuedToServiceManager == 0
):
logger.info("No jobs left to run so exiting.")
break
# Start any service jobs available from the service manager
while True:
serviceJobTuple = serviceManager.getServiceJobsToStart(0)
# Stop trying to get jobs when function returns None
if serviceJobTuple is None:
break
serviceJobStoreID, memory, cores, disk = serviceJobTuple
logger.debug("Launching service job: %s", serviceJobStoreID)
# This loop issues the jobs to the batch system because the batch system is not
# thread-safe. FIXME: don't understand this comment
jobBatcher.issueJob(serviceJobStoreID, memory, cores, disk, False)
# Get jobs whose services have started
while True:
jobWrapper = serviceManager.getJobWrapperWhoseServicesAreRunning(0)
if jobWrapper is None: # Stop trying to get jobs when function returns None
break
logger.debug("Job: %s has established its services.", jobWrapper.jobStoreID)
jobWrapper.services = []
toilState.updatedJobs.add((jobWrapper, 0))
# Gather any new, updated jobWrapper from the batch system
updatedJob = batchSystem.getUpdatedBatchJob(2)
if updatedJob is not None:
jobBatchSystemID, result, wallTime = updatedJob
if jobBatcher.hasJob(jobBatchSystemID):
if result == 0:
logger.debug(
"Batch system is reporting that the jobWrapper with "
"batch system ID: %s and jobWrapper store ID: %s ended successfully",
jobBatchSystemID,
jobBatcher.getJob(jobBatchSystemID),
)
else:
logger.warn(
"Batch system is reporting that the jobWrapper with "
"batch system ID: %s and jobWrapper store ID: %s failed with exit value %i",
jobBatchSystemID,
jobBatcher.getJob(jobBatchSystemID),
result,
)
jobBatcher.processFinishedJob(
jobBatchSystemID, result, wallTime=wallTime
)
else:
logger.warn(
"A result seems to already have been processed "
"for jobWrapper with batch system ID: %i",
jobBatchSystemID,
)
else:
# Process jobs that have gone awry
# In the case that there is nothing happening
# (no updated jobWrapper to gather for 10 seconds)
            # check if there are any jobs that have run too long
# (see JobBatcher.reissueOverLongJobs) or which
# have gone missing from the batch system (see JobBatcher.reissueMissingJobs)
if (
time.time() - timeSinceJobsLastRescued >= config.rescueJobsFrequency
): # We only
# rescue jobs every N seconds, and when we have
# apparently exhausted the current jobWrapper supply
jobBatcher.reissueOverLongJobs()
logger.info("Reissued any over long jobs")
hasNoMissingJobs = jobBatcher.reissueMissingJobs()
if hasNoMissingJobs:
timeSinceJobsLastRescued = time.time()
else:
timeSinceJobsLastRescued += 60 # This means we'll try again
# in a minute, providing things are quiet
logger.info("Rescued any (long) missing jobs")
# Check on the associated processes and exit if a failure is detected
statsAndLogging.check()
serviceManager.check()
logger.info("Finished the main loop")
# Consistency check the toil state
assert toilState.updatedJobs == set()
assert toilState.successorCounts == {}
assert toilState.successorJobStoreIDToPredecessorJobs == {}
assert toilState.serviceJobStoreIDToPredecessorJob == {}
assert toilState.servicesIssued == {}
|
def innerLoop(
jobStore,
config,
batchSystem,
toilState,
jobBatcher,
serviceManager,
statsAndLogging,
):
"""
:param toil.jobStores.abstractJobStore.AbstractJobStore jobStore:
:param toil.common.Config config:
:param toil.batchSystems.abstractBatchSystem.AbstractBatchSystem batchSystem:
:param ToilState toilState:
:param JobBatcher jobBatcher:
:param ServiceManager serviceManager:
:param StatsAndLogging statsAndLogging:
"""
    # Putting this in a separate function for easier reading
# Sets up the timing of the jobWrapper rescuing method
timeSinceJobsLastRescued = time.time()
logger.info("Starting the main loop")
while True:
# Process jobs that are ready to be scheduled/have successors to schedule
if len(toilState.updatedJobs) > 0:
logger.debug(
"Built the jobs list, currently have %i jobs to update and %i jobs issued",
len(toilState.updatedJobs),
jobBatcher.getNumberOfJobsIssued(),
)
updatedJobs = toilState.updatedJobs # The updated jobs to consider below
toilState.updatedJobs = set() # Resetting the list for the next set
for jobWrapper, resultStatus in updatedJobs:
logger.debug(
"Updating status of job: %s with result status: %s",
jobWrapper.jobStoreID,
resultStatus,
)
# This stops a job with services being issued by the serviceManager from
# being considered further in this loop. This catch is necessary because
                # the job's services can fail while being issued, causing the job to be
# added to updated jobs.
if jobWrapper in serviceManager.jobWrappersWithServicesBeingStarted:
logger.debug(
"Got a job to update which is still owned by the service "
"manager: %s",
jobWrapper.jobStoreID,
)
continue
                # If some of the job's successors failed then either fail the job
# or restart it if it has retries left and is a checkpoint job
if jobWrapper.jobStoreID in toilState.hasFailedSuccessors:
                    # If the job has services running, signal for them to be killed;
                    # once they are killed the jobWrapper will be re-added to the
                    # updatedJobs set and then scheduled to be removed
if jobWrapper.jobStoreID in toilState.servicesIssued:
logger.debug(
"Telling job: %s to terminate its services due to successor failure",
jobWrapper.jobStoreID,
)
serviceManager.killServices(
toilState.servicesIssued[jobWrapper.jobStoreID], error=True
)
# If the job has non-service jobs running wait for them to finish
# the job will be re-added to the updated jobs when these jobs are done
elif jobWrapper.jobStoreID in toilState.successorCounts:
logger.debug(
"Job: %s with failed successors still has successor jobs running",
jobWrapper.jobStoreID,
)
continue
# If the job is a checkpoint and has remaining retries then reissue it.
elif (
jobWrapper.checkpoint is not None
and jobWrapper.remainingRetryCount > 0
):
logger.warn(
"Job: %s is being restarted as a checkpoint after the total "
"failure of jobs in its subtree.",
jobWrapper.jobStoreID,
)
jobBatcher.issueJob(
jobWrapper.jobStoreID,
memory=jobWrapper.memory,
cores=jobWrapper.cores,
disk=jobWrapper.disk,
preemptable=jobWrapper.preemptable,
)
else: # Mark it totally failed
logger.debug(
"Job %s is being processed as completely failed",
jobWrapper.jobStoreID,
)
jobBatcher.processTotallyFailedJob(jobWrapper)
# If the jobWrapper has a command it must be run before any successors.
# Similarly, if the job previously failed we rerun it, even if it doesn't have a
# command to run, to eliminate any parts of the stack now completed.
elif jobWrapper.command is not None or resultStatus != 0:
isServiceJob = (
jobWrapper.jobStoreID
in toilState.serviceJobStoreIDToPredecessorJob
)
# If the job has run out of retries or is a service job whose error flag has
# been indicated, fail the job.
if (
jobWrapper.remainingRetryCount == 0
or isServiceJob
and not jobStore.fileExists(jobWrapper.errorJobStoreID)
):
jobBatcher.processTotallyFailedJob(jobWrapper)
logger.warn(
"Job: %s is completely failed", jobWrapper.jobStoreID
)
else:
# Otherwise try the job again
jobBatcher.issueJob(
jobWrapper.jobStoreID,
jobWrapper.memory,
jobWrapper.cores,
jobWrapper.disk,
jobWrapper.preemptable,
)
# If the job has services to run, which have not been started, start them
elif len(jobWrapper.services) > 0:
# Build a map from the service jobs to the job and a map
# of the services created for the job
assert jobWrapper.jobStoreID not in toilState.servicesIssued
toilState.servicesIssued[jobWrapper.jobStoreID] = {}
for serviceJobList in jobWrapper.services:
for serviceTuple in serviceJobList:
serviceID = serviceTuple[0]
assert (
serviceID
not in toilState.serviceJobStoreIDToPredecessorJob
)
toilState.serviceJobStoreIDToPredecessorJob[serviceID] = (
jobWrapper
)
toilState.servicesIssued[jobWrapper.jobStoreID][
serviceID
] = serviceTuple[4:7]
# Use the service manager to start the services
serviceManager.scheduleServices(jobWrapper)
logger.debug(
"Giving job: %s to service manager to schedule its jobs",
jobWrapper.jobStoreID,
)
# There exist successors to run
elif len(jobWrapper.stack) > 0:
assert len(jobWrapper.stack[-1]) > 0
logger.debug(
"Job: %s has %i successors to schedule",
jobWrapper.jobStoreID,
len(jobWrapper.stack[-1]),
)
# Record the number of successors that must be completed before
# the jobWrapper can be considered again
assert jobWrapper.jobStoreID not in toilState.successorCounts
toilState.successorCounts[jobWrapper.jobStoreID] = len(
jobWrapper.stack[-1]
)
# List of successors to schedule
successors = []
# For each successor schedule if all predecessors have been completed
for (
successorJobStoreID,
memory,
cores,
disk,
preemptable,
predecessorID,
) in jobWrapper.stack.pop():
# Build map from successor to predecessors.
if (
successorJobStoreID
not in toilState.successorJobStoreIDToPredecessorJobs
):
toilState.successorJobStoreIDToPredecessorJobs[
successorJobStoreID
] = []
toilState.successorJobStoreIDToPredecessorJobs[
successorJobStoreID
].append(jobWrapper)
# Case that the jobWrapper has multiple predecessors
if predecessorID is not None:
# Load the wrapped jobWrapper
job2 = jobStore.load(successorJobStoreID)
# Remove the predecessor from the list of predecessors
job2.predecessorsFinished.add(predecessorID)
# Checkpoint
jobStore.update(job2)
                            # If the job's predecessors have not all completed then
# ignore the jobWrapper
assert len(job2.predecessorsFinished) >= 1
assert (
len(job2.predecessorsFinished) <= job2.predecessorNumber
)
if len(job2.predecessorsFinished) < job2.predecessorNumber:
continue
successors.append(
(successorJobStoreID, memory, cores, disk, preemptable)
)
jobBatcher.issueJobs(successors)
elif jobWrapper.jobStoreID in toilState.servicesIssued:
logger.debug(
"Telling job: %s to terminate its due to the successful completion of its successor jobs",
jobWrapper.jobStoreID,
)
serviceManager.killServices(
toilState.servicesIssued[jobWrapper.jobStoreID], error=False
)
# There are no remaining tasks to schedule within the jobWrapper, but
# we schedule it anyway to allow it to be deleted.
                # TODO: An alternative would be simply to delete it here and add it to the
# list of jobs to process, or (better) to create an asynchronous
# process that deletes jobs and then feeds them back into the set
# of jobs to be processed
else:
# Remove the job
if jobWrapper.remainingRetryCount > 0:
jobBatcher.issueJob(
jobWrapper.jobStoreID,
config.defaultMemory,
config.defaultCores,
config.defaultDisk,
True,
) # We allow this cleanup to potentially occur on a preemptable instance
logger.debug(
"Job: %s is empty, we are scheduling to clean it up",
jobWrapper.jobStoreID,
)
else:
jobBatcher.processTotallyFailedJob(jobWrapper)
logger.warn(
"Job: %s is empty but completely failed - something is very wrong",
jobWrapper.jobStoreID,
)
# The exit criterion
if (
len(toilState.updatedJobs) == 0
and jobBatcher.getNumberOfJobsIssued() == 0
and serviceManager.serviceJobsIssuedToServiceManager == 0
):
logger.info("No jobs left to run so exiting.")
break
# Start any service jobs available from the service manager
while True:
serviceJobTuple = serviceManager.getServiceJobsToStart(0)
# Stop trying to get jobs when function returns None
if serviceJobTuple is None:
break
serviceJobStoreID, memory, cores, disk = serviceJobTuple
logger.debug("Launching service job: %s", serviceJobStoreID)
# This loop issues the jobs to the batch system because the batch system is not
# thread-safe. FIXME: don't understand this comment
jobBatcher.issueJob(serviceJobStoreID, memory, cores, disk, False)
# Get jobs whose services have started
while True:
jobWrapper = serviceManager.getJobWrapperWhoseServicesAreRunning(0)
if jobWrapper is None: # Stop trying to get jobs when function returns None
break
logger.debug("Job: %s has established its services.", jobWrapper.jobStoreID)
jobWrapper.services = []
toilState.updatedJobs.add((jobWrapper, 0))
# Gather any new, updated jobWrapper from the batch system
updatedJob = batchSystem.getUpdatedBatchJob(2)
if updatedJob is not None:
jobBatchSystemID, result, wallTime = updatedJob
if jobBatcher.hasJob(jobBatchSystemID):
if result == 0:
logger.debug(
"Batch system is reporting that the jobWrapper with "
"batch system ID: %s and jobWrapper store ID: %s ended successfully",
jobBatchSystemID,
jobBatcher.getJob(jobBatchSystemID),
)
else:
logger.warn(
"Batch system is reporting that the jobWrapper with "
"batch system ID: %s and jobWrapper store ID: %s failed with exit value %i",
jobBatchSystemID,
jobBatcher.getJob(jobBatchSystemID),
result,
)
jobBatcher.processFinishedJob(
jobBatchSystemID, result, wallTime=wallTime
)
else:
logger.warn(
"A result seems to already have been processed "
"for jobWrapper with batch system ID: %i",
jobBatchSystemID,
)
else:
# Process jobs that have gone awry
# In the case that there is nothing happening
# (no updated jobWrapper to gather for 10 seconds)
            # check if there are any jobs that have run too long
# (see JobBatcher.reissueOverLongJobs) or which
# have gone missing from the batch system (see JobBatcher.reissueMissingJobs)
if (
time.time() - timeSinceJobsLastRescued >= config.rescueJobsFrequency
): # We only
# rescue jobs every N seconds, and when we have
# apparently exhausted the current jobWrapper supply
jobBatcher.reissueOverLongJobs()
logger.info("Reissued any over long jobs")
hasNoMissingJobs = jobBatcher.reissueMissingJobs()
if hasNoMissingJobs:
timeSinceJobsLastRescued = time.time()
else:
timeSinceJobsLastRescued += 60 # This means we'll try again
# in a minute, providing things are quiet
logger.info("Rescued any (long) missing jobs")
# Check on the associated processes and exit if a failure is detected
statsAndLogging.check()
serviceManager.check()
logger.info("Finished the main loop")
# Consistency check the toil state
assert toilState.updatedJobs == set()
assert toilState.successorCounts == {}
assert toilState.successorJobStoreIDToPredecessorJobs == {}
assert toilState.serviceJobStoreIDToPredecessorJob == {}
assert toilState.servicesIssued == {}
|
https://github.com/DataBiosphere/toil/issues/808
|
Repairing job: 1/X/jobPiXIW2
Repairing job: c/C/jobTbIFOx
---TOIL WORKER OUTPUT LOG---
---TOIL WORKER OUTPUT LOG---
Traceback (most recent call last):
File "/cluster/home/jcarmstr/toil/src/toil/worker.py", line 208, in main
jobWrapper = jobStore.load(jobStoreID)
File "/cluster/home/jcarmstr/toil/src/toil/jobStores/fileJobStore.py", line 111, in load
self._checkJobStoreId(jobStoreID)
File "/cluster/home/jcarmstr/toil/src/toil/jobStores/fileJobStore.py", line 329, in _checkJobStoreId
raise NoSuchJobException(jobStoreID)
NoSuchJobException: The job 'c/C/jobTbIFOx' does not exist
Traceback (most recent call last):
File "/cluster/home/jcarmstr/venv/bin/_toil_worker", line 8, in <module>
load_entry_point('toil==3.2.0a2', 'console_scripts', '_toil_worker')()
File "/cluster/home/jcarmstr/toil/src/toil/worker.py", line 482, in main
jobWrapper = jobStore.load(jobStoreID)
File "/cluster/home/jcarmstr/toil/src/toil/jobStores/fileJobStore.py", line 111, in load
self._checkJobStoreId(jobStoreID)
File "/cluster/home/jcarmstr/toil/src/toil/jobStores/fileJobStore.py", line 329, in _checkJobStoreId
raise NoSuchJobException(jobStoreID)
toil.jobStores.abstractJobStore.NoSuchJobException: The job 'c/C/jobTbIFOx' does not exist
---TOIL WORKER OUTPUT LOG---
|
NoSuchJobException
|
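The innerLoop diff above is the other half of the same fix: successors are now read with jobWrapper.stack[-1] instead of jobWrapper.stack.pop(), deferring the pop to _updatePredecessorStatus as shown earlier. Separately, the loop only rescues stalled or missing jobs after a quiet period; a small sketch of that timing scheme, where the frequency value merely stands in for config.rescueJobsFrequency:

import time

class RescueTimer(object):
    def __init__(self, frequency=3600):
        self.frequency = frequency
        self.last = time.time()

    def maybeRescue(self, reissueMissingJobs):
        # Only run a rescue pass when nothing has happened for a while.
        if time.time() - self.last >= self.frequency:
            if reissueMissingJobs():      # True means nothing was missing
                self.last = time.time()   # full reset
            else:
                self.last += 60           # re-check about a minute later

timer = RescueTimer(frequency=0)          # force an immediate rescue pass
timer.maybeRescue(lambda: True)
|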
def resolveEntryPoint(entryPoint):
"""
Returns the path to the given entry point (see setup.py) that *should* work on a worker. The
return value may be an absolute or a relative path.
"""
if hasattr(sys, "real_prefix"):
path = os.path.join(os.path.dirname(sys.executable), entryPoint)
# Inside a virtualenv we try to use absolute paths to the entrypoints.
if os.path.isfile(path):
# If the entrypoint is present, Toil must have been installed into the virtualenv (as
# opposed to being included via --system-site-packages). For clusters this means that
# if Toil is installed in a virtualenv on the leader, it must be installed in
# a virtualenv located at the same path on each worker as well.
assert os.access(path, os.X_OK)
return path
else:
            # For virtualenvs that have the toil package directory on their sys.path but whose
# bin directory lacks the Toil entrypoints, i.e. where Toil is included via
# --system-site-packages, we rely on PATH just as if we weren't in a virtualenv.
return entryPoint
else:
# Outside a virtualenv it is hard to predict where the entry points got installed. It is
        # the responsibility of the user to ensure that they are present on PATH and point to the
# correct version of Toil. This is still better than an absolute path because it gives
# the user control over Toil's location on both leader and workers.
return entryPoint
|
def resolveEntryPoint(entryPoint):
"""
Returns the path to the given entry point (see setup.py) that *should* work on a worker. The
return value may be an absolute or a relative path.
"""
if hasattr(sys, "real_prefix"):
# Inside a virtualenv we will use absolute paths to the entrypoints. For clusters this
# means that if Toil is installed in a virtualenv on the leader, it must be installed in
# a virtualenv located at the same path on the worker.
path = os.path.join(os.path.dirname(sys.executable), entryPoint)
assert os.path.isfile(path)
assert os.access(path, os.X_OK)
return path
else:
# Outside a virtualenv it is hard to predict where the entry points got installed. It is
        # the responsibility of the user to ensure that they are present on PATH and point to the
# correct version of Toil. This is still better than an absolute path because it gives
# the user control over Toil's location on both leader and workers.
return entryPoint
|
https://github.com/DataBiosphere/toil/issues/929
|
INFO:toil.leader:Starting the main loop
Traceback (most recent call last):
File "./piptest/bin/toil-rnaseq", line 11, in <module>
sys.exit(main())
File "/home/mesosbox/piptest/local/lib/python2.7/site-packages/toil_scripts/rnaseq_cgl/rnaseq_cgl_pipeline.py", line 638, in main
Job.Runner.startToil(Job.wrapJobFn(map_job, download_sample, samples, config), args)
File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 445, in startToil
return mainLoop(config, batchSystem, jobStore, rootJob, jobCache=jobCache)
File "/usr/local/lib/python2.7/dist-packages/toil/leader.py", line 392, in mainLoop
jobWrapper.cores, jobWrapper.disk)
File "/usr/local/lib/python2.7/dist-packages/toil/leader.py", line 101, in issueJob
jobCommand = ' '.join((resolveEntryPoint('_toil_worker'), self.jobStoreString, jobStoreID))
File "/usr/local/lib/python2.7/dist-packages/toil/__init__.py", line 45, in resolveEntryPoint
assert os.path.isfile(path)
AssertionError
|
AssertionError
|
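The fix makes resolveEntryPoint fall back to PATH when the virtualenv's bin directory lacks the entry point, which is exactly the --system-site-packages case that triggered the AssertionError above. One caveat worth noting: sys.real_prefix is set only by the classic virtualenv tool. A sketch of a detection helper that also covers the stdlib venv module is below; the venv branch is an addition for illustration, not part of the fix above.

import sys

def inVirtualEnv():
    # Classic virtualenv sets sys.real_prefix; stdlib venv (PEP 405)
    # instead makes sys.prefix differ from sys.base_prefix.
    return (hasattr(sys, 'real_prefix')
            or getattr(sys, 'base_prefix', sys.prefix) != sys.prefix)

print(inVirtualEnv())
|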
def _importFile(self, otherCls, url):
if issubclass(otherCls, AWSJobStore):
srcBucket, srcKey = self._extractKeyInfoFromUrl(url, existing=True)
info = self.FileInfo.create(srcKey.name)
info.copyFrom(srcKey)
info.save()
return info.fileID
else:
return super(AWSJobStore, self)._importFile(otherCls, url)
|
def _importFile(self, otherCls, url):
if issubclass(otherCls, AWSJobStore):
srcBucket, srcKey = self._extractKeyInfoFromUrl(url)
info = self.FileInfo.create(srcKey.name)
info.copyFrom(srcKey)
info.save()
return info.fileID
else:
return super(AWSJobStore, self)._importFile(otherCls, url)
|
https://github.com/DataBiosphere/toil/issues/816
|
2016-04-26 14:13:37,623 ERROR:root: Got exception 'S3ResponseError: 400 Bad Request
<Error><Code>InvalidRequest</Code><Message>The specified copy source is larger than the maximum allowable size for a copy source: 5368709120</Message><RequestId>CCB50A66B881B563</RequestId><HostId>1dAVKee/hXEaLCvisKyxUHlWtLbZwqcedEXOsKVSzAoE5ISrsuykzg9teW9vzidQaIqImJxbNgI=</HostId></Error>' while writing 's3://1000genomes/phase3/data/HG01977/sequence_read/SRR360135_1.filt.fastq.gz'
Traceback (most recent call last):
File "/usr/local/bin/cwltoil", line 9, in <module>
load_entry_point('toil==3.2.0a2', 'console_scripts', 'cwltoil')()
File "/usr/local/lib/python2.7/dist-packages/toil/cwl/cwltoil.py", line 580, in main
adjustFiles(builder.job, functools.partial(writeFile, toil.importFile, {}))
File "/usr/local/lib/python2.7/dist-packages/cwltool/process.py", line 128, in adjustFiles
adjustFiles(rec[d], op)
File "/usr/local/lib/python2.7/dist-packages/cwltool/process.py", line 131, in adjustFiles
adjustFiles(d, op)
File "/usr/local/lib/python2.7/dist-packages/cwltool/process.py", line 131, in adjustFiles
adjustFiles(d, op)
File "/usr/local/lib/python2.7/dist-packages/cwltool/process.py", line 126, in adjustFiles
rec["path"] = op(rec["path"])
File "/usr/local/lib/python2.7/dist-packages/toil/cwl/cwltoil.py", line 162, in writeFile
index[x] = (writeFunc(rp), os.path.basename(x))
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 570, in importFile
return self.jobStore.importFile(srcUrl)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/abstractJobStore.py", line 260, in importFile
return self._importFile(findJobStoreForUrl(url), url)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 279, in _importFile
info.copyFrom(srcKey)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 826, in copyFrom
headers=self._s3EncryptionHeaders()).version_id
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 865, in _copyKey
headers=headers)
File "/usr/local/lib/python2.7/dist-packages/boto/s3/bucket.py", line 888, in copy_key
response.reason, body)
boto.exception.S3ResponseError: S3ResponseError: 400 Bad Request
<Error><Code>InvalidRequest</Code><Message>The specified copy source is larger than the maximum allowable size for a copy source: 5368709120</Message><RequestId>CCB50A66B881B563</RequestId><HostId>1dAVKee/hXEaLCvisKyxUHlWtLbZwqcedEXOsKVSzAoE5ISrsuykzg9teW9vzidQaIqImJxbNgI=</HostId></Error>
|
boto.exception.S3ResponseError
|
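The root cause in this and the following issue-816 records is S3's cap on single-operation copies, visible in the error body above: a CopyObject source may be at most 5368709120 bytes (5 GiB), so larger objects must be copied via multipart upload-part-copy. A tiny illustration of that threshold, with invented names:

SINGLE_COPY_LIMIT = 5 * 2 ** 30  # 5368709120 bytes, per the S3 error above

def chooseCopyStrategy(srcSize):
    # Anything over the limit must go through multipart copy.
    return 'simple' if srcSize <= SINGLE_COPY_LIMIT else 'multipart'

assert chooseCopyStrategy(5 * 2 ** 30) == 'simple'
assert chooseCopyStrategy(5 * 2 ** 30 + 1) == 'multipart'
|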
def _readFromUrl(cls, url, writable):
srcBucket, srcKey = cls._extractKeyInfoFromUrl(url, existing=True)
srcKey.get_contents_to_file(writable)
|
def _readFromUrl(cls, url, writable):
srcBucket, srcKey = cls._extractKeyInfoFromUrl(url)
srcKey.get_contents_to_file(writable)
|
https://github.com/DataBiosphere/toil/issues/816
|
2016-04-26 14:13:37,623 ERROR:root: Got exception 'S3ResponseError: 400 Bad Request
<Error><Code>InvalidRequest</Code><Message>The specified copy source is larger than the maximum allowable size for a copy source: 5368709120</Message><RequestId>CCB50A66B881B563</RequestId><HostId>1dAVKee/hXEaLCvisKyxUHlWtLbZwqcedEXOsKVSzAoE5ISrsuykzg9teW9vzidQaIqImJxbNgI=</HostId></Error>' while writing 's3://1000genomes/phase3/data/HG01977/sequence_read/SRR360135_1.filt.fastq.gz'
Traceback (most recent call last):
File "/usr/local/bin/cwltoil", line 9, in <module>
load_entry_point('toil==3.2.0a2', 'console_scripts', 'cwltoil')()
File "/usr/local/lib/python2.7/dist-packages/toil/cwl/cwltoil.py", line 580, in main
adjustFiles(builder.job, functools.partial(writeFile, toil.importFile, {}))
File "/usr/local/lib/python2.7/dist-packages/cwltool/process.py", line 128, in adjustFiles
adjustFiles(rec[d], op)
File "/usr/local/lib/python2.7/dist-packages/cwltool/process.py", line 131, in adjustFiles
adjustFiles(d, op)
File "/usr/local/lib/python2.7/dist-packages/cwltool/process.py", line 131, in adjustFiles
adjustFiles(d, op)
File "/usr/local/lib/python2.7/dist-packages/cwltool/process.py", line 126, in adjustFiles
rec["path"] = op(rec["path"])
File "/usr/local/lib/python2.7/dist-packages/toil/cwl/cwltoil.py", line 162, in writeFile
index[x] = (writeFunc(rp), os.path.basename(x))
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 570, in importFile
return self.jobStore.importFile(srcUrl)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/abstractJobStore.py", line 260, in importFile
return self._importFile(findJobStoreForUrl(url), url)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 279, in _importFile
info.copyFrom(srcKey)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 826, in copyFrom
headers=self._s3EncryptionHeaders()).version_id
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 865, in _copyKey
headers=headers)
File "/usr/local/lib/python2.7/dist-packages/boto/s3/bucket.py", line 888, in copy_key
response.reason, body)
boto.exception.S3ResponseError: S3ResponseError: 400 Bad Request
<Error><Code>InvalidRequest</Code><Message>The specified copy source is larger than the maximum allowable size for a copy source: 5368709120</Message><RequestId>CCB50A66B881B563</RequestId><HostId>1dAVKee/hXEaLCvisKyxUHlWtLbZwqcedEXOsKVSzAoE5ISrsuykzg9teW9vzidQaIqImJxbNgI=</HostId></Error>
|
boto.exception.S3ResponseError
|
def _extractKeyInfoFromUrl(url, existing=None):
"""
    Extracts bucket and key from URL. The existing parameter determines whether
    a particular state of existence should be enforced: if existing is True the
    key must already exist, if False it must not, and if None no check is made.
    Note also that if existing is not True and the key does not exist, a new
    key object is created.
    :param existing: whether the key must (True), must not (False), or may
        (None) already exist
    :return: (bucket, key)
"""
s3 = boto.connect_s3()
bucket = s3.get_bucket(url.netloc)
key = bucket.get_key(url.path[1:])
if existing is True:
if key is None:
raise RuntimeError("Key does not exist.")
elif existing is False:
if key is not None:
raise RuntimeError("Key exists.")
elif existing is None:
pass
else:
assert False
if key is None:
key = bucket.new_key(url.path[1:])
return bucket, key
|
def _extractKeyInfoFromUrl(url):
"""
:return: (bucket, key)
"""
s3 = boto.connect_s3()
bucket = s3.get_bucket(url.netloc)
key = bucket.new_key(url.path[1:])
return bucket, key
|
https://github.com/DataBiosphere/toil/issues/816
|
2016-04-26 14:13:37,623 ERROR:root: Got exception 'S3ResponseError: 400 Bad Request
<Error><Code>InvalidRequest</Code><Message>The specified copy source is larger than the maximum allowable size for a copy source: 5368709120</Message><RequestId>CCB50A66B881B563</RequestId><HostId>1dAVKee/hXEaLCvisKyxUHlWtLbZwqcedEXOsKVSzAoE5ISrsuykzg9teW9vzidQaIqImJxbNgI=</HostId></Error>' while writing 's3://1000genomes/phase3/data/HG01977/sequence_read/SRR360135_1.filt.fastq.gz'
Traceback (most recent call last):
File "/usr/local/bin/cwltoil", line 9, in <module>
load_entry_point('toil==3.2.0a2', 'console_scripts', 'cwltoil')()
File "/usr/local/lib/python2.7/dist-packages/toil/cwl/cwltoil.py", line 580, in main
adjustFiles(builder.job, functools.partial(writeFile, toil.importFile, {}))
File "/usr/local/lib/python2.7/dist-packages/cwltool/process.py", line 128, in adjustFiles
adjustFiles(rec[d], op)
File "/usr/local/lib/python2.7/dist-packages/cwltool/process.py", line 131, in adjustFiles
adjustFiles(d, op)
File "/usr/local/lib/python2.7/dist-packages/cwltool/process.py", line 131, in adjustFiles
adjustFiles(d, op)
File "/usr/local/lib/python2.7/dist-packages/cwltool/process.py", line 126, in adjustFiles
rec["path"] = op(rec["path"])
File "/usr/local/lib/python2.7/dist-packages/toil/cwl/cwltoil.py", line 162, in writeFile
index[x] = (writeFunc(rp), os.path.basename(x))
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 570, in importFile
return self.jobStore.importFile(srcUrl)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/abstractJobStore.py", line 260, in importFile
return self._importFile(findJobStoreForUrl(url), url)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 279, in _importFile
info.copyFrom(srcKey)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 826, in copyFrom
headers=self._s3EncryptionHeaders()).version_id
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 865, in _copyKey
headers=headers)
File "/usr/local/lib/python2.7/dist-packages/boto/s3/bucket.py", line 888, in copy_key
response.reason, body)
boto.exception.S3ResponseError: S3ResponseError: 400 Bad Request
<Error><Code>InvalidRequest</Code><Message>The specified copy source is larger than the maximum allowable size for a copy source: 5368709120</Message><RequestId>CCB50A66B881B563</RequestId><HostId>1dAVKee/hXEaLCvisKyxUHlWtLbZwqcedEXOsKVSzAoE5ISrsuykzg9teW9vzidQaIqImJxbNgI=</HostId></Error>
|
boto.exception.S3ResponseError
|
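The rewritten _extractKeyInfoFromUrl gains a tri-state existing parameter so that importing callers can insist the source key already exists while exporting callers can insist it does not. That contract, distilled into a standalone, hypothetical helper:

def checkExistence(key, existing=None):
    # True -> key must exist, False -> key must not exist, None -> no check;
    # mirrors the `existing` parameter above.
    assert existing in (True, False, None)
    if existing is True and key is None:
        raise RuntimeError("Key does not exist.")
    if existing is False and key is not None:
        raise RuntimeError("Key exists.")

checkExistence(object(), existing=True)  # required and present: OK
checkExistence(None, existing=False)     # forbidden and absent: OK
checkExistence(None)                     # no constraint: OK
|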
def copyKeyMultipart(srcKey, dstBucketName, dstKeyName, headers=None):
"""
Copies a key from a source key to a destination key in multiple parts. Note that if the
destination key exists it will be overwritten implicitly, and if it does not exist a new
key will be created.
:param boto.s3.key.Key srcKey: The source key to be copied from.
:param str dstBucketName: The name of the destination bucket for the copy.
:param str dstKeyName: The name of the destination key that will be created or overwritten.
:param dict headers: Any headers that should be passed.
:rtype: boto.s3.multipart.CompletedMultiPartUpload
:return: An object representing the completed upload.
"""
partSize = defaultPartSize
s3 = boto.connect_s3()
headers = headers or {}
totalSize = srcKey.size
# initiate copy
upload = s3.get_bucket(dstBucketName).initiate_multipart_upload(
dstKeyName, headers=headers
)
try:
start = 0
partIndex = itertools.count()
while start < totalSize:
end = min(start + partSize, totalSize)
upload.copy_part_from_key(
src_bucket_name=srcKey.bucket.name,
src_key_name=srcKey.name,
part_num=next(partIndex) + 1,
start=start,
end=end - 1,
headers=headers,
)
start += partSize
except:
upload.cancel_upload()
raise
else:
return upload.complete_upload()
|
def copyKeyMultipart(srcKey, dstBucketName, dstKeyName, headers=None):
"""
Copies a key from a source key to a destination key in multiple parts. Note that if the
destination key exists it will be overwritten implicitly, and if it does not exist a new
key will be created.
:param boto.s3.key.Key srcKey: The source key to be copied from.
:param str dstBucketName: The name of the destination bucket for the copy.
:param str dstKeyName: The name of the destination key that will be created or overwritten.
:param dict headers: Any headers that should be passed.
:rtype: boto.s3.multipart.CompletedMultiPartUpload
:return: An object representing the completed upload.
"""
partSize = defaultPartSize
s3 = boto.connect_s3()
headers = headers or {}
totalSize = srcKey.size
# initiate copy
upload = s3.get_bucket(dstBucketName).initiate_multipart_upload(dstKeyName)
try:
start = 0
partIndex = itertools.count()
while start < totalSize:
end = min(start + partSize, totalSize)
upload.copy_part_from_key(
src_bucket_name=srcKey.bucket.name,
src_key_name=srcKey.name,
part_num=next(partIndex) + 1,
start=start,
end=end - 1,
headers=headers,
)
start += partSize
except:
upload.cancel_upload()
raise
else:
return upload.complete_upload()
|
https://github.com/DataBiosphere/toil/issues/816
|
2016-04-26 14:13:37,623 ERROR:root: Got exception 'S3ResponseError: 400 Bad Request
<Error><Code>InvalidRequest</Code><Message>The specified copy source is larger than the maximum allowable size for a copy source: 5368709120</Message><RequestId>CCB50A66B881B563</RequestId><HostId>1dAVKee/hXEaLCvisKyxUHlWtLbZwqcedEXOsKVSzAoE5ISrsuykzg9teW9vzidQaIqImJxbNgI=</HostId></Error>' while writing 's3://1000genomes/phase3/data/HG01977/sequence_read/SRR360135_1.filt.fastq.gz'
Traceback (most recent call last):
File "/usr/local/bin/cwltoil", line 9, in <module>
load_entry_point('toil==3.2.0a2', 'console_scripts', 'cwltoil')()
File "/usr/local/lib/python2.7/dist-packages/toil/cwl/cwltoil.py", line 580, in main
adjustFiles(builder.job, functools.partial(writeFile, toil.importFile, {}))
File "/usr/local/lib/python2.7/dist-packages/cwltool/process.py", line 128, in adjustFiles
adjustFiles(rec[d], op)
File "/usr/local/lib/python2.7/dist-packages/cwltool/process.py", line 131, in adjustFiles
adjustFiles(d, op)
File "/usr/local/lib/python2.7/dist-packages/cwltool/process.py", line 131, in adjustFiles
adjustFiles(d, op)
File "/usr/local/lib/python2.7/dist-packages/cwltool/process.py", line 126, in adjustFiles
rec["path"] = op(rec["path"])
File "/usr/local/lib/python2.7/dist-packages/toil/cwl/cwltoil.py", line 162, in writeFile
index[x] = (writeFunc(rp), os.path.basename(x))
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 570, in importFile
return self.jobStore.importFile(srcUrl)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/abstractJobStore.py", line 260, in importFile
return self._importFile(findJobStoreForUrl(url), url)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 279, in _importFile
info.copyFrom(srcKey)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 826, in copyFrom
headers=self._s3EncryptionHeaders()).version_id
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/aws/jobStore.py", line 865, in _copyKey
headers=headers)
File "/usr/local/lib/python2.7/dist-packages/boto/s3/bucket.py", line 888, in copy_key
response.reason, body)
boto.exception.S3ResponseError: S3ResponseError: 400 Bad Request
<Error><Code>InvalidRequest</Code><Message>The specified copy source is larger than the maximum allowable size for a copy source: 5368709120</Message><RequestId>CCB50A66B881B563</RequestId><HostId>1dAVKee/hXEaLCvisKyxUHlWtLbZwqcedEXOsKVSzAoE5ISrsuykzg9teW9vzidQaIqImJxbNgI=</HostId></Error>
|
boto.exception.S3ResponseError
|
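The only change to copyKeyMultipart is that headers (and with them the SSE-C encryption headers) are now passed to initiate_multipart_upload as well as to each part copy. For reference, a short sketch of the byte-range arithmetic the loop above performs; note that the end offset handed to copy_part_from_key is inclusive, hence the subtraction:

def partRanges(totalSize, partSize):
    # Yields (partNum, start, end) tuples covering [0, totalSize).
    start, partNum = 0, 1
    while start < totalSize:
        end = min(start + partSize, totalSize)
        yield partNum, start, end - 1
        start, partNum = end, partNum + 1

assert list(partRanges(10, 4)) == [(1, 0, 3), (2, 4, 7), (3, 8, 9)]
|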
def statusUpdate(self, driver, update):
"""
Invoked when the status of a task has changed (e.g., a slave is lost and so the task is
lost, a task finishes and an executor sends a status update saying so, etc). Note that
returning from this callback _acknowledges_ receipt of this status update! If for
whatever reason the scheduler aborts during this callback (or the process exits) another
status update will be delivered (note, however, that this is currently not true if the
slave sending the status update is lost/fails during that time).
"""
taskID = int(update.task_id.value)
stateName = mesos_pb2.TaskState.Name(update.state)
log.debug("Task %i is in state %s", taskID, stateName)
try:
self.killSet.remove(taskID)
except KeyError:
pass
else:
self.killedSet.add(taskID)
if update.state == mesos_pb2.TASK_FINISHED:
self.__updateState(taskID, 0)
elif update.state == mesos_pb2.TASK_FAILED:
try:
exitStatus = int(update.message)
except ValueError:
exitStatus = 255
log.warning("Task %i failed with message '%s'", taskID, update.message)
else:
log.warning("Task %i failed with exit status %i", taskID, exitStatus)
self.__updateState(taskID, exitStatus)
elif update.state in (
mesos_pb2.TASK_LOST,
mesos_pb2.TASK_KILLED,
mesos_pb2.TASK_ERROR,
):
log.warning(
"Task %i is in unexpected state %s with message '%s'",
taskID,
stateName,
update.message,
)
self.__updateState(taskID, 255)
# Explicitly acknowledge the update if implicit acknowledgements are not being used.
if not self.implicitAcknowledgements:
driver.acknowledgeStatusUpdate(update)
|
def statusUpdate(self, driver, update):
"""
Invoked when the status of a task has changed (e.g., a slave is lost and so the task is
lost, a task finishes and an executor sends a status update saying so, etc). Note that
returning from this callback _acknowledges_ receipt of this status update! If for
whatever reason the scheduler aborts during this callback (or the process exits) another
status update will be delivered (note, however, that this is currently not true if the
slave sending the status update is lost/fails during that time).
"""
taskID = int(update.task_id.value)
stateName = mesos_pb2.TaskState.Name(update.state)
log.debug("Task %i is in state %s", taskID, stateName)
try:
self.killSet.remove(taskID)
except KeyError:
pass
else:
self.killedSet.add(taskID)
if update.state == mesos_pb2.TASK_FINISHED:
self.__updateState(taskID, 0)
elif update.state == mesos_pb2.TASK_FAILED:
exitStatus = int(update.message)
log.warning("Task %i failed with exit status %i", taskID, exitStatus)
self.__updateState(taskID, exitStatus)
elif update.state in (
mesos_pb2.TASK_LOST,
mesos_pb2.TASK_KILLED,
mesos_pb2.TASK_ERROR,
):
log.warning(
"Task %i is in unexpected state %s with message '%s'",
taskID,
stateName,
update.message,
)
self.__updateState(taskID, 255)
# Explicitly acknowledge the update if implicit acknowledgements are not being used.
if not self.implicitAcknowledgements:
driver.acknowledgeStatusUpdate(update)
|
https://github.com/DataBiosphere/toil/issues/671
|
Failed to call scheduler's statusUpdate
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/toil/batchSystems/mesos/batchSystem.py", line 458, in statusUpdate
exitStatus = int(update.message)
ValueError: invalid literal for int() with base 10: ''
|
ValueError
|
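The fix guards int(update.message): Mesos can deliver a TASK_FAILED update whose message is empty (as in the ValueError above) or a formatted traceback, so the scheduler now falls back to a generic exit status of 255. The behaviour, isolated into a hypothetical helper:

def parseExitStatus(message, default=255):
    # Non-numeric or empty messages map to a generic failure code.
    try:
        return int(message)
    except ValueError:
        return default

assert parseExitStatus('17') == 17
assert parseExitStatus('') == 255
assert parseExitStatus('RuntimeError: boom') == 255
|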
def launchTask(self, driver, task):
"""
Invoked by SchedulerDriver when a Mesos task should be launched by this executor
"""
def runTask():
log.debug("Running task %s", task.task_id.value)
sendUpdate(mesos_pb2.TASK_RUNNING)
        # This is where task.data is first invoked. Using this position to set up cleanupInfo
taskData = pickle.loads(task.data)
if self.workerCleanupInfo is not None:
assert self.workerCleanupInfo == taskData.workerCleanupInfo
else:
self.workerCleanupInfo = taskData.workerCleanupInfo
try:
popen = runJob(taskData)
self.runningTasks[task.task_id.value] = popen.pid
try:
exitStatus = popen.wait()
if 0 == exitStatus:
sendUpdate(mesos_pb2.TASK_FINISHED)
elif -9 == exitStatus:
sendUpdate(mesos_pb2.TASK_KILLED)
else:
sendUpdate(mesos_pb2.TASK_FAILED, message=str(exitStatus))
finally:
del self.runningTasks[task.task_id.value]
except:
exc_info = sys.exc_info()
log.error("Exception while running task:", exc_info=exc_info)
exc_type, exc_value, exc_trace = exc_info
sendUpdate(
mesos_pb2.TASK_FAILED,
message="".join(traceback.format_exception_only(exc_type, exc_value)),
)
def runJob(job):
"""
:type job: toil.batchSystems.mesos.ToilJob
:rtype: subprocess.Popen
"""
if job.userScript:
job.userScript.register()
log.debug("Invoking command: '%s'", job.command)
with self.popenLock:
return subprocess.Popen(
job.command, shell=True, env=dict(os.environ, **job.environment)
)
def sendUpdate(taskState, message=""):
log.debug("Sending status update ...")
status = mesos_pb2.TaskStatus()
status.task_id.value = task.task_id.value
status.message = message
status.state = taskState
driver.sendStatusUpdate(status)
log.debug("Sent status update")
thread = threading.Thread(target=runTask)
thread.start()
|
def launchTask(self, driver, task):
"""
Invoked by SchedulerDriver when a Mesos task should be launched by this executor
"""
def runTask():
log.debug("Running task %s", task.task_id.value)
sendUpdate(mesos_pb2.TASK_RUNNING)
        # This is where task.data is first invoked. Using this position to set up cleanupInfo
taskData = pickle.loads(task.data)
if self.workerCleanupInfo is not None:
assert self.workerCleanupInfo == taskData.workerCleanupInfo
else:
self.workerCleanupInfo = taskData.workerCleanupInfo
popen = runJob(taskData)
self.runningTasks[task.task_id.value] = popen.pid
try:
exitStatus = popen.wait()
if 0 == exitStatus:
sendUpdate(mesos_pb2.TASK_FINISHED)
elif -9 == exitStatus:
sendUpdate(mesos_pb2.TASK_KILLED)
else:
sendUpdate(mesos_pb2.TASK_FAILED, message=str(exitStatus))
except:
exc_type, exc_value, exc_trace = sys.exc_info()
sendUpdate(
mesos_pb2.TASK_FAILED,
message=str(traceback.format_exception_only(exc_type, exc_value)),
)
finally:
del self.runningTasks[task.task_id.value]
def runJob(job):
"""
:type job: toil.batchSystems.mesos.ToilJob
:rtype: subprocess.Popen
"""
if job.userScript:
job.userScript.register()
log.debug("Invoking command: '%s'", job.command)
with self.popenLock:
return subprocess.Popen(
job.command, shell=True, env=dict(os.environ, **job.environment)
)
def sendUpdate(taskState, message=""):
log.debug("Sending status update ...")
status = mesos_pb2.TaskStatus()
status.task_id.value = task.task_id.value
status.message = message
status.state = taskState
driver.sendStatusUpdate(status)
log.debug("Sent status update")
thread = threading.Thread(target=runTask)
thread.start()
|
https://github.com/DataBiosphere/toil/issues/671
|
Failed to call scheduler's statusUpdate
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/toil/batchSystems/mesos/batchSystem.py", line 458, in statusUpdate
exitStatus = int(update.message)
ValueError: invalid literal for int() with base 10: ''
|
ValueError
|
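This launchTask fix moves pickle.loads and runJob inside the try block, so executor-side failures are reported to the scheduler as a TASK_FAILED update carrying a formatted exception instead of silently killing the thread. It also switches from str() to ''.join() on traceback.format_exception_only, which matters because that function returns a list of lines; a minimal demonstration:

import sys
import traceback

def formatCurrentException():
    # ''.join(...) yields the plain message; str() on the returned list
    # would embed brackets and quotes in the Mesos status message.
    exc_type, exc_value, _ = sys.exc_info()
    return ''.join(traceback.format_exception_only(exc_type, exc_value))

try:
    raise RuntimeError('boom')
except RuntimeError:
    assert formatCurrentException() == 'RuntimeError: boom\n'
|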
def runTask():
log.debug("Running task %s", task.task_id.value)
sendUpdate(mesos_pb2.TASK_RUNNING)
    # This is where task.data is first invoked. Using this position to set up cleanupInfo
taskData = pickle.loads(task.data)
if self.workerCleanupInfo is not None:
assert self.workerCleanupInfo == taskData.workerCleanupInfo
else:
self.workerCleanupInfo = taskData.workerCleanupInfo
try:
popen = runJob(taskData)
self.runningTasks[task.task_id.value] = popen.pid
try:
exitStatus = popen.wait()
if 0 == exitStatus:
sendUpdate(mesos_pb2.TASK_FINISHED)
elif -9 == exitStatus:
sendUpdate(mesos_pb2.TASK_KILLED)
else:
sendUpdate(mesos_pb2.TASK_FAILED, message=str(exitStatus))
finally:
del self.runningTasks[task.task_id.value]
except:
exc_info = sys.exc_info()
log.error("Exception while running task:", exc_info=exc_info)
exc_type, exc_value, exc_trace = exc_info
sendUpdate(
mesos_pb2.TASK_FAILED,
message="".join(traceback.format_exception_only(exc_type, exc_value)),
)
|
def runTask():
log.debug("Running task %s", task.task_id.value)
sendUpdate(mesos_pb2.TASK_RUNNING)
    # This is where task.data is first invoked. Using this position to set up cleanupInfo
taskData = pickle.loads(task.data)
if self.workerCleanupInfo is not None:
assert self.workerCleanupInfo == taskData.workerCleanupInfo
else:
self.workerCleanupInfo = taskData.workerCleanupInfo
popen = runJob(taskData)
self.runningTasks[task.task_id.value] = popen.pid
try:
exitStatus = popen.wait()
if 0 == exitStatus:
sendUpdate(mesos_pb2.TASK_FINISHED)
elif -9 == exitStatus:
sendUpdate(mesos_pb2.TASK_KILLED)
else:
sendUpdate(mesos_pb2.TASK_FAILED, message=str(exitStatus))
except:
exc_type, exc_value, exc_trace = sys.exc_info()
sendUpdate(
mesos_pb2.TASK_FAILED,
message=str(traceback.format_exception_only(exc_type, exc_value)),
)
finally:
del self.runningTasks[task.task_id.value]
|
https://github.com/DataBiosphere/toil/issues/671
|
Failed to call scheduler's statusUpdate
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/toil/batchSystems/mesos/batchSystem.py", line 458, in statusUpdate
exitStatus = int(update.message)
ValueError: invalid literal for int() with base 10: ''
|
ValueError
|
def launchTask(self, driver, task):
"""
Invoked by SchedulerDriver when a Mesos task should be launched by this executor
"""
def runTask():
log.debug("Running task %s", task.task_id.value)
sendUpdate(mesos_pb2.TASK_RUNNING)
try:
popen = runJob(pickle.loads(task.data))
self.runningTasks[task.task_id.value] = popen.pid
try:
exitStatus = popen.wait()
if 0 == exitStatus:
sendUpdate(mesos_pb2.TASK_FINISHED)
elif -9 == exitStatus:
sendUpdate(mesos_pb2.TASK_KILLED)
else:
sendUpdate(mesos_pb2.TASK_FAILED, message=str(exitStatus))
finally:
del self.runningTasks[task.task_id.value]
except:
exc_info = sys.exc_info()
log.error("Exception while running task:", exc_info=exc_info)
exc_type, exc_value, exc_trace = exc_info
sendUpdate(
mesos_pb2.TASK_FAILED,
message="".join(traceback.format_exception_only(exc_type, exc_value)),
)
def runJob(job):
"""
:type job: toil.batchSystems.mesos.ToilJob
:rtype: subprocess.Popen
"""
if job.userScript:
job.userScript.register()
log.debug("Invoking command: '%s'", job.command)
with self.popenLock:
return subprocess.Popen(
job.command, shell=True, env=dict(os.environ, **job.environment)
)
def sendUpdate(taskState, message=""):
log.debug("Sending status update ...")
status = mesos_pb2.TaskStatus()
status.task_id.value = task.task_id.value
status.message = message
status.state = taskState
driver.sendStatusUpdate(status)
log.debug("Sent status update")
thread = threading.Thread(target=runTask)
thread.start()
|
def launchTask(self, driver, task):
"""
Invoked by SchedulerDriver when a Mesos task should be launched by this executor
"""
def runTask():
log.debug("Running task %s", task.task_id.value)
sendUpdate(mesos_pb2.TASK_RUNNING)
popen = runJob(pickle.loads(task.data))
self.runningTasks[task.task_id.value] = popen.pid
try:
exitStatus = popen.wait()
if 0 == exitStatus:
sendUpdate(mesos_pb2.TASK_FINISHED)
elif -9 == exitStatus:
sendUpdate(mesos_pb2.TASK_KILLED)
else:
sendUpdate(mesos_pb2.TASK_FAILED, message=str(exitStatus))
except:
exc_type, exc_value, exc_trace = sys.exc_info()
sendUpdate(
mesos_pb2.TASK_FAILED,
message=str(traceback.format_exception_only(exc_type, exc_value)),
)
finally:
del self.runningTasks[task.task_id.value]
def runJob(job):
"""
:type job: toil.batchSystems.mesos.ToilJob
:rtype: subprocess.Popen
"""
if job.userScript:
job.userScript.register()
log.debug("Invoking command: '%s'", job.command)
with self.popenLock:
return subprocess.Popen(
job.command, shell=True, env=dict(os.environ, **job.environment)
)
def sendUpdate(taskState, message=""):
log.debug("Sending status update ...")
status = mesos_pb2.TaskStatus()
status.task_id.value = task.task_id.value
status.message = message
status.state = taskState
driver.sendStatusUpdate(status)
log.debug("Sent status update")
thread = threading.Thread(target=runTask)
thread.start()
|
https://github.com/DataBiosphere/toil/issues/671
|
Failed to call scheduler's statusUpdate
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/toil/batchSystems/mesos/batchSystem.py", line 458, in statusUpdate
exitStatus = int(update.message)
ValueError: invalid literal for int() with base 10: ''
|
ValueError
|
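Two details of this fix are worth calling out: the outer try now also covers pickle.loads(task.data), so a task that cannot even be deserialised still produces a TASK_FAILED update instead of silently killing the thread, and the failure message is built with "".join(traceback.format_exception_only(...)) rather than str(...), which would have stringified the list itself. A self-contained sketch of the pattern, with a hypothetical sendUpdate stub standing in for the Mesos driver call:

import sys
import threading
import traceback

def sendUpdate(state, message=""):
    # Hypothetical stand-in for driver.sendStatusUpdate().
    print("status update:", state, repr(message))

def runTask(taskData):
    sendUpdate("TASK_RUNNING")
    try:
        # Deserialising the task and launching the child process both
        # live inside the try block, so setup failures are reported too.
        raise RuntimeError("could not deserialise task")  # simulated failure
    except Exception:
        exc_type, exc_value, _ = sys.exc_info()
        # format_exception_only() returns a list of lines; join them.
        sendUpdate("TASK_FAILED",
                   message="".join(traceback.format_exception_only(exc_type, exc_value)))

thread = threading.Thread(target=runTask, args=(b"bogus",))
thread.start()
thread.join()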
def runTask():
log.debug("Running task %s", task.task_id.value)
sendUpdate(mesos_pb2.TASK_RUNNING)
try:
popen = runJob(pickle.loads(task.data))
self.runningTasks[task.task_id.value] = popen.pid
try:
exitStatus = popen.wait()
if 0 == exitStatus:
sendUpdate(mesos_pb2.TASK_FINISHED)
elif -9 == exitStatus:
sendUpdate(mesos_pb2.TASK_KILLED)
else:
sendUpdate(mesos_pb2.TASK_FAILED, message=str(exitStatus))
finally:
del self.runningTasks[task.task_id.value]
except:
exc_info = sys.exc_info()
log.error("Exception while running task:", exc_info=exc_info)
exc_type, exc_value, exc_trace = exc_info
sendUpdate(
mesos_pb2.TASK_FAILED,
message="".join(traceback.format_exception_only(exc_type, exc_value)),
)
|
def runTask():
log.debug("Running task %s", task.task_id.value)
sendUpdate(mesos_pb2.TASK_RUNNING)
popen = runJob(pickle.loads(task.data))
self.runningTasks[task.task_id.value] = popen.pid
try:
exitStatus = popen.wait()
if 0 == exitStatus:
sendUpdate(mesos_pb2.TASK_FINISHED)
elif -9 == exitStatus:
sendUpdate(mesos_pb2.TASK_KILLED)
else:
sendUpdate(mesos_pb2.TASK_FAILED, message=str(exitStatus))
except:
exc_type, exc_value, exc_trace = sys.exc_info()
sendUpdate(
mesos_pb2.TASK_FAILED,
message=str(traceback.format_exception_only(exc_type, exc_value)),
)
finally:
del self.runningTasks[task.task_id.value]
|
https://github.com/DataBiosphere/toil/issues/671
|
Failed to call scheduler's statusUpdate
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/toil/batchSystems/mesos/batchSystem.py", line 458, in statusUpdate
exitStatus = int(update.message)
ValueError: invalid literal for int() with base 10: ''
|
ValueError
|
def __updateState(self, intID, exitStatus):
self.updatedJobsQueue.put((intID, exitStatus))
try:
del self.runningJobMap[intID]
except KeyError:
log.warning(
"Cannot find %i among running jobs. "
"Sent update about its exit code of %i anyways.",
intID,
exitStatus,
)
|
def __updateState(self, intID, exitStatus):
self.updatedJobsQueue.put((intID, exitStatus))
del self.runningJobMap[intID]
|
https://github.com/DataBiosphere/toil/issues/671
|
Failed to call scheduler's statusUpdate
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/toil/batchSystems/mesos/batchSystem.py", line 458, in statusUpdate
exitStatus = int(update.message)
ValueError: invalid literal for int() with base 10: ''
|
ValueError
|
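Duplicate or late status updates can reach __updateState after the job has already been removed from runningJobMap; the patch downgrades that from a crash to a warning. dict.pop with a default expresses the same defensive delete, as in this small sketch:

import logging

logging.basicConfig()
log = logging.getLogger("example")

runningJobMap = {42: "job-a"}

def updateState(intID, exitStatus):
    # Always forward the update; only the bookkeeping may be stale.
    if runningJobMap.pop(intID, None) is None:
        log.warning("Cannot find %i among running jobs. "
                    "Sent update about its exit code of %i anyways.",
                    intID, exitStatus)

updateState(42, 0)  # normal path
updateState(42, 0)  # duplicate update: warns instead of raising KeyError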
def writeGlobalFile(self, localFileName, cleanup=False):
"""
Takes a file (as a path) and uploads it to the job store.
If the local file is a file returned by :func:`toil.job.Job.FileStore.getLocalTempFile` \
or is in a directory, or, recursively, a subdirectory, returned by \
:func:`toil.job.Job.FileStore.getLocalTempDir` then the write is asynchronous, \
so further modifications during execution to the file pointed to by \
localFileName will result in undetermined behavior. Otherwise, the \
method will block until the file is written to the file store.
:param string localFileName: The path to the local file to upload.
:param Boolean cleanup: if True then the copy of the global file will \
be deleted once the job and all its successors have completed running. \
If not the global file must be deleted manually.
:returns: an ID that can be used to retrieve the file.
"""
# Put the file into the cache if it is a path within localTempDir
absLocalFileName = os.path.abspath(localFileName)
cleanupID = None if not cleanup else self.jobWrapper.jobStoreID
if absLocalFileName.startswith(self.localTempDir):
jobStoreFileID = self.jobStore.getEmptyFileStoreID(cleanupID)
fileHandle = open(absLocalFileName, "r")
if os.stat(absLocalFileName).st_uid == os.getuid():
# Chmod if permitted to make file read only to try to prevent accidental user modification
os.chmod(absLocalFileName, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
with self._lockFilesLock:
self._lockFiles.add(jobStoreFileID)
# A file handle added to the queue allows the asyncWrite threads to remove their jobID from _lockFiles.
# Therefore, a file should only be added after its fileID is added to _lockFiles
self.queue.put((fileHandle, jobStoreFileID))
self._jobStoreFileIDToCacheLocation[jobStoreFileID] = absLocalFileName
else:
# Write the file directly to the file store
jobStoreFileID = self.jobStore.writeFile(localFileName, cleanupID)
return jobStoreFileID
|
def writeGlobalFile(self, localFileName, cleanup=False):
"""
Takes a file (as a path) and uploads it to the job store.
If the local file is a file returned by :func:`toil.job.Job.FileStore.getLocalTempFile` \
or is in a directory, or, recursively, a subdirectory, returned by \
:func:`toil.job.Job.FileStore.getLocalTempDir` then the write is asynchronous, \
so further modifications during execution to the file pointed to by \
localFileName will result in undetermined behavior. Otherwise, the \
method will block until the file is written to the file store.
:param string localFileName: The path to the local file to upload.
:param Boolean cleanup: if True then the copy of the global file will \
be deleted once the job and all its successors have completed running. \
If not the global file must be deleted manually.
:returns: an ID that can be used to retrieve the file.
"""
# Put the file into the cache if it is a path within localTempDir
absLocalFileName = os.path.abspath(localFileName)
cleanupID = None if not cleanup else self.jobWrapper.jobStoreID
if absLocalFileName.startswith(self.localTempDir):
jobStoreFileID = self.jobStore.getEmptyFileStoreID(cleanupID)
self.queue.put((open(absLocalFileName, "r"), jobStoreFileID))
if os.stat(absLocalFileName).st_uid == os.getuid():
# Chmod if permitted to make file read only to try to prevent accidental user modification
os.chmod(absLocalFileName, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
with self._lockFilesLock:
self._lockFiles.add(jobStoreFileID)
self._jobStoreFileIDToCacheLocation[jobStoreFileID] = absLocalFileName
else:
# Write the file directly to the file store
jobStoreFileID = self.jobStore.writeFile(localFileName, cleanupID)
return jobStoreFileID
|
https://github.com/DataBiosphere/toil/issues/582
|
Exception in thread Thread-13:
WARNING:toil.leader:t/V/job0sJG0Z: Traceback (most recent call last):
WARNING:toil.leader:t/V/job0sJG0Z: File "/usr/lib/python2.7/threading.py", line 810, in __bootstrap_inner
WARNING:toil.leader:t/V/job0sJG0Z: self.run()
WARNING:toil.leader:t/V/job0sJG0Z: File "/usr/lib/python2.7/threading.py", line 763, in run
WARNING:toil.leader:t/V/job0sJG0Z: self.__target(*self.__args, **self.__kwargs)
WARNING:toil.leader:t/V/job0sJG0Z: File "/mnt/ephemeral/workspace/toil-branches/master/src/toil/job.py", line 506, in asyncWrite
WARNING:toil.leader:t/V/job0sJG0Z: self._lockFiles.remove(jobStoreFileID)
WARNING:toil.leader:t/V/job0sJG0Z: KeyError: 'I/O/tmpOiPrLG.tmp'
|
KeyError
|
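The KeyError in asyncWrite is an ordering race: the old writeGlobalFile queued the file handle before adding its ID to _lockFiles, so a fast writer thread could call _lockFiles.remove() first. The fix registers the ID before enqueuing, as in this condensed, hypothetical harness:

import queue
import threading

lockFiles = set()
lockFilesLock = threading.Lock()
work = queue.Queue()

def asyncWriter():
    while True:
        fileID = work.get()
        if fileID is None:
            break
        # ... the upload would happen here ...
        with lockFilesLock:
            lockFiles.remove(fileID)  # KeyError if we lose the race

def writeGlobalFile(fileID):
    # Register the ID *before* handing work to the writer thread, so
    # the remove() above can never see a missing entry.
    with lockFilesLock:
        lockFiles.add(fileID)
    work.put(fileID)

writer = threading.Thread(target=asyncWriter)
writer.start()
for i in range(1000):
    writeGlobalFile("file-%i" % i)
work.put(None)
writer.join()
assert not lockFiles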
def statsAndLoggingAggregatorProcess(jobStore, stop):
"""
Collates stats and log messages reported by the workers.
Runs in a separate process and collates for as long as the stop flag is not set.
"""
# Overall timing
startTime = time.time()
startClock = getTotalCpuTime()
def callback(fileHandle):
stats = json.load(fileHandle, object_hook=Expando)
try:
logs = stats.workers.log
except AttributeError:
# To be expected if there were no calls to logToMaster()
pass
else:
for message in logs:
logger.log(
int(message.level),
"Got message from job at time %s: %s",
time.strftime("%m-%d-%Y %H:%M:%S"),
message.text,
)
try:
logs = stats.logs
except AttributeError:
pass
else:
for log in logs:
logger.info("%s: %s", log.jobStoreID, log.text)
while True:
# This is an indirect way of telling the process to exit
if not stop.empty():
jobStore.readStatsAndLogging(callback)
break
if jobStore.readStatsAndLogging(callback) == 0:
time.sleep(0.5) # Avoid cycling too fast
# Finish the stats file
text = json.dumps(
dict(
total_time=str(time.time() - startTime),
total_clock=str(getTotalCpuTime() - startClock),
)
)
jobStore.writeStatsAndLogging(text)
|
def statsAndLoggingAggregatorProcess(jobStore, stop):
"""
Collates stats and log messages reported by the workers.
Runs in a separate process and collates for as long as the stop flag is not set.
"""
# Overall timing
startTime = time.time()
startClock = getTotalCpuTime()
def callback(fileHandle):
stats = json.load(fileHandle, object_hook=Expando)
workers = stats.workers
try:
logs = workers.log
except AttributeError:
# To be expected if there were no calls to logToMaster()
pass
else:
for message in logs:
logger.log(
int(message.level),
"Got message from job at time: %s : %s",
time.strftime("%m-%d-%Y %H:%M:%S"),
message.text,
)
for log in stats.logs:
logger.info("%s: %s", log.jobStoreID, log.text)
while True:
# This is an indirect way of telling the process to exit
if not stop.empty():
jobStore.readStatsAndLogging(callback)
break
if jobStore.readStatsAndLogging(callback) == 0:
time.sleep(0.5) # Avoid cycling too fast
# Finish the stats file
text = json.dumps(
dict(
total_time=str(time.time() - startTime),
total_clock=str(getTotalCpuTime() - startClock),
)
)
jobStore.writeStatsAndLogging(text)
|
https://github.com/DataBiosphere/toil/issues/609
|
INFO:toil.lib.bioio:Logging set at level: INFO
INFO:toil.common:Using the single machine batch system
INFO:toil.jobStores.fileJobStore:Jobstore directory is: /Users/sfrazer/projects/toil/toilWorkflow
WARNING:toil.batchSystems.singleMachine:Limiting maxCores to CPU count of system (8).
WARNING:toil.batchSystems.singleMachine:Limiting maxMemory to physically available memory (8589934592).
INFO:toil.batchSystems.singleMachine:Setting up the thread pool with 80 workers, given a minimum CPU fraction of 0.100000 and a maximum CPU value of 8.
INFO:toil.common:Written the environment for the jobs to the environment file
INFO:toil.leader:Checked batch system has no running jobs and no updated jobs
INFO:toil.leader:Found 1 jobs to start and 0 jobs with successors to run
INFO:toil.leader:Starting the main loop
INFO:toil.batchSystems.singleMachine:Executing command: '/Users/sfrazer/venv/wdl-toil/bin/_toil_worker /Users/sfrazer/projects/toil/toilWorkflow b/V/jobUulNcW'.
INFO:toil.batchSystems.singleMachine:Executing command: '/Users/sfrazer/venv/wdl-toil/bin/_toil_worker /Users/sfrazer/projects/toil/toilWorkflow X/J/joby_OmSG'.
INFO:toil.batchSystems.singleMachine:Executing command: '/Users/sfrazer/venv/wdl-toil/bin/_toil_worker /Users/sfrazer/projects/toil/toilWorkflow k/r/jobSMCrWI'.
INFO:toil.batchSystems.singleMachine:Executing command: '/Users/sfrazer/venv/wdl-toil/bin/_toil_worker /Users/sfrazer/projects/toil/toilWorkflow b/V/jobUulNcW'.
Process Process-1:
Traceback (most recent call last):
File "/usr/local/Cellar/python/2.7.10_2/Frameworks/Python.framework/Versions/2.7/lib/python2.7/multiprocessing/process.py", line 258, in _bootstrap
self.run()
File "/usr/local/Cellar/python/2.7.10_2/Frameworks/Python.framework/Versions/2.7/lib/python2.7/multiprocessing/process.py", line 114, in run
self._target(*self._args, **self._kwargs)
File "/Users/sfrazer/projects/toil/src/toil/leader.py", line 67, in statsAndLoggingAggregatorProcess
if jobStore.readStatsAndLogging(callback) == 0:
File "/Users/sfrazer/projects/toil/src/toil/jobStores/fileJobStore.py", line 238, in readStatsAndLogging
callback(fH)
File "/Users/sfrazer/projects/toil/src/toil/leader.py", line 47, in callback
workers = stats.workers
AttributeError: 'Expando' object has no attribute 'workers'
INFO:toil.leader:Only failed jobs and their dependents (0 total) are remaining, so exiting.
INFO:toil.leader:Finished the main loop
INFO:toil.leader:Waiting for stats and logging collator process to finish
INFO:toil.leader:Stats/logging finished collating in 0.000979900360107 seconds
None
|
AttributeError
|
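The AttributeError shows why the patched callback wraps the whole stats.workers.log lookup in one try/except: a stats file with no workers section fails on the very first attribute access. The same contract can be written with getattr defaults; here is a sketch with a minimal Expando stand-in (the real class is toil.lib.expando.Expando):

import json

class Expando(dict):
    # Minimal stand-in: a dict whose keys also read as attributes.
    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

def collate(raw):
    stats = json.loads(raw, object_hook=Expando)
    # Missing sections are expected, e.g. when a worker never called
    # logToMaster(); default to empty lists instead of attribute-chasing.
    for message in getattr(getattr(stats, "workers", Expando()), "log", []):
        print("worker message:", message.text)
    for log in getattr(stats, "logs", []):
        print("%s: %s" % (log.jobStoreID, log.text))

collate('{"logs": [{"jobStoreID": "x", "text": "hi"}]}')  # no "workers" key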
def callback(fileHandle):
stats = json.load(fileHandle, object_hook=Expando)
try:
logs = stats.workers.log
except AttributeError:
# To be expected if there were no calls to logToMaster()
pass
else:
for message in logs:
logger.log(
int(message.level),
"Got message from job at time %s: %s",
time.strftime("%m-%d-%Y %H:%M:%S"),
message.text,
)
try:
logs = stats.logs
except AttributeError:
pass
else:
for log in logs:
logger.info("%s: %s", log.jobStoreID, log.text)
|
def callback(fileHandle):
stats = json.load(fileHandle, object_hook=Expando)
workers = stats.workers
try:
logs = workers.log
except AttributeError:
# To be expected if there were no calls to logToMaster()
pass
else:
for message in logs:
logger.log(
int(message.level),
"Got message from job at time: %s : %s",
time.strftime("%m-%d-%Y %H:%M:%S"),
message.text,
)
for log in stats.logs:
logger.info("%s: %s", log.jobStoreID, log.text)
|
https://github.com/DataBiosphere/toil/issues/609
|
INFO:toil.lib.bioio:Logging set at level: INFO
INFO:toil.common:Using the single machine batch system
INFO:toil.jobStores.fileJobStore:Jobstore directory is: /Users/sfrazer/projects/toil/toilWorkflow
WARNING:toil.batchSystems.singleMachine:Limiting maxCores to CPU count of system (8).
WARNING:toil.batchSystems.singleMachine:Limiting maxMemory to physically available memory (8589934592).
INFO:toil.batchSystems.singleMachine:Setting up the thread pool with 80 workers, given a minimum CPU fraction of 0.100000 and a maximum CPU value of 8.
INFO:toil.common:Written the environment for the jobs to the environment file
INFO:toil.leader:Checked batch system has no running jobs and no updated jobs
INFO:toil.leader:Found 1 jobs to start and 0 jobs with successors to run
INFO:toil.leader:Starting the main loop
INFO:toil.batchSystems.singleMachine:Executing command: '/Users/sfrazer/venv/wdl-toil/bin/_toil_worker /Users/sfrazer/projects/toil/toilWorkflow b/V/jobUulNcW'.
INFO:toil.batchSystems.singleMachine:Executing command: '/Users/sfrazer/venv/wdl-toil/bin/_toil_worker /Users/sfrazer/projects/toil/toilWorkflow X/J/joby_OmSG'.
INFO:toil.batchSystems.singleMachine:Executing command: '/Users/sfrazer/venv/wdl-toil/bin/_toil_worker /Users/sfrazer/projects/toil/toilWorkflow k/r/jobSMCrWI'.
INFO:toil.batchSystems.singleMachine:Executing command: '/Users/sfrazer/venv/wdl-toil/bin/_toil_worker /Users/sfrazer/projects/toil/toilWorkflow b/V/jobUulNcW'.
Process Process-1:
Traceback (most recent call last):
File "/usr/local/Cellar/python/2.7.10_2/Frameworks/Python.framework/Versions/2.7/lib/python2.7/multiprocessing/process.py", line 258, in _bootstrap
self.run()
File "/usr/local/Cellar/python/2.7.10_2/Frameworks/Python.framework/Versions/2.7/lib/python2.7/multiprocessing/process.py", line 114, in run
self._target(*self._args, **self._kwargs)
File "/Users/sfrazer/projects/toil/src/toil/leader.py", line 67, in statsAndLoggingAggregatorProcess
if jobStore.readStatsAndLogging(callback) == 0:
File "/Users/sfrazer/projects/toil/src/toil/jobStores/fileJobStore.py", line 238, in readStatsAndLogging
callback(fH)
File "/Users/sfrazer/projects/toil/src/toil/leader.py", line 47, in callback
workers = stats.workers
AttributeError: 'Expando' object has no attribute 'workers'
INFO:toil.leader:Only failed jobs and their dependents (0 total) are remaining, so exiting.
INFO:toil.leader:Finished the main loop
INFO:toil.leader:Waiting for stats and logging collator process to finish
INFO:toil.leader:Stats/logging finished collating in 0.000979900360107 seconds
None
|
AttributeError
|
def mainLoop(config, batchSystem, jobStore, rootJobWrapper):
"""
This is the main loop from which jobs are issued and processed.
:raises: toil.leader.FailedJobsException if, at the end of the function, there remain
failed jobs
:return: The return value of the root job's run function.
"""
##########################################
# Get a snapshot of the current state of the jobs in the jobStore
##########################################
toilState = ToilState(jobStore, rootJobWrapper)
##########################################
# Load the jobBatcher class - used to track jobs submitted to the batch-system
##########################################
# Kill any jobs on the batch system queue from the last time.
assert (
len(batchSystem.getIssuedBatchJobIDs()) == 0
) # Batch system must start with no active jobs!
logger.info("Checked batch system has no running jobs and no updated jobs")
jobBatcher = JobBatcher(config, batchSystem, jobStore, toilState)
logger.info(
"Found %s jobs to start and %i jobs with successors to run",
len(toilState.updatedJobs),
len(toilState.successorCounts),
)
##########################################
# Start the stats/logging aggregation process
##########################################
stopStatsAndLoggingAggregatorProcess = Queue()  # When a message is put on this queue the aggregator process exits
worker = Process(
target=statsAndLoggingAggregatorProcess,
args=(jobStore, stopStatsAndLoggingAggregatorProcess),
)
worker.start()
##########################################
# The main loop in which jobs are scheduled/processed
##########################################
# Sets up the timing of the jobWrapper rescuing method
timeSinceJobsLastRescued = time.time()
# Number of jobs that could not be completed successfully after exhausting retries
totalFailedJobs = 0
logger.info("Starting the main loop")
while True:
##########################################
# Process jobs that are ready to be scheduled/have successors to schedule
##########################################
if len(toilState.updatedJobs) > 0:
logger.debug(
"Built the jobs list, currently have %i jobs to update and %i jobs issued",
len(toilState.updatedJobs),
jobBatcher.getNumberOfJobsIssued(),
)
for jobWrapper, resultStatus in toilState.updatedJobs:
# If the jobWrapper has a command it must be run before any successors
# Similarly, if the job previously failed we rerun it, even if it doesn't have a command to
# run, to eliminate any parts of the stack now completed.
if jobWrapper.command != None or resultStatus != 0:
if jobWrapper.remainingRetryCount > 0:
jobBatcher.issueJob(
jobWrapper.jobStoreID,
jobWrapper.memory,
jobWrapper.cores,
jobWrapper.disk,
)
else:
totalFailedJobs += 1
logger.warn(
"Job: %s is completely failed", jobWrapper.jobStoreID
)
# There exist successors to run
elif len(jobWrapper.stack) > 0:
assert len(jobWrapper.stack[-1]) > 0
logger.debug(
"Job: %s has %i successors to schedule",
jobWrapper.jobStoreID,
len(jobWrapper.stack[-1]),
)
# Record the number of successors that must be completed before
# the jobWrapper can be considered again
assert jobWrapper not in toilState.successorCounts
toilState.successorCounts[jobWrapper] = len(jobWrapper.stack[-1])
# List of successors to schedule
successors = []
# For each successor schedule if all predecessors have been completed
for (
successorJobStoreID,
memory,
cores,
disk,
predecessorID,
) in jobWrapper.stack.pop():
# Build map from successor to predecessors.
if (
successorJobStoreID
not in toilState.successorJobStoreIDToPredecessorJobs
):
toilState.successorJobStoreIDToPredecessorJobs[
successorJobStoreID
] = []
toilState.successorJobStoreIDToPredecessorJobs[
successorJobStoreID
].append(jobWrapper)
# Case that the jobWrapper has multiple predecessors
if predecessorID != None:
# Load the successor jobWrapper
job2 = jobStore.load(successorJobStoreID)
# Record that this predecessor has finished
job2.predecessorsFinished.add(predecessorID)
# Checkpoint
jobStore.update(job2)
# If the job's predecessors have not all completed then
# ignore the jobWrapper
assert len(job2.predecessorsFinished) >= 1
assert (
len(job2.predecessorsFinished) <= job2.predecessorNumber
)
if len(job2.predecessorsFinished) < job2.predecessorNumber:
continue
successors.append((successorJobStoreID, memory, cores, disk))
jobBatcher.issueJobs(successors)
# There are no remaining tasks to schedule within the jobWrapper, but
# we schedule it anyway to allow it to be deleted.
# TODO: An alternative would be simply to delete it here and add it to the
# list of jobs to process, or (better) to create an asynchronous
# process that deletes jobs and then feeds them back into the set
# of jobs to be processed
else:
if jobWrapper.remainingRetryCount > 0:
jobBatcher.issueJob(
jobWrapper.jobStoreID,
config.defaultMemory,
config.defaultCores,
config.defaultDisk,
)
logger.debug(
"Job: %s is empty, we are scheduling to clean it up",
jobWrapper.jobStoreID,
)
else:
totalFailedJobs += 1
logger.warn(
"Job: %s is empty but completely failed - something is very wrong",
jobWrapper.jobStoreID,
)
toilState.updatedJobs = set() # We've considered them all, so reset
##########################################
# The exit criterion
##########################################
if jobBatcher.getNumberOfJobsIssued() == 0:
logger.info(
"Only failed jobs and their dependents (%i total) are remaining, so exiting.",
totalFailedJobs,
)
break
##########################################
# Gather any new, updated jobWrapper from the batch system
##########################################
# Ask the batch system which jobs have been completed,
# waiting up to 10 seconds for an answer
updatedJob = batchSystem.getUpdatedBatchJob(10)
if updatedJob != None:
jobBatchSystemID, result = updatedJob
if jobBatcher.hasJob(jobBatchSystemID):
if result == 0:
logger.debug(
"Batch system is reporting that the jobWrapper with "
"batch system ID: %s and jobWrapper store ID: %s ended successfully",
jobBatchSystemID,
jobBatcher.getJob(jobBatchSystemID),
)
else:
logger.warn(
"Batch system is reporting that the jobWrapper with "
"batch system ID: %s and jobWrapper store ID: %s failed with exit value %i",
jobBatchSystemID,
jobBatcher.getJob(jobBatchSystemID),
result,
)
jobBatcher.processFinishedJob(jobBatchSystemID, result)
else:
logger.warn(
"A result seems to already have been processed "
"for jobWrapper with batch system ID: %i",
jobBatchSystemID,
)
else:
##########################################
# Process jobs that have gone awry
##########################################
# In the case that there is nothing happening
# (no updated jobWrapper to gather for 10 seconds)
# check if there are any jobs that have run too long
# (see JobBatcher.reissueOverLongJobs) or which
# have gone missing from the batch system (see JobBatcher.reissueMissingJobs)
if (
time.time() - timeSinceJobsLastRescued >= config.rescueJobsFrequency
): # We only
# rescue jobs every N seconds, and when we have
# apparently exhausted the current jobWrapper supply
jobBatcher.reissueOverLongJobs()
logger.info("Reissued any over long jobs")
hasNoMissingJobs = jobBatcher.reissueMissingJobs()
if hasNoMissingJobs:
timeSinceJobsLastRescued = time.time()
else:
timeSinceJobsLastRescued += 60 # This means we'll try again
# in a minute, providing things are quiet
logger.info("Rescued any (long) missing jobs")
logger.info("Finished the main loop")
##########################################
# Finish up the stats/logging aggregation process
##########################################
logger.info("Waiting for stats and logging collator process to finish ...")
startTime = time.time()
stopStatsAndLoggingAggregatorProcess.put(True)
worker.join()
if worker.exitcode != 0:
raise RuntimeError(
"Stats/logging collator failed with exit code %d." % worker.exitcode
)
logger.info(
"... finished collating stats and logs. Took %s seconds",
time.time() - startTime,
)
# in addition to cleaning on exceptions, onError should clean if there are any failed jobs
# Parse out the return value from the root job
with jobStore.readSharedFileStream("rootJobReturnValue") as fH:
jobStoreFileID = fH.read()
with jobStore.readFileStream(jobStoreFileID) as fH:
rootJobReturnValue = cPickle.load(fH)
if totalFailedJobs > 0:
if config.clean == "onError" or config.clean == "always":
jobStore.deleteJobStore()
raise FailedJobsException(config.jobStore, totalFailedJobs)
if config.clean == "onSuccess" or config.clean == "always":
jobStore.deleteJobStore()
return rootJobReturnValue
|
def mainLoop(config, batchSystem, jobStore, rootJobWrapper):
"""
This is the main loop from which jobs are issued and processed.
:raises: toil.leader.FailedJobsException if, at the end of the function, there remain
failed jobs
:return: The return value of the root job's run function.
"""
##########################################
# Get a snapshot of the current state of the jobs in the jobStore
##########################################
toilState = ToilState(jobStore, rootJobWrapper)
##########################################
# Load the jobBatcher class - used to track jobs submitted to the batch-system
##########################################
# Kill any jobs on the batch system queue from the last time.
assert (
len(batchSystem.getIssuedBatchJobIDs()) == 0
) # Batch system must start with no active jobs!
logger.info("Checked batch system has no running jobs and no updated jobs")
jobBatcher = JobBatcher(config, batchSystem, jobStore, toilState)
logger.info(
"Found %s jobs to start and %i jobs with successors to run",
len(toilState.updatedJobs),
len(toilState.successorCounts),
)
##########################################
# Start the stats/logging aggregation process
##########################################
stopStatsAndLoggingAggregatorProcess = Queue()  # When a message is put on this queue the aggregator process exits
worker = Process(
target=statsAndLoggingAggregatorProcess,
args=(jobStore, stopStatsAndLoggingAggregatorProcess),
)
worker.start()
##########################################
# The main loop in which jobs are scheduled/processed
##########################################
# Sets up the timing of the jobWrapper rescuing method
timeSinceJobsLastRescued = time.time()
# Number of jobs that could not be completed successfully after exhausting retries
totalFailedJobs = 0
logger.info("Starting the main loop")
while True:
##########################################
# Process jobs that are ready to be scheduled/have successors to schedule
##########################################
if len(toilState.updatedJobs) > 0:
logger.debug(
"Built the jobs list, currently have %i jobs to update and %i jobs issued",
len(toilState.updatedJobs),
jobBatcher.getNumberOfJobsIssued(),
)
for jobWrapper, resultStatus in toilState.updatedJobs:
# If the jobWrapper has a command it must be run before any successors
# Similarly, if the job previously failed we rerun it, even if it doesn't have a command to
# run, to eliminate any parts of the stack now completed.
if jobWrapper.command != None or resultStatus != 0:
if jobWrapper.remainingRetryCount > 0:
jobBatcher.issueJob(
jobWrapper.jobStoreID,
jobWrapper.memory,
jobWrapper.cores,
jobWrapper.disk,
)
else:
totalFailedJobs += 1
logger.warn(
"Job: %s is completely failed", jobWrapper.jobStoreID
)
# There exist successors to run
elif len(jobWrapper.stack) > 0:
assert len(jobWrapper.stack[-1]) > 0
logger.debug(
"Job: %s has %i successors to schedule",
jobWrapper.jobStoreID,
len(jobWrapper.stack[-1]),
)
# Record the number of successors that must be completed before
# the jobWrapper can be considered again
assert jobWrapper not in toilState.successorCounts
toilState.successorCounts[jobWrapper] = len(jobWrapper.stack[-1])
# List of successors to schedule
successors = []
# For each successor schedule if all predecessors have been completed
for (
successorJobStoreID,
memory,
cores,
disk,
predecessorID,
) in jobWrapper.stack.pop():
# Build map from successor to predecessors.
if (
successorJobStoreID
not in toilState.successorJobStoreIDToPredecessorJobs
):
toilState.successorJobStoreIDToPredecessorJobs[
successorJobStoreID
] = []
toilState.successorJobStoreIDToPredecessorJobs[
successorJobStoreID
].append(jobWrapper)
# Case that the jobWrapper has multiple predecessors
if predecessorID != None:
# Load the successor jobWrapper
job2 = jobStore.load(successorJobStoreID)
# Record that this predecessor has finished
job2.predecessorsFinished.add(predecessorID)
# Checkpoint
jobStore.update(job2)
# If the job's predecessors have not all completed then
# ignore the jobWrapper
assert len(job2.predecessorsFinished) >= 1
assert (
len(job2.predecessorsFinished) <= job2.predecessorNumber
)
if len(job2.predecessorsFinished) < job2.predecessorNumber:
continue
successors.append((successorJobStoreID, memory, cores, disk))
jobBatcher.issueJobs(successors)
# There are no remaining tasks to schedule within the jobWrapper, but
# we schedule it anyway to allow it to be deleted.
# TODO: An alternative would be simply to delete it here and add it to the
# list of jobs to process, or (better) to create an asynchronous
# process that deletes jobs and then feeds them back into the set
# of jobs to be processed
else:
if jobWrapper.remainingRetryCount > 0:
jobBatcher.issueJob(
jobWrapper.jobStoreID,
config.defaultMemory,
config.defaultCores,
config.defaultDisk,
)
logger.debug(
"Job: %s is empty, we are scheduling to clean it up",
jobWrapper.jobStoreID,
)
else:
totalFailedJobs += 1
logger.warn(
"Job: %s is empty but completely failed - something is very wrong",
jobWrapper.jobStoreID,
)
toilState.updatedJobs = set() # We've considered them all, so reset
##########################################
# The exit criterion
##########################################
if jobBatcher.getNumberOfJobsIssued() == 0:
logger.info(
"Only failed jobs and their dependents (%i total) are remaining, so exiting.",
totalFailedJobs,
)
break
##########################################
# Gather any new, updated jobWrapper from the batch system
##########################################
# Ask the batch system which jobs have been completed,
# waiting up to 10 seconds for an answer
updatedJob = batchSystem.getUpdatedBatchJob(10)
if updatedJob != None:
jobBatchSystemID, result = updatedJob
if jobBatcher.hasJob(jobBatchSystemID):
if result == 0:
logger.debug(
"Batch system is reporting that the jobWrapper with "
"batch system ID: %s and jobWrapper store ID: %s ended successfully",
jobBatchSystemID,
jobBatcher.getJob(jobBatchSystemID),
)
else:
logger.warn(
"Batch system is reporting that the jobWrapper with "
"batch system ID: %s and jobWrapper store ID: %s failed with exit value %i",
jobBatchSystemID,
jobBatcher.getJob(jobBatchSystemID),
result,
)
jobBatcher.processFinishedJob(jobBatchSystemID, result)
else:
logger.warn(
"A result seems to already have been processed "
"for jobWrapper with batch system ID: %i",
jobBatchSystemID,
)
else:
##########################################
# Process jobs that have gone awry
##########################################
# In the case that there is nothing happening
# (no updated jobWrapper to gather for 10 seconds)
# check if there are any jobs that have run too long
# (see JobBatcher.reissueOverLongJobs) or which
# have gone missing from the batch system (see JobBatcher.reissueMissingJobs)
if (
time.time() - timeSinceJobsLastRescued >= config.rescueJobsFrequency
): # We only
# rescue jobs every N seconds, and when we have
# apparently exhausted the current jobWrapper supply
jobBatcher.reissueOverLongJobs()
logger.info("Reissued any over long jobs")
hasNoMissingJobs = jobBatcher.reissueMissingJobs()
if hasNoMissingJobs:
timeSinceJobsLastRescued = time.time()
else:
timeSinceJobsLastRescued += 60 # This means we'll try again
# in a minute, providing things are quiet
logger.info("Rescued any (long) missing jobs")
logger.info("Finished the main loop")
##########################################
# Finish up the stats/logging aggregation process
##########################################
logger.info("Waiting for stats and logging collator process to finish")
startTime = time.time()
stopStatsAndLoggingAggregatorProcess.put(True)
worker.join()
logger.info(
"Stats/logging finished collating in %s seconds", time.time() - startTime
)
# in addition to cleaning on exceptions, onError should clean if there are any failed jobs
# Parse out the return value from the root job
with jobStore.readSharedFileStream("rootJobReturnValue") as fH:
jobStoreFileID = fH.read()
with jobStore.readFileStream(jobStoreFileID) as fH:
rootJobReturnValue = cPickle.load(fH)
if totalFailedJobs > 0:
if config.clean == "onError" or config.clean == "always":
jobStore.deleteJobStore()
raise FailedJobsException(config.jobStore, totalFailedJobs)
if config.clean == "onSuccess" or config.clean == "always":
jobStore.deleteJobStore()
return rootJobReturnValue
|
https://github.com/DataBiosphere/toil/issues/609
|
INFO:toil.lib.bioio:Logging set at level: INFO
INFO:toil.common:Using the single machine batch system
INFO:toil.jobStores.fileJobStore:Jobstore directory is: /Users/sfrazer/projects/toil/toilWorkflow
WARNING:toil.batchSystems.singleMachine:Limiting maxCores to CPU count of system (8).
WARNING:toil.batchSystems.singleMachine:Limiting maxMemory to physically available memory (8589934592).
INFO:toil.batchSystems.singleMachine:Setting up the thread pool with 80 workers, given a minimum CPU fraction of 0.100000 and a maximum CPU value of 8.
INFO:toil.common:Written the environment for the jobs to the environment file
INFO:toil.leader:Checked batch system has no running jobs and no updated jobs
INFO:toil.leader:Found 1 jobs to start and 0 jobs with successors to run
INFO:toil.leader:Starting the main loop
INFO:toil.batchSystems.singleMachine:Executing command: '/Users/sfrazer/venv/wdl-toil/bin/_toil_worker /Users/sfrazer/projects/toil/toilWorkflow b/V/jobUulNcW'.
INFO:toil.batchSystems.singleMachine:Executing command: '/Users/sfrazer/venv/wdl-toil/bin/_toil_worker /Users/sfrazer/projects/toil/toilWorkflow X/J/joby_OmSG'.
INFO:toil.batchSystems.singleMachine:Executing command: '/Users/sfrazer/venv/wdl-toil/bin/_toil_worker /Users/sfrazer/projects/toil/toilWorkflow k/r/jobSMCrWI'.
INFO:toil.batchSystems.singleMachine:Executing command: '/Users/sfrazer/venv/wdl-toil/bin/_toil_worker /Users/sfrazer/projects/toil/toilWorkflow b/V/jobUulNcW'.
Process Process-1:
Traceback (most recent call last):
File "/usr/local/Cellar/python/2.7.10_2/Frameworks/Python.framework/Versions/2.7/lib/python2.7/multiprocessing/process.py", line 258, in _bootstrap
self.run()
File "/usr/local/Cellar/python/2.7.10_2/Frameworks/Python.framework/Versions/2.7/lib/python2.7/multiprocessing/process.py", line 114, in run
self._target(*self._args, **self._kwargs)
File "/Users/sfrazer/projects/toil/src/toil/leader.py", line 67, in statsAndLoggingAggregatorProcess
if jobStore.readStatsAndLogging(callback) == 0:
File "/Users/sfrazer/projects/toil/src/toil/jobStores/fileJobStore.py", line 238, in readStatsAndLogging
callback(fH)
File "/Users/sfrazer/projects/toil/src/toil/leader.py", line 47, in callback
workers = stats.workers
AttributeError: 'Expando' object has no attribute 'workers'
INFO:toil.leader:Only failed jobs and their dependents (0 total) are remaining, so exiting.
INFO:toil.leader:Finished the main loop
INFO:toil.leader:Waiting for stats and logging collator process to finish
INFO:toil.leader:Stats/logging finished collating in 0.000979900360107 seconds
None
|
AttributeError
|
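Beyond the callback fix, the patched mainLoop inspects worker.exitcode after join(), so a crashed stats process fails the workflow loudly instead of logging a cheerful "finished collating" line as in the transcript above. The pattern in isolation:

from multiprocessing import Process
import sys

def collator():
    sys.exit(3)  # simulate the aggregator process dying

if __name__ == "__main__":
    worker = Process(target=collator)
    worker.start()
    worker.join()
    # exitcode is 0 on success, positive on sys.exit(n), negative when
    # the process was killed by a signal; none of these should be ignored.
    if worker.exitcode != 0:
        raise RuntimeError("Stats/logging collator failed with exit code %d."
                           % worker.exitcode)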
def retryOnAzureTimeout(exception):
timeoutMsg = "could not be completed within the specified time"
busyMsg = "Service Unavailable"
return isinstance(exception, WindowsAzureError) and (
timeoutMsg in str(exception) or busyMsg in str(exception)
)
|
def retryOnAzureTimeout(exception):
timeoutMsg = "could not be completed within the specified time"
return isinstance(exception, WindowsAzureError) and timeoutMsg in str(exception)
|
https://github.com/DataBiosphere/toil/issues/583
|
INFO:toil.batchSystems.mesos.batchSystem:Preparing to launch Mesos task 9468 using offer 20151124-210426-83886090-5050-35140-O28977...
INFO:toil.batchSystems.mesos.batchSystem:...launching Mesos task 9468
INFO:toil.batchSystems.mesos.batchSystem:Preparing to launch Mesos task 9469 using offer 20151124-210426-83886090-5050-35140-O28978...
INFO:toil.batchSystems.mesos.batchSystem:Stopping Mesos driver
I1125 01:15:24.215514 61380 sched.cpp:1589] Asked to stop the driver
INFO:toil.batchSystems.mesos.batchSystem:Joining Mesos driver
INFO:toil.batchSystems.mesos.batchSystem:Joined Mesos driver
Traceback (most recent call last):
File "scripts/parallelMappingEvaluation.py", line 1453, in <module>
sys.exit(main(sys.argv))
File "scripts/parallelMappingEvaluation.py", line 1443, in main
failed_jobs = Job.Runner.startToil(root_job, options)
File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 438, in startToil
return mainLoop(config, batchSystem, jobStore, rootJob)
File "/usr/local/lib/python2.7/dist-packages/toil/leader.py", line 460, in mainLoop
jobBatcher.processFinishedJob(jobBatchSystemID, result)
File "/usr/local/lib/python2.7/dist-packages/toil/leader.py", line 223, in processFinishedJob
jobWrapper = self.jobStore.load(jobStoreID)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 146, in load
jobEntity = self.jobItems.get_entity(row_key=jobStoreID)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 521, in get_entity
return self.__getattr__('get_entity')(**kwargs)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 515, in f
return function(**kwargs)
File "/usr/local/lib/python2.7/dist-packages/azure/storage/tableservice.py", line 361, in get_entity
response = self._perform_request(request)
File "/usr/local/lib/python2.7/dist-packages/azure/storage/storageclient.py", line 179, in _perform_request
_storage_error_handler(ex)
File "/usr/local/lib/python2.7/dist-packages/azure/storage/__init__.py", line 1174, in _storage_error_handler
return _general_error_handler(http_error)
File "/usr/local/lib/python2.7/dist-packages/azure/__init__.py", line 551, in _general_error_handler
http_error.respbody.decode('utf-8-sig'))
azure.WindowsAzureError: Unknown error (Service Unavailable)
<?xml version="1.0" encoding="utf-8"?><error xmlns="http://schemas.microsoft.com/ado/2007/08/dataservices/metadata"><code>ServerBusy</code><message xml:lang="en-US">The server is busy.
RequestId:6552fd81-0002-0063-361e-275738000000
Time:2015-11-25T01:15:24.4344591Z</message></error>
INFO:toil.batchSystems.mesos.batchSystem:...launching Mesos task 9469
INFO:toil.batchSystems.mesos.batchSystem:Preparing to launch Mesos task 9480 using offer 20151124-210426-83886090-5050-35140-O28979...
Process Process-1:
Traceback (most recent call last):
File "/usr/lib/python2.7/multiprocessing/process.py", line 258, in _bootstrap
self.run()
File "/usr/lib/python2.7/multiprocessing/process.py", line 114, in run
self._target(*self._args, **self._kwargs)
File "/usr/local/lib/python2.7/dist-packages/toil/leader.py", line 67, in statsAndLoggingAggregatorProcess
if jobStore.readStatsAndLogging(statsAndLoggingCallBackFn) == 0:
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 277, in readStatsAndLogging
for entity in self.statsFileIDs.query_entities():
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 515, in f
return function(**kwargs)
File "/usr/local/lib/python2.7/dist-packages/azure/storage/tableservice.py", line 401, in query_entities
response = self._perform_request(request)
File "/usr/local/lib/python2.7/dist-packages/azure/storage/storageclient.py", line 179, in _perform_request
_storage_error_handler(ex)
File "/usr/local/lib/python2.7/dist-packages/azure/storage/__init__.py", line 1174, in _storage_error_handler
return _general_error_handler(http_error)
File "/usr/local/lib/python2.7/dist-packages/azure/__init__.py", line 551, in _general_error_handler
http_error.respbody.decode('utf-8-sig'))
WindowsAzureError: Unknown error (Service Unavailable)
<?xml version="1.0" encoding="utf-8"?><error xmlns="http://schemas.microsoft.com/ado/2007/08/dataservices/metadata"><code>ServerBusy</code><message xml:lang="en-US">The server is busy.
RequestId:5b65500a-0002-00e4-0f1e-270217000000
Time:2015-11-25T01:15:24.3709488Z</message></error>
INFO:toil.batchSystems.mesos.batchSystem:...launching Mesos task 9480
INFO:toil.batchSystems.mesos.batchSystem:Preparing to launch Mesos task 9481 using offer 20151124-210426-83886090-5050-35140-O28980...
Failed to call scheduler's resourceOffer
sys.excepthook is missing
lost sys.stderr
I1125 01:15:24.473640 61398 sched.cpp:1623] Asked to abort the driver
I1125 01:15:24.474150 61398 sched.cpp:831] Stopping framework '20151124-210426-83886090-5050-35140-0006'
|
azure.WindowsAzureError
|
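The fix widens the retry predicate so that ServerBusy / "Service Unavailable" responses, which killed both the leader and the stats process in the log above, are retried like timeouts. Toil feeds such predicates into its own retry helper; this standalone loop is only a sketch of the idea, with a stand-in exception class:

import time

class WindowsAzureError(Exception):
    pass  # stand-in for azure.WindowsAzureError

def retryOnAzureTimeout(exception):
    timeoutMsg = "could not be completed within the specified time"
    busyMsg = "Service Unavailable"
    return isinstance(exception, WindowsAzureError) and (
        timeoutMsg in str(exception) or busyMsg in str(exception))

def withRetries(fn, predicate, attempts=5, backoff=0.01):
    for attempt in range(attempts):
        try:
            return fn()
        except Exception as e:
            if attempt + 1 == attempts or not predicate(e):
                raise
            time.sleep(backoff * 2 ** attempt)  # exponential backoff

calls = []
def flaky():
    calls.append(1)
    if len(calls) < 3:
        raise WindowsAzureError("Unknown error (Service Unavailable)")
    return "ok"

assert withRetries(flaky, retryOnAzureTimeout) == "ok"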
def _startParasol(self, numCores=None, memory=None):
if numCores is None:
numCores = multiprocessing.cpu_count()
if memory is None:
memory = physicalMemory()
self.numCores = numCores
self.memory = memory
self.leader = self.ParasolLeaderThread()
self.leader.start()
self.worker = self.ParasolWorkerThread()
self.worker.start()
while self.leader.popen is None or self.worker.popen is None:
log.info("Waiting for leader and worker processes")
time.sleep(0.1)
|
def _startParasol(self, numCores=None, memory=None):
if numCores is None:
numCores = multiprocessing.cpu_count()
if memory is None:
memory = self._physicalMemory()
self.numCores = numCores
self.memory = memory
self.leader = self.ParasolLeaderThread()
self.leader.start()
self.worker = self.ParasolWorkerThread()
self.worker.start()
while self.leader.popen is None or self.worker.popen is None:
log.info("Waiting for leader and worker processes")
time.sleep(0.1)
|
https://github.com/DataBiosphere/toil/issues/587
|
Traceback (most recent call last):
File "wdltoil.py", line 1, in <module>
from toil.job import Job
File "/Users/sfrazer/projects/toil/src/toil/job.py", line 36, in <module>
from toil.common import loadJobStore
File "/Users/sfrazer/projects/toil/src/toil/common.py", line 27, in <module>
from toil.batchSystems.singleMachine import SingleMachineBatchSystem
File "/Users/sfrazer/projects/toil/src/toil/batchSystems/singleMachine.py", line 31, in <module>
class SingleMachineBatchSystem(AbstractBatchSystem):
File "/Users/sfrazer/projects/toil/src/toil/batchSystems/singleMachine.py", line 47, in SingleMachineBatchSystem
physicalMemory = os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES')
ValueError: unrecognized configuration name
|
ValueError
|
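The traceback shows os.sysconf('SC_PAGE_SIZE') failing at class-definition time on macOS, where those sysconf names are not recognised. The merged code defers to a module-level physicalMemory() helper instead; a portable sketch of such a helper (an illustration, not necessarily toil's exact implementation) falls back to sysctl:

import os
import subprocess

def physicalMemory():
    # POSIX systems that expose the sysconf names answer directly;
    # macOS raises ValueError for them, so fall back to `sysctl hw.memsize`.
    try:
        return os.sysconf("SC_PAGE_SIZE") * os.sysconf("SC_PHYS_PAGES")
    except (ValueError, OSError):
        return int(subprocess.check_output(["sysctl", "-n", "hw.memsize"]))

print("physical memory: %i bytes" % physicalMemory())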
def logToMaster(self, text, level=logging.INFO):
"""
Send a logging message to the leader. The message will also be \
logged by the worker at the same level.
:param str text: The string to log.
:param int level: The logging level.
"""
logger.log(level=level, msg=("LOG-TO-MASTER: " + text))
self.loggingMessages.append(dict(text=text, level=level))
|
def logToMaster(self, string, level=logging.INFO):
"""
Send a logging message to the leader. The message will also be \
logged by the worker at the same level.
:param string: The string to log.
:param int level: The logging level.
"""
logger.log(level=level, msg=("LOG-TO-MASTER: " + string))
self.loggingMessages.append((str(string), level))
|
https://github.com/DataBiosphere/toil/issues/427
|
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 408, in <module>
main()
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 392, in main
jobStore.writeStatsAndLogging(ET.tostring(elementNode))
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1126, in tostring
ElementTree(element).write(file, encoding, method=method)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 820, in write
serialize(write, self._root, encoding, qnames, namespaces)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 939, in _serialize_xml
_serialize_xml(write, e, encoding, qnames, None)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 939, in _serialize_xml
_serialize_xml(write, e, encoding, qnames, None)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 937, in _serialize_xml
write(_escape_cdata(text, encoding))
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1073, in _escape_cdata
return text.encode(encoding, "xmlcharrefreplace")
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 48: ordinal not in range(128)
|
UnicodeDecodeError
|
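The change from a (text, level) tuple to dict(text=..., level=...) matters once the worker's messages travel through the new JSON-based stats pipeline: a dict round-trips as a named object the aggregator can read back as message.text and message.level, while a tuple degrades to a bare positional array. A quick demonstration:

import json

as_tuple = ("step finished", 20)
as_dict = dict(text="step finished", level=20)

print(json.dumps(as_tuple))  # ["step finished", 20] -- positional, unnamed
print(json.dumps(as_dict))   # {"text": "step finished", "level": 20}

roundTripped = json.loads(json.dumps(as_dict))
assert roundTripped["text"] == "step finished"
assert roundTripped["level"] == 20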
def _execute(self, jobWrapper, stats, localTempDir, jobStore, fileStore):
"""
This is the core method for running the job within a worker.
"""
if stats != None:
startTime = time.time()
startClock = getTotalCpuTime()
baseDir = os.getcwd()
# Run the job
returnValues = self.run(fileStore)
# Serialize the new jobs defined by the run method to the jobStore
self._serialiseJobGraph(jobWrapper, jobStore, returnValues, False)
# Delete the promise files that are now scheduled for deletion
for jobStoreFileID in promiseFilesToDelete:
fileStore.deleteGlobalFile(jobStoreFileID)
promiseFilesToDelete.clear()
# Now indicate the asynchronous update of the job can happen
fileStore._updateJobWhenDone()
# Change back to the original working directory, if the job changed it (a safety measure)
if os.getcwd() != baseDir:
os.chdir(baseDir)
# Finish up the stats
if stats != None:
stats.jobs.time = str(time.time() - startTime)
totalCpuTime, totalMemoryUsage = getTotalCpuTimeAndMemoryUsage()
stats.jobs.clock = str(totalCpuTime - startClock)
stats.jobs.class_name = self._jobName()
stats.jobs.memory = str(totalMemoryUsage)
|
def _execute(self, jobWrapper, stats, localTempDir, jobStore, fileStore):
"""
This is the core method for running the job within a worker.
"""
if stats != None:
startTime = time.time()
startClock = getTotalCpuTime()
baseDir = os.getcwd()
# Run the job
returnValues = self.run(fileStore)
# Serialize the new jobs defined by the run method to the jobStore
self._serialiseJobGraph(jobWrapper, jobStore, returnValues, False)
# Delete the promise files that are now scheduled for deletion
for jobStoreFileID in promiseFilesToDelete:
fileStore.deleteGlobalFile(jobStoreFileID)
promiseFilesToDelete.clear()
# Now indicate the asynchronous update of the job can happen
fileStore._updateJobWhenDone()
# Change back to the original working directory, if the job changed it (a safety measure)
if os.getcwd() != baseDir:
os.chdir(baseDir)
# Finish up the stats
if stats != None:
stats = ET.SubElement(stats, "job")
stats.attrib["time"] = str(time.time() - startTime)
totalCpuTime, totalMemoryUsage = getTotalCpuTimeAndMemoryUsage()
stats.attrib["clock"] = str(totalCpuTime - startClock)
stats.attrib["class"] = self._jobName()
stats.attrib["memory"] = str(totalMemoryUsage)
|
https://github.com/DataBiosphere/toil/issues/427
|
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 408, in <module>
main()
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 392, in main
jobStore.writeStatsAndLogging(ET.tostring(elementNode))
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1126, in tostring
ElementTree(element).write(file, encoding, method=method)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 820, in write
serialize(write, self._root, encoding, qnames, namespaces)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 939, in _serialize_xml
_serialize_xml(write, e, encoding, qnames, None)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 939, in _serialize_xml
_serialize_xml(write, e, encoding, qnames, None)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 937, in _serialize_xml
write(_escape_cdata(text, encoding))
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1073, in _escape_cdata
return text.encode(encoding, "xmlcharrefreplace")
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 48: ordinal not in range(128)
|
UnicodeDecodeError
|
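The UnicodeDecodeError itself is easy to reproduce: under Python 2 (which this codebase targeted), ET.tostring on an element whose text is a UTF-8 byte string implicitly decodes it with the ASCII codec, and 0xe2 is the first byte of characters like curly quotes. Storing the stats as JSON-friendly attributes sidesteps XML escaping entirely. A Python 2 sketch of the failure mode:

# -*- coding: utf-8 -*-
# Python 2 sketch reproducing the failure mode behind issue #427.
import json
import xml.etree.ElementTree as ET

node = ET.Element("message")
node.text = "job said \xe2\x80\x9chello\xe2\x80\x9d"  # UTF-8 bytes, not unicode

try:
    ET.tostring(node)  # implicit ascii decode of the byte string -> boom
except UnicodeDecodeError as e:
    print("XML serialisation failed: %s" % e)

# json.dumps copes once the bytes are decoded explicitly:
print(json.dumps({"text": node.text.decode("utf-8")}))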
def readStatsAndLogging(self, callback, readAll=False):
"""
Reads stats/logging strings accumulated by the writeStatsAndLogging() method. For each
stats/logging string this method calls the given callback function with an open,
readable file handle from which the stats string can be read. Returns the number of
stats/logging strings processed. Each stats/logging string is only processed once unless
the readAll parameter is set, in which case the given callback will be invoked for all
existing stats/logging strings, including the ones from a previous invocation of this
method.
:type callback: callable
:type readAll: bool
"""
raise NotImplementedError()
|
def readStatsAndLogging(self, statsAndLoggingCallBackFn):
"""
Reads stats/logging strings accumulated by "writeStatsAndLogging" function.
For each stats/logging file calls the statsAndLoggingCallBackFn with
an open, readable file-handle that can be used to parse the stats.
Returns the number of stat/logging strings processed.
Stats/logging files are only read once and are removed from the
file store after being written to the given file handle.
"""
raise NotImplementedError()
|
https://github.com/DataBiosphere/toil/issues/427
|
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 408, in <module>
main()
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 392, in main
jobStore.writeStatsAndLogging(ET.tostring(elementNode))
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1126, in tostring
ElementTree(element).write(file, encoding, method=method)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 820, in write
serialize(write, self._root, encoding, qnames, namespaces)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 939, in _serialize_xml
_serialize_xml(write, e, encoding, qnames, None)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 939, in _serialize_xml
_serialize_xml(write, e, encoding, qnames, None)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 937, in _serialize_xml
write(_escape_cdata(text, encoding))
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1073, in _escape_cdata
return text.encode(encoding, "xmlcharrefreplace")
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 48: ordinal not in range(128)
|
UnicodeDecodeError
|
def readStatsAndLogging(self, callback, readAll=False):
itemsProcessed = 0
for info in self._readStatsAndLogging(callback, self.statsFileOwnerID):
info._ownerID = self.readStatsFileOwnerID
info.save()
itemsProcessed += 1
if readAll:
for _ in self._readStatsAndLogging(callback, self.readStatsFileOwnerID):
itemsProcessed += 1
return itemsProcessed
|
def readStatsAndLogging(self, statsCallBackFn):
itemsProcessed = 0
items = None
for attempt in retry_sdb():
with attempt:
items = list(
self.filesDomain.select(
consistent_read=True,
query="select * from `%s` where ownerID='%s'"
% (self.filesDomain.name, str(self.statsFileOwnerID)),
)
)
assert items is not None
for item in items:
info = self.FileInfo.fromItem(item)
with info.downloadStream() as readable:
statsCallBackFn(readable)
self.deleteFile(item.name)
itemsProcessed += 1
return itemsProcessed
|
https://github.com/DataBiosphere/toil/issues/427
|
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 408, in <module>
main()
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 392, in main
jobStore.writeStatsAndLogging(ET.tostring(elementNode))
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1126, in tostring
ElementTree(element).write(file, encoding, method=method)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 820, in write
serialize(write, self._root, encoding, qnames, namespaces)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 939, in _serialize_xml
_serialize_xml(write, e, encoding, qnames, None)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 939, in _serialize_xml
_serialize_xml(write, e, encoding, qnames, None)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 937, in _serialize_xml
write(_escape_cdata(text, encoding))
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1073, in _escape_cdata
return text.encode(encoding, "xmlcharrefreplace")
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 48: ordinal not in range(128)
|
UnicodeDecodeError
|
def readStatsAndLogging(self, callback, readAll=False):
suffix = "_old"
numStatsFiles = 0
for entity in self.statsFileIDs.query_entities():
jobStoreFileID = entity.RowKey
hasBeenRead = len(jobStoreFileID) > self.jobIDLength
if not hasBeenRead:
with self._downloadStream(jobStoreFileID, self.statsFiles) as fd:
callback(fd)
# Mark this entity as read by appending the suffix
self.statsFileIDs.insert_entity(entity={"RowKey": jobStoreFileID + suffix})
self.statsFileIDs.delete_entity(row_key=jobStoreFileID)
numStatsFiles += 1
elif readAll:
# Strip the suffix to get the original ID
jobStoreFileID = jobStoreFileID[: -len(suffix)]
with self._downloadStream(jobStoreFileID, self.statsFiles) as fd:
callback(fd)
numStatsFiles += 1
return numStatsFiles
|
def readStatsAndLogging(self, statsAndLoggingCallbackFn):
numStatsFiles = 0
for entity in self.statsFileIDs.query_entities():
jobStoreFileID = entity.RowKey
with self._downloadStream(jobStoreFileID, self.statsFiles) as fd:
statsAndLoggingCallbackFn(fd)
self.statsFiles.delete_blob(blob_name=jobStoreFileID)
self.statsFileIDs.delete_entity(row_key=jobStoreFileID)
numStatsFiles += 1
return numStatsFiles
|
https://github.com/DataBiosphere/toil/issues/427
|
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 408, in <module>
main()
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 392, in main
jobStore.writeStatsAndLogging(ET.tostring(elementNode))
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1126, in tostring
ElementTree(element).write(file, encoding, method=method)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 820, in write
serialize(write, self._root, encoding, qnames, namespaces)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 939, in _serialize_xml
_serialize_xml(write, e, encoding, qnames, None)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 939, in _serialize_xml
_serialize_xml(write, e, encoding, qnames, None)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 937, in _serialize_xml
write(_escape_cdata(text, encoding))
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1073, in _escape_cdata
return text.encode(encoding, "xmlcharrefreplace")
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 48: ordinal not in range(128)
|
UnicodeDecodeError
|
def readStatsAndLogging(self, callback, readAll=False):
numberOfFilesProcessed = 0
for tempDir in self._tempDirectories():
for tempFile in os.listdir(tempDir):
if tempFile.startswith("stats"):
absTempFile = os.path.join(tempDir, tempFile)
if readAll or not tempFile.endswith(".new"):
with open(absTempFile, "r") as fH:
callback(fH)
numberOfFilesProcessed += 1
newName = tempFile.rsplit(".", 1)[0] + ".new"
newAbsTempFile = os.path.join(tempDir, newName)
# Mark this item as read
os.rename(absTempFile, newAbsTempFile)
return numberOfFilesProcessed
|
def readStatsAndLogging(self, statsAndLoggingCallBackFn):
numberOfFilesProcessed = 0
for tempDir in self._tempDirectories():
for tempFile in os.listdir(tempDir):
if tempFile.startswith("stats"):
absTempFile = os.path.join(tempDir, tempFile)
if not tempFile.endswith(".new"):
with open(absTempFile, "r") as fH:
statsAndLoggingCallBackFn(fH)
numberOfFilesProcessed += 1
os.remove(absTempFile)
return numberOfFilesProcessed
|
https://github.com/DataBiosphere/toil/issues/427
|
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 408, in <module>
main()
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 392, in main
jobStore.writeStatsAndLogging(ET.tostring(elementNode))
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1126, in tostring
ElementTree(element).write(file, encoding, method=method)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 820, in write
serialize(write, self._root, encoding, qnames, namespaces)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 939, in _serialize_xml
_serialize_xml(write, e, encoding, qnames, None)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 939, in _serialize_xml
_serialize_xml(write, e, encoding, qnames, None)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 937, in _serialize_xml
write(_escape_cdata(text, encoding))
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1073, in _escape_cdata
return text.encode(encoding, "xmlcharrefreplace")
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 48: ordinal not in range(128)
|
UnicodeDecodeError
|
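A hedged sketch of the mark-as-read idiom the fixed readStatsAndLogging uses above: instead of deleting a processed stats file, rename it with a ".new" suffix so a later pass with readAll=True can replay everything. File names here are illustrative.

import os
import tempfile

tempDir = tempfile.mkdtemp()
statsFile = os.path.join(tempDir, "stats0")
with open(statsFile, "w") as f:
    f.write("{}")

# First pass: after invoking the callback, mark the file as read.
os.rename(statsFile, statsFile + ".new")

# A replay pass (readAll=True) still matches files starting with "stats".
assert not os.path.exists(statsFile)
assert os.path.exists(statsFile + ".new")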
def statsAndLoggingAggregatorProcess(jobStore, stop):
"""
The following function is used for collating stats/reporting log messages from the workers.
Works inside of a separate process, collates as long as the stop flag is not True.
"""
# Overall timing
startTime = time.time()
startClock = getTotalCpuTime()
def callback(fileHandle):
stats = json.load(fileHandle, object_hook=Expando)
workers = stats.workers
try:
logs = workers.log
except AttributeError:
# To be expected if there were no calls to logToMaster()
pass
else:
for message in logs:
logger.log(
int(message.level),
"Got message from job at time: %s : %s",
time.strftime("%m-%d-%Y %H:%M:%S"),
message.text,
)
for log in stats.logs:
logger.info("%s: %s", log.jobStoreID, log.text)
while True:
# This is an indirect way of getting a message to the process to exit
if not stop.empty():
jobStore.readStatsAndLogging(callback)
break
if jobStore.readStatsAndLogging(callback) == 0:
time.sleep(0.5) # Avoid cycling too fast
# Finish the stats file
text = json.dumps(
dict(
total_time=str(time.time() - startTime),
total_clock=str(getTotalCpuTime() - startClock),
)
)
jobStore.writeStatsAndLogging(text)
|
def statsAndLoggingAggregatorProcess(jobStore, stop):
"""
The following function is used for collating stats/reporting log messages from the workers.
Works inside of a separate process, collates as long as the stop flag is not True.
"""
# Overall timing
startTime = time.time()
startClock = getTotalCpuTime()
# Start off the stats file
with jobStore.writeSharedFileStream("statsAndLogging.xml") as fileHandle:
fileHandle.write('<?xml version="1.0" ?><stats>')
# Call back function
def statsAndLoggingCallBackFn(fileHandle2):
node = ET.parse(fileHandle2).getroot()
nodesNamed = node.find("messages").findall
for message in nodesNamed("message"):
logger.log(
int(message.attrib["level"]),
"Got message from job at time: %s : %s",
time.strftime("%m-%d-%Y %H:%M:%S"),
message.text,
)
for log in nodesNamed("log"):
logger.info(
"%s: %s" % tuple(log.text.split("!", 1))
) # the jobID is separated from log by "!"
ET.ElementTree(node).write(fileHandle)
# The main loop
timeSinceOutFileLastFlushed = time.time()
while True:
if not stop.empty():  # This is an indirect way of getting a message to
# the process to exit
jobStore.readStatsAndLogging(statsAndLoggingCallBackFn)
break
if jobStore.readStatsAndLogging(statsAndLoggingCallBackFn) == 0:
time.sleep(0.5) # Avoid cycling too fast
if time.time() - timeSinceOutFileLastFlushed > 60: # Flush the
# results file every minute
fileHandle.flush()
timeSinceOutFileLastFlushed = time.time()
# Finish the stats file
fileHandle.write(
"<total_time time='%s' clock='%s'/></stats>"
% (str(time.time() - startTime), str(getTotalCpuTime() - startClock))
)
|
https://github.com/DataBiosphere/toil/issues/427
|
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 408, in <module>
main()
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 392, in main
jobStore.writeStatsAndLogging(ET.tostring(elementNode))
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1126, in tostring
ElementTree(element).write(file, encoding, method=method)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 820, in write
serialize(write, self._root, encoding, qnames, namespaces)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 939, in _serialize_xml
_serialize_xml(write, e, encoding, qnames, None)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 939, in _serialize_xml
_serialize_xml(write, e, encoding, qnames, None)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 937, in _serialize_xml
write(_escape_cdata(text, encoding))
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1073, in _escape_cdata
return text.encode(encoding, "xmlcharrefreplace")
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 48: ordinal not in range(128)
|
UnicodeDecodeError
|
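The aggregator loop above encodes a small shutdown protocol: the queue doubles as a stop flag, and the job store is drained one final time after the signal so late-arriving stats are not lost. An illustrative sketch, with readOnce standing in for jobStore.readStatsAndLogging(callback):

from multiprocessing import Queue

stop = Queue()

def readOnce():
    # Stand-in for jobStore.readStatsAndLogging(callback); returns the
    # number of stats files processed on this pass.
    return 0

stop.put(True)              # the leader signals shutdown
while True:
    if not stop.empty():    # signal seen: drain one last time, then exit
        readOnce()
        break
    if readOnce() == 0:
        pass                # the real loop sleeps 0.5s here to avoid spinning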
def __init__(
self, accountName, namePrefix, config=None, jobChunkSize=maxAzureTablePropertySize
):
self.jobChunkSize = jobChunkSize
self.keyPath = None
self.account_key = _fetchAzureAccountKey(accountName)
# Table names have strict requirements in Azure
self.namePrefix = self._sanitizeTableName(namePrefix)
log.debug("Creating job store with name prefix '%s'" % self.namePrefix)
# These are the main API entrypoints.
self.tableService = TableService(
account_key=self.account_key, account_name=accountName
)
self.blobService = BlobService(
account_key=self.account_key, account_name=accountName
)
# Register our job-store in the global table for this storage account
self.registryTable = self._getOrCreateTable("toilRegistry")
exists = self.registryTable.get_entity(row_key=self.namePrefix)
self._checkJobStoreCreation(
config is not None, exists, accountName + ":" + self.namePrefix
)
self.registryTable.insert_or_replace_entity(
row_key=self.namePrefix, entity={"exists": True}
)
# Serialized jobs table
self.jobItems = self._getOrCreateTable(self.qualify("jobs"))
# Job<->file mapping table
self.jobFileIDs = self._getOrCreateTable(self.qualify("jobFileIDs"))
# Container for all shared and unshared files
self.files = self._getOrCreateBlobContainer(self.qualify("files"))
# Stats and logging strings
self.statsFiles = self._getOrCreateBlobContainer(self.qualify("statsfiles"))
# File IDs that contain stats and logging strings
self.statsFileIDs = self._getOrCreateTable(self.qualify("statsFileIDs"))
super(AzureJobStore, self).__init__(config=config)
if self.config.cseKey is not None:
self.keyPath = self.config.cseKey
|
def __init__(self, accountName, namePrefix, config=None, jobChunkSize=65535):
self.jobChunkSize = jobChunkSize
self.keyPath = None
self.account_key = _fetchAzureAccountKey(accountName)
# Table names have strict requirements in Azure
self.namePrefix = self._sanitizeTableName(namePrefix)
log.debug("Creating job store with name prefix '%s'" % self.namePrefix)
# These are the main API entrypoints.
self.tableService = TableService(
account_key=self.account_key, account_name=accountName
)
self.blobService = BlobService(
account_key=self.account_key, account_name=accountName
)
# Register our job-store in the global table for this storage account
self.registryTable = self._getOrCreateTable("toilRegistry")
exists = self.registryTable.get_entity(row_key=self.namePrefix)
self._checkJobStoreCreation(
config is not None, exists, accountName + ":" + self.namePrefix
)
self.registryTable.insert_or_replace_entity(
row_key=self.namePrefix, entity={"exists": True}
)
# Serialized jobs table
self.jobItems = self._getOrCreateTable(self.qualify("jobs"))
# Job<->file mapping table
self.jobFileIDs = self._getOrCreateTable(self.qualify("jobFileIDs"))
# Container for all shared and unshared files
self.files = self._getOrCreateBlobContainer(self.qualify("files"))
# Stats and logging strings
self.statsFiles = self._getOrCreateBlobContainer(self.qualify("statsfiles"))
# File IDs that contain stats and logging strings
self.statsFileIDs = self._getOrCreateTable(self.qualify("statsFileIDs"))
super(AzureJobStore, self).__init__(config=config)
if self.config.cseKey is not None:
self.keyPath = self.config.cseKey
|
https://github.com/DataBiosphere/toil/issues/577
|
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: Traceback (most recent call last):
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 282, in main
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: fileStore=fileStore)
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 1282, in _execute
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: self._serialiseJobGraph(jobWrapper, jobStore, returnValues, False)
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 1233, in _serialiseJobGraph
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: job._serialiseJob(jobStore, jobsToJobWrappers, jobWrapper)
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 1196, in _serialiseJob
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: jobStore.update(jobsToJobWrappers[self])
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 150, in update
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: entity=job.toItem(chunkSize=self.jobChunkSize))
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 517, in f
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: return function(**kwargs)
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: File "/usr/local/lib/python2.7/dist-packages/azure/storage/tableservice.py", line 476, in update_entity
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: response = self._perform_request(request)
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: File "/usr/local/lib/python2.7/dist-packages/azure/storage/storageclient.py", line 179, in _perform_request
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: _storage_error_handler(ex)
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: File "/usr/local/lib/python2.7/dist-packages/azure/storage/__init__.py", line 1174, in _storage_error_handler
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: return _general_error_handler(http_error)
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: File "/usr/local/lib/python2.7/dist-packages/azure/__init__.py", line 551, in _general_error_handler
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: http_error.respbody.decode('utf-8-sig'))
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: WindowsAzureError: Unknown error (Bad Request)
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: <?xml version="1.0" encoding="utf-8"?><error xmlns="http://schemas.microsoft.com/ado/2007/08/dataservices/metadata"><code>PropertyValueTooLarge</code><message xml:lang="en-US">The property value exceeds the maximum allowed size (64KB). If the property value is a string, it is UTF-16 encoded and the maximum number of characters should be 32K or less.
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: RequestId:bc75b149-0002-0131-4274-260c9f000000
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: Time:2015-11-24T04:54:04.2292362Z</message></error>
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: Exiting the worker because of a failed jobWrapper on host c1agent34
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host c1agent34
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job 4f782575_20d9_48bb_bc1f_94cc8b207479 to 0
|
WindowsAzureError
|
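For context on the fix above: Azure limits each table-entity property to 64KB, so a serialized job must be split across properties. A minimal sketch of that chunking, assuming maxAzureTablePropertySize is 64KiB; the SDK's EntityProperty('Edm.Binary', ...) wrapper is omitted here.

import bz2
import pickle

maxAzureTablePropertySize = 64 * 1024      # assumed per-property limit

serializedJob = bz2.compress(pickle.dumps({"state": "x" * 200000}))
chunks = [serializedJob[i:i + maxAzureTablePropertySize]
          for i in range(0, len(serializedJob), maxAzureTablePropertySize)]

# Each chunk becomes one property, keyed so that sorting restores order.
item = {"_" + str(n).zfill(3): chunk for n, chunk in enumerate(chunks)}
assert all(len(c) <= maxAzureTablePropertySize for c in item.values())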
def fromItem(cls, item):
"""
:type item: dict
:rtype: AzureJob
"""
chunkedJob = item.items()
chunkedJob.sort()
if len(chunkedJob) == 1:
# First element of list = tuple, second element of tuple = serialized job
wholeJobString = chunkedJob[0][1].value
else:
wholeJobString = "".join(item[1].value for item in chunkedJob)
return cPickle.loads(bz2.decompress(wholeJobString))
|
def fromItem(cls, item):
"""
:type item: dict
:rtype: AzureJob
"""
chunkedJob = item.items()
chunkedJob.sort()
if len(chunkedJob) == 1:
# First element of list = tuple, second element of tuple = serialized job
wholeJobString = chunkedJob[0][1]
else:
wholeJobString = "".join(item[1] for item in chunkedJob)
return cPickle.loads(bz2.decompress(base64.b64decode(wholeJobString)))
|
https://github.com/DataBiosphere/toil/issues/577
|
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: Traceback (most recent call last):
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 282, in main
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: fileStore=fileStore)
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 1282, in _execute
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: self._serialiseJobGraph(jobWrapper, jobStore, returnValues, False)
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 1233, in _serialiseJobGraph
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: job._serialiseJob(jobStore, jobsToJobWrappers, jobWrapper)
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 1196, in _serialiseJob
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: jobStore.update(jobsToJobWrappers[self])
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 150, in update
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: entity=job.toItem(chunkSize=self.jobChunkSize))
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 517, in f
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: return function(**kwargs)
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: File "/usr/local/lib/python2.7/dist-packages/azure/storage/tableservice.py", line 476, in update_entity
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: response = self._perform_request(request)
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: File "/usr/local/lib/python2.7/dist-packages/azure/storage/storageclient.py", line 179, in _perform_request
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: _storage_error_handler(ex)
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: File "/usr/local/lib/python2.7/dist-packages/azure/storage/__init__.py", line 1174, in _storage_error_handler
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: return _general_error_handler(http_error)
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: File "/usr/local/lib/python2.7/dist-packages/azure/__init__.py", line 551, in _general_error_handler
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: http_error.respbody.decode('utf-8-sig'))
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: WindowsAzureError: Unknown error (Bad Request)
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: <?xml version="1.0" encoding="utf-8"?><error xmlns="http://schemas.microsoft.com/ado/2007/08/dataservices/metadata"><code>PropertyValueTooLarge</code><message xml:lang="en-US">The property value exceeds the maximum allowed size (64KB). If the property value is a string, it is UTF-16 encoded and the maximum number of characters should be 32K or less.
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: RequestId:bc75b149-0002-0131-4274-260c9f000000
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: Time:2015-11-24T04:54:04.2292362Z</message></error>
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: Exiting the worker because of a failed jobWrapper on host c1agent34
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host c1agent34
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job 4f782575_20d9_48bb_bc1f_94cc8b207479 to 0
|
WindowsAzureError
|
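The inverse operation, pairing with fromItem above: sort the zero-padded property keys, concatenate the chunks, decompress and unpickle. A sketch under the same assumptions as the chunking example, without the SDK's property wrappers:

import bz2
import pickle

original = {"jobStoreID": "abc", "retryCount": 1}
blob = bz2.compress(pickle.dumps(original))
item = {"_" + str(n).zfill(3): blob[i:i + 65536]
        for n, i in enumerate(range(0, len(blob), 65536))}

# Zero-padded keys ("_000", "_001", ...) sort lexicographically in chunk order.
wholeJobString = b"".join(value for _, value in sorted(item.items()))
assert pickle.loads(bz2.decompress(wholeJobString)) == original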
def toItem(self, chunkSize=maxAzureTablePropertySize):
"""
:param chunkSize: the size of a chunk for splitting up the serialized job into chunks
that each fit into a property value of an Azure table entity
:rtype: dict
"""
assert chunkSize <= maxAzureTablePropertySize
item = {}
serializedAndEncodedJob = bz2.compress(cPickle.dumps(self))
jobChunks = [
serializedAndEncodedJob[i : i + chunkSize]
for i in range(0, len(serializedAndEncodedJob), chunkSize)
]
for attributeOrder, chunk in enumerate(jobChunks):
item["_" + str(attributeOrder).zfill(3)] = EntityProperty("Edm.Binary", chunk)
return item
|
def toItem(self, chunkSize=65535):
"""
:rtype: dict
"""
item = {}
serializedAndEncodedJob = base64.b64encode(bz2.compress(cPickle.dumps(self)))
jobChunks = [
serializedAndEncodedJob[i : i + chunkSize]
for i in range(0, len(serializedAndEncodedJob), chunkSize)
]
for attributeOrder, chunk in enumerate(jobChunks):
item["_" + str(attributeOrder).zfill(3)] = chunk
return item
|
https://github.com/DataBiosphere/toil/issues/577
|
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: Traceback (most recent call last):
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 282, in main
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: fileStore=fileStore)
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 1282, in _execute
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: self._serialiseJobGraph(jobWrapper, jobStore, returnValues, False)
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 1233, in _serialiseJobGraph
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: job._serialiseJob(jobStore, jobsToJobWrappers, jobWrapper)
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 1196, in _serialiseJob
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: jobStore.update(jobsToJobWrappers[self])
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 150, in update
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: entity=job.toItem(chunkSize=self.jobChunkSize))
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 517, in f
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: return function(**kwargs)
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: File "/usr/local/lib/python2.7/dist-packages/azure/storage/tableservice.py", line 476, in update_entity
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: response = self._perform_request(request)
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: File "/usr/local/lib/python2.7/dist-packages/azure/storage/storageclient.py", line 179, in _perform_request
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: _storage_error_handler(ex)
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: File "/usr/local/lib/python2.7/dist-packages/azure/storage/__init__.py", line 1174, in _storage_error_handler
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: return _general_error_handler(http_error)
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: File "/usr/local/lib/python2.7/dist-packages/azure/__init__.py", line 551, in _general_error_handler
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: http_error.respbody.decode('utf-8-sig'))
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: WindowsAzureError: Unknown error (Bad Request)
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: <?xml version="1.0" encoding="utf-8"?><error xmlns="http://schemas.microsoft.com/ado/2007/08/dataservices/metadata"><code>PropertyValueTooLarge</code><message xml:lang="en-US">The property value exceeds the maximum allowed size (64KB). If the property value is a string, it is UTF-16 encoded and the maximum number of characters should be 32K or less.
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: RequestId:bc75b149-0002-0131-4274-260c9f000000
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: Time:2015-11-24T04:54:04.2292362Z</message></error>
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: Exiting the worker because of a failed jobWrapper on host c1agent34
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host c1agent34
WARNING:toil.leader:4f782575_20d9_48bb_bc1f_94cc8b207479: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job 4f782575_20d9_48bb_bc1f_94cc8b207479 to 0
|
WindowsAzureError
|
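Why the fixed toItem drops base64: base64 inflates data by a third, and Azure measures string properties in UTF-16 characters, so encoded chunks could exceed the limit even when the raw bytes fit, which is exactly the PropertyValueTooLarge error above. Storing raw bytes as Edm.Binary avoids the inflation. A quick illustration:

import base64

raw = b"\x00" * 60000                 # fits under the 64KB binary limit
encoded = base64.b64encode(raw)
print(len(raw), len(encoded))         # 60000 vs 80000: the encoded form no longer fits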
def _load(cls, leaderPath):
"""
:type leaderPath: str
"""
bytesIO = BytesIO()
# PyZipFile compiles .py files on the fly, filters out any non-Python files and
# distinguishes between packages and simple directories.
with PyZipFile(file=bytesIO, mode="w") as zipFile:
zipFile.writepy(leaderPath)
bytesIO.seek(0)
return bytesIO
|
def _load(cls, leaderPath):
bytesIO = BytesIO()
with ZipFile(file=bytesIO, mode="w") as zipFile:
for dirPath, fileNames, dirNames in os.walk(leaderPath):
assert dirPath.startswith(leaderPath)
for fileName in fileNames:
filePath = os.path.join(dirPath, fileName)
assert filePath.encode("ascii") == filePath
relativeFilePath = os.path.relpath(filePath, leaderPath)
assert not relativeFilePath.startswith(os.path.sep)
zipFile.write(filePath, relativeFilePath)
bytesIO.seek(0)
return bytesIO
|
https://github.com/DataBiosphere/toil/issues/565
|
INFO:toil.batchSystems.mesos.batchSystem:Preparing to launch Mesos task 0 using offer 20151120-222317-83886090-5050-35148-O5...
INFO:toil.batchSystems.mesos.batchSystem:...launching Mesos task 0
WARNING:toil.leader:The jobWrapper seems to have left a log file, indicating failure: 46c1efec_0849_4ce3_b74d_c74f1f0602a4
WARNING:toil.leader:Reporting file: 46c1efec_0849_4ce3_b74d_c74f1f0602a4
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: ---TOIL WORKER OUTPUT LOG---
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: Traceback (most recent call last):
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 266, in main
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: job = Job._loadJob(jobWrapper.command, jobStore)
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 936, in _loadJob
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: userModule = cls._loadUserModule(userModule)
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 914, in _loadUserModule
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: return importlib.import_module(userModule.name)
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: __import__(name)
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/tmp/tmpflu3Jl/5b3f3d5ba2a95124e308998df2ab7bb2/parallelMappingEvaluation.py", line 16, in <module>
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: from toillib import *
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: ImportError: No module named toillib
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: Exiting the worker because of a failed jobWrapper on host c4agent1
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host c4agent1
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job 46c1efec_0849_4ce3_b74d_c74f1f0602a4 to 0
WARNING:toil.leader:Job: 46c1efec_0849_4ce3_b74d_c74f1f0602a4 is completely failed
|
ImportError
|
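A self-contained sketch of the PyZipFile approach adopted in _load above: writepy() compiles .py files on the fly, skips non-Python files, and distinguishes packages from plain directories, none of which the hand-rolled os.walk version did. Packaging the current directory is just for illustration; it will fail if a local .py file has syntax errors.

from io import BytesIO
from zipfile import PyZipFile

buf = BytesIO()
with PyZipFile(buf, mode="w") as zipFile:
    zipFile.writepy(".")      # compile and zip Python code from this directory
buf.seek(0)
print(len(buf.getvalue()), "bytes of zipped, compiled Python")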
def forModule(cls, name):
"""
Return an instance of this class representing the module of the given name. If the given
module name is "__main__", it will be translated to the actual file name of the top-level
script without the .py or .pyc extension. This method assumes that the module with the
specified name has already been loaded.
"""
module = sys.modules[name]
filePath = os.path.abspath(module.__file__)
filePath = filePath.split(os.path.sep)
filePath[-1], extension = os.path.splitext(filePath[-1])
assert extension in (".py", ".pyc")
if name == "__main__":
if module.__package__:
# invoked via python -m foo.bar
name = [filePath.pop()]
for package in reversed(module.__package__.split(".")):
dirPathTail = filePath.pop()
assert dirPathTail == package
name.append(dirPathTail)
name = ".".join(reversed(name))
dirPath = os.path.sep.join(filePath)
else:
# invoked via python foo/bar.py
name = filePath.pop()
dirPath = os.path.sep.join(filePath)
cls._check_conflict(dirPath, name)
else:
# imported as a module
for package in reversed(name.split(".")):
dirPathTail = filePath.pop()
assert dirPathTail == package
dirPath = os.path.sep.join(filePath)
return cls(dirPath=dirPath, name=name)
|
def forModule(cls, name):
"""
Return an instance of this class representing the module of the given name. If the given
module name is "__main__", it will be translated to the actual file name of the top-level
script without the .py or .pyc extension. This method assumes that the module with the
specified name has already been loaded.
"""
module = sys.modules[name]
filePath = os.path.abspath(module.__file__)
filePath = filePath.split(os.path.sep)
filePath[-1], extension = os.path.splitext(filePath[-1])
assert extension in (".py", ".pyc")
if name == "__main__":
name = filePath.pop()
dirPath = os.path.sep.join(filePath)
cls._check_conflict(dirPath, name)
else:
for package in reversed(name.split(".")):
dirPathTail = filePath.pop()
assert dirPathTail == package
dirPath = os.path.sep.join(filePath)
return cls(dirPath=dirPath, name=name, extension=extension)
|
https://github.com/DataBiosphere/toil/issues/565
|
INFO:toil.batchSystems.mesos.batchSystem:Preparing to launch Mesos task 0 using offer 20151120-222317-83886090-5050-35148-O5...
INFO:toil.batchSystems.mesos.batchSystem:...launching Mesos task 0
WARNING:toil.leader:The jobWrapper seems to have left a log file, indicating failure: 46c1efec_0849_4ce3_b74d_c74f1f0602a4
WARNING:toil.leader:Reporting file: 46c1efec_0849_4ce3_b74d_c74f1f0602a4
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: ---TOIL WORKER OUTPUT LOG---
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: Traceback (most recent call last):
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 266, in main
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: job = Job._loadJob(jobWrapper.command, jobStore)
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 936, in _loadJob
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: userModule = cls._loadUserModule(userModule)
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 914, in _loadUserModule
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: return importlib.import_module(userModule.name)
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: __import__(name)
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/tmp/tmpflu3Jl/5b3f3d5ba2a95124e308998df2ab7bb2/parallelMappingEvaluation.py", line 16, in <module>
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: from toillib import *
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: ImportError: No module named toillib
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: Exiting the worker because of a failed jobWrapper on host c4agent1
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host c4agent1
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job 46c1efec_0849_4ce3_b74d_c74f1f0602a4 to 0
WARNING:toil.leader:Job: 46c1efec_0849_4ce3_b74d_c74f1f0602a4 is completely failed
|
ImportError
|
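The new "python -m foo.bar" branch in forModule can be illustrated in isolation: walk back up the file path, checking each directory against the package components, to recover the dotted module name. The path and package here are hypothetical.

import os

filePath = "/home/user/src/foo/bar.py".split(os.path.sep)
filePath[-1] = os.path.splitext(filePath[-1])[0]    # strip ".py" -> "bar"
package = "foo"                                     # module.__package__

name = [filePath.pop()]                             # ["bar"]
for part in reversed(package.split(".")):
    assert filePath[-1] == part                     # path mirrors the package
    name.append(filePath.pop())

print(".".join(reversed(name)))                     # -> foo.bar
print(os.path.sep.join(filePath))                   # -> /home/user/src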
def _check_conflict(cls, dirPath, name):
"""
Check whether the module of the given name conflicts with another module on the sys.path.
:param dirPath: the directory from which the module was originally loaded
:param name: the module name
"""
old_sys_path = sys.path
try:
sys.path = [
d for d in old_sys_path if os.path.realpath(d) != os.path.realpath(dirPath)
]
try:
colliding_module = importlib.import_module(name)
except ImportError:
pass
else:
raise ResourceException(
"The user module '%s' collides with module '%s from '%s'."
% (name, colliding_module.__name__, colliding_module.__file__)
)
finally:
sys.path = old_sys_path
|
def _check_conflict(cls, dirPath, name):
"""
Check whether the module of the given name conflicts with another module on the sys.path.
:param dirPath: the directory from which the module was originally loaded
:param name: the module name
"""
old_sys_path = sys.path
try:
sys.path = [
dir
for dir in old_sys_path
if os.path.realpath(dir) != os.path.realpath(dirPath)
]
try:
colliding_module = importlib.import_module(name)
except ImportError:
pass
else:
raise RuntimeError(
"The user module '%s' collides with module '%s from '%s'."
% (name, colliding_module.__name__, colliding_module.__file__)
)
finally:
sys.path = old_sys_path
|
https://github.com/DataBiosphere/toil/issues/565
|
INFO:toil.batchSystems.mesos.batchSystem:Preparing to launch Mesos task 0 using offer 20151120-222317-83886090-5050-35148-O5...
INFO:toil.batchSystems.mesos.batchSystem:...launching Mesos task 0
WARNING:toil.leader:The jobWrapper seems to have left a log file, indicating failure: 46c1efec_0849_4ce3_b74d_c74f1f0602a4
WARNING:toil.leader:Reporting file: 46c1efec_0849_4ce3_b74d_c74f1f0602a4
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: ---TOIL WORKER OUTPUT LOG---
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: Traceback (most recent call last):
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 266, in main
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: job = Job._loadJob(jobWrapper.command, jobStore)
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 936, in _loadJob
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: userModule = cls._loadUserModule(userModule)
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 914, in _loadUserModule
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: return importlib.import_module(userModule.name)
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: __import__(name)
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/tmp/tmpflu3Jl/5b3f3d5ba2a95124e308998df2ab7bb2/parallelMappingEvaluation.py", line 16, in <module>
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: from toillib import *
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: ImportError: No module named toillib
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: Exiting the worker because of a failed jobWrapper on host c4agent1
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host c4agent1
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job 46c1efec_0849_4ce3_b74d_c74f1f0602a4 to 0
WARNING:toil.leader:Job: 46c1efec_0849_4ce3_b74d_c74f1f0602a4 is completely failed
|
ImportError
|
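The collision check above can be exercised standalone. would_collide is an illustrative wrapper, not a Toil API: it temporarily removes the script's own directory from sys.path and asks whether the same module name resolves elsewhere.

import importlib
import os
import sys

def would_collide(dirPath, name):
    # Return True if `name` also resolves somewhere other than dirPath.
    old_sys_path = sys.path
    try:
        sys.path = [d for d in old_sys_path
                    if os.path.realpath(d) != os.path.realpath(dirPath)]
        try:
            importlib.import_module(name)
        except ImportError:
            return False
        return True
    finally:
        sys.path = old_sys_path

print(would_collide(os.getcwd(), "json"))   # True: "json" lives in the stdlib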
def localize(self):
"""
Check if this module was saved as a resource. If it was, return a new module descriptor
that points to a local copy of that resource. Should only be called on a worker node. On
the leader, this method returns this resource, i.e. self.
:rtype: toil.resource.Resource
"""
if self._runningOnWorker():
log.warn("The localize() method should only be invoked on a worker.")
resource = Resource.lookup(self._resourcePath)
if resource is None:
log.warn("Can't localize module %r", self)
return self
else:
def stash(tmpDirPath):
# Save the original dirPath such that we can restore it in globalize()
with open(os.path.join(tmpDirPath, ".original"), "w") as f:
f.write(json.dumps(self))
resource.download(callback=stash)
return self.__class__(dirPath=resource.localDirPath, name=self.name)
|
def localize(self):
"""
Check if this module was saved as a resource. If it was, return a new module descriptor
that points to a local copy of that resource. Should only be called on a worker node.
:rtype: toil.resource.Resource
"""
resource = Resource.lookup(self._resourcePath)
if resource is None:
log.warn("Can't localize module %r", self)
return self
else:
def stash(tmpDirPath):
# Save the original dirPath such that we can restore it in globalize()
with open(os.path.join(tmpDirPath, ".original"), "w") as f:
f.write(json.dumps(self))
resource.download(callback=stash)
return self.__class__(
dirPath=resource.localDirPath, name=self.name, extension=self.extension
)
|
https://github.com/DataBiosphere/toil/issues/565
|
INFO:toil.batchSystems.mesos.batchSystem:Preparing to launch Mesos task 0 using offer 20151120-222317-83886090-5050-35148-O5...
INFO:toil.batchSystems.mesos.batchSystem:...launching Mesos task 0
WARNING:toil.leader:The jobWrapper seems to have left a log file, indicating failure: 46c1efec_0849_4ce3_b74d_c74f1f0602a4
WARNING:toil.leader:Reporting file: 46c1efec_0849_4ce3_b74d_c74f1f0602a4
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: ---TOIL WORKER OUTPUT LOG---
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: Traceback (most recent call last):
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 266, in main
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: job = Job._loadJob(jobWrapper.command, jobStore)
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 936, in _loadJob
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: userModule = cls._loadUserModule(userModule)
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 914, in _loadUserModule
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: return importlib.import_module(userModule.name)
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: __import__(name)
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/tmp/tmpflu3Jl/5b3f3d5ba2a95124e308998df2ab7bb2/parallelMappingEvaluation.py", line 16, in <module>
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: from toillib import *
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: ImportError: No module named toillib
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: Exiting the worker because of a failed jobWrapper on host c4agent1
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host c4agent1
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job 46c1efec_0849_4ce3_b74d_c74f1f0602a4 to 0
WARNING:toil.leader:Job: 46c1efec_0849_4ce3_b74d_c74f1f0602a4 is completely failed
|
ImportError
|
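The stash-and-restore handshake between localize() and globalize() reduces to a small round trip: localize() writes the leader-side descriptor fields next to the downloaded copy, and globalize() reads them back. Paths and field values in this sketch are hypothetical.

import json
import os
import tempfile

tmpDirPath = tempfile.mkdtemp()
original = ["/opt/workflows", "pipeline"]     # hypothetical (dirPath, name)

# localize(): stash the original descriptor alongside the localized resource.
with open(os.path.join(tmpDirPath, ".original"), "w") as f:
    f.write(json.dumps(original))

# globalize(): read it back and rebuild the leader-side descriptor.
with open(os.path.join(tmpDirPath, ".original")) as f:
    assert json.loads(f.read()) == original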
def globalize(self):
"""
Reverse the effect of localize().
"""
try:
with open(os.path.join(self.dirPath, ".original")) as f:
return self.__class__(*json.loads(f.read()))
except IOError as e:
if e.errno == errno.ENOENT:
if self._runningOnWorker():
log.warn("Can't globalize module %r.", self)
return self
else:
raise
|
def globalize(self):
try:
with open(os.path.join(self.dirPath, ".original")) as f:
return self.__class__(*json.loads(f.read()))
except IOError as e:
if e.errno == errno.ENOENT:
log.warn("Can't globalize module %r.", self)
return self
else:
raise
|
https://github.com/DataBiosphere/toil/issues/565
|
INFO:toil.batchSystems.mesos.batchSystem:Preparing to launch Mesos task 0 using offer 20151120-222317-83886090-5050-35148-O5...
INFO:toil.batchSystems.mesos.batchSystem:...launching Mesos task 0
WARNING:toil.leader:The jobWrapper seems to have left a log file, indicating failure: 46c1efec_0849_4ce3_b74d_c74f1f0602a4
WARNING:toil.leader:Reporting file: 46c1efec_0849_4ce3_b74d_c74f1f0602a4
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: ---TOIL WORKER OUTPUT LOG---
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: Traceback (most recent call last):
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 266, in main
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: job = Job._loadJob(jobWrapper.command, jobStore)
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 936, in _loadJob
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: userModule = cls._loadUserModule(userModule)
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 914, in _loadUserModule
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: return importlib.import_module(userModule.name)
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: __import__(name)
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/tmp/tmpflu3Jl/5b3f3d5ba2a95124e308998df2ab7bb2/parallelMappingEvaluation.py", line 16, in <module>
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: from toillib import *
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: ImportError: No module named toillib
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: Exiting the worker because of a failed jobWrapper on host c4agent1
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host c4agent1
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job 46c1efec_0849_4ce3_b74d_c74f1f0602a4 to 0
WARNING:toil.leader:Job: 46c1efec_0849_4ce3_b74d_c74f1f0602a4 is completely failed
|
ImportError
|
def _resourcePath(self):
"""
The path to the directory that should be used when shipping this module and its siblings
around as a resource.
"""
if "." in self.name:
return os.path.join(self.dirPath, self._rootPackage())
else:
initName = self._initModuleName(self.dirPath)
if initName:
raise ResourceException(
"Toil does not support loading a user script from a package directory. You "
"may want to remove %s from %s or invoke the user script as a module via "
"'PYTHONPATH=\"%s\" python -m %s.%s'."
% tuple(
concat(
initName, self.dirPath, os.path.split(self.dirPath), self.name
)
)
)
return self.dirPath
|
def _resourcePath(self):
"""
The path to the file or package directory that should be used when shipping this module
around as a resource.
"""
return self.dirPath if "." in self.name else self.filePath
|
https://github.com/DataBiosphere/toil/issues/565
|
INFO:toil.batchSystems.mesos.batchSystem:Preparing to launch Mesos task 0 using offer 20151120-222317-83886090-5050-35148-O5...
INFO:toil.batchSystems.mesos.batchSystem:...launching Mesos task 0
WARNING:toil.leader:The jobWrapper seems to have left a log file, indicating failure: 46c1efec_0849_4ce3_b74d_c74f1f0602a4
WARNING:toil.leader:Reporting file: 46c1efec_0849_4ce3_b74d_c74f1f0602a4
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: ---TOIL WORKER OUTPUT LOG---
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: Traceback (most recent call last):
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 266, in main
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: job = Job._loadJob(jobWrapper.command, jobStore)
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 936, in _loadJob
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: userModule = cls._loadUserModule(userModule)
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 914, in _loadUserModule
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: return importlib.import_module(userModule.name)
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: __import__(name)
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/tmp/tmpflu3Jl/5b3f3d5ba2a95124e308998df2ab7bb2/parallelMappingEvaluation.py", line 16, in <module>
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: from toillib import *
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: ImportError: No module named toillib
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: Exiting the worker because of a failed jobWrapper on host c4agent1
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host c4agent1
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job 46c1efec_0849_4ce3_b74d_c74f1f0602a4 to 0
WARNING:toil.leader:Job: 46c1efec_0849_4ce3_b74d_c74f1f0602a4 is completely failed
|
ImportError
|
def _loadUserModule(cls, userModule):
"""
Imports and returns the module object represented by the given module descriptor.
:type userModule: ModuleDescriptor
"""
if not userModule.belongsToToil:
userModule = userModule.localize()
if userModule.dirPath not in sys.path:
sys.path.append(userModule.dirPath)
try:
return importlib.import_module(userModule.name)
except ImportError:
logger.error(
"Failed to import user module %r from sys.path=%r", userModule, sys.path
)
raise
|
def _loadUserModule(cls, userModule):
"""
Imports and returns the module object represented by the given module descriptor.
:type userModule: ModuleDescriptor
"""
if not userModule.belongsToToil:
userModule = userModule.localize()
if userModule.dirPath not in sys.path:
sys.path.append(userModule.dirPath)
return importlib.import_module(userModule.name)
|
https://github.com/DataBiosphere/toil/issues/565
|
INFO:toil.batchSystems.mesos.batchSystem:Preparing to launch Mesos task 0 using offer 20151120-222317-83886090-5050-35148-O5...
INFO:toil.batchSystems.mesos.batchSystem:...launching Mesos task 0
WARNING:toil.leader:The jobWrapper seems to have left a log file, indicating failure: 46c1efec_0849_4ce3_b74d_c74f1f0602a4
WARNING:toil.leader:Reporting file: 46c1efec_0849_4ce3_b74d_c74f1f0602a4
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: ---TOIL WORKER OUTPUT LOG---
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: Traceback (most recent call last):
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 266, in main
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: job = Job._loadJob(jobWrapper.command, jobStore)
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 936, in _loadJob
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: userModule = cls._loadUserModule(userModule)
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 914, in _loadUserModule
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: return importlib.import_module(userModule.name)
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: __import__(name)
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: File "/tmp/tmpflu3Jl/5b3f3d5ba2a95124e308998df2ab7bb2/parallelMappingEvaluation.py", line 16, in <module>
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: from toillib import *
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: ImportError: No module named toillib
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: Exiting the worker because of a failed jobWrapper on host c4agent1
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host c4agent1
WARNING:toil.leader:46c1efec_0849_4ce3_b74d_c74f1f0602a4: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job 46c1efec_0849_4ce3_b74d_c74f1f0602a4 to 0
WARNING:toil.leader:Job: 46c1efec_0849_4ce3_b74d_c74f1f0602a4 is completely failed
|
ImportError
|
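The added error path is easy to reproduce in miniature. loadModule here is an illustrative stand-in for _loadUserModule: it logs the module name and the effective sys.path before re-raising, turning the opaque "No module named toillib" failure above into something debuggable.

import importlib
import logging
import sys

logging.basicConfig()
logger = logging.getLogger(__name__)

def loadModule(name):
    try:
        return importlib.import_module(name)
    except ImportError:
        logger.error("Failed to import user module %r from sys.path=%r",
                     name, sys.path)
        raise

loadModule("json")    # succeeds; pass a missing name to see the log and re-raise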
def __init__(self):
# Core options
self.jobStore = os.path.abspath("./toil")
self.logLevel = getLogLevelString()
self.workDir = None
self.stats = False
# Because the stats option needs the jobStore to persist past the end of the run,
# the clean default value depends on the specified stats option and is determined in setOptions
self.clean = None
# Restarting the workflow options
self.restart = False
# Batch system options
self.batchSystem = "singleMachine"
self.scale = 1
self.mesosMasterAddress = "localhost:5050"
self.parasolCommand = "parasol"
self.parasolMaxBatches = 10000
self.environment = {}
# Resource requirements
self.defaultMemory = 2147483648
self.defaultCores = 1
self.defaultDisk = 2147483648
self.defaultCache = self.defaultDisk
self.maxCores = sys.maxint
self.maxMemory = sys.maxint
self.maxDisk = sys.maxint
# Retrying/rescuing jobs
self.retryCount = 0
self.maxJobDuration = sys.maxint
self.rescueJobsFrequency = 3600
# Misc
self.maxLogFileSize = 50120
self.sseKey = None
self.cseKey = None
# Debug options
self.badWorker = 0.0
self.badWorkerFailInterval = 0.01
|
def __init__(self):
# Core options
self.jobStore = os.path.abspath("./toil")
self.logLevel = getLogLevelString()
self.workDir = None
self.stats = False
# Because the stats option needs the jobStore to persist past the end of the run,
# the clean default value depends on the specified stats option and is determined in setOptions
self.clean = None
# Restarting the workflow options
self.restart = False
# Batch system options
self.batchSystem = "singleMachine"
self.scale = 1
self.mesosMasterAddress = "localhost:5050"
self.parasolCommand = "parasol"
self.parasolMaxBatches = 10000
self.environment = {}
# Resource requirements
self.defaultMemory = 2147483648
self.defaultCores = 1
self.defaultDisk = 2147483648
self.defaultCache = 2147483648 # Cache is 2GB
self.maxCores = sys.maxint
self.maxMemory = sys.maxint
self.maxDisk = sys.maxint
# Retrying/rescuing jobs
self.retryCount = 0
self.maxJobDuration = sys.maxint
self.rescueJobsFrequency = 3600
# Misc
self.maxLogFileSize = 50120
self.sseKey = None
self.cseKey = None
# Debug options
self.badWorker = 0.0
self.badWorkerFailInterval = 0.01
|
https://github.com/DataBiosphere/toil/issues/551
|
Traceback (most recent call last):
File "hello_world.py", line 10, in <module>
result = Job.Runner.startToil( job, options )
File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 436, in startToil
rootJob = job._serialiseFirstJob(jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 1246, in _serialiseFirstJob
predecessorNumber=0)
File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 1104, in _createEmptyJobForJob
" than the disk requirement for the job! (disk: %s)" % (cache, disk))
RuntimeError: Trying to allocate a cache (cache: 2147483648.0) larger than the disk requirement for the job! (disk: 1000000)
|
RuntimeError
|
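The one-line config change above (defaultCache follows defaultDisk instead of a fixed 2GB) can be checked against the numbers in the traceback. A sketch:

disk = 1000000                    # the job's disk request from the traceback
fixedCache = 2147483648           # old default: always 2GiB
cacheFollowsDisk = disk           # new default: self.defaultCache = self.defaultDisk

assert not fixedCache <= disk     # the old check failed -> RuntimeError
assert cacheFollowsDisk <= disk   # the new default passes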
def setOptions(self, options):
"""
Creates a config object from the options object.
"""
from bd2k.util.humanize import human2bytes # This import is used to convert
# from human-readable quantities to integers
def setOption(varName, parsingFn=None, checkFn=None):
# If options object has the option "varName" specified
# then set the "varName" attrib to this value in the config object
x = getattr(options, varName, None)
if x is not None:
if parsingFn is not None:
x = parsingFn(x)
if checkFn is not None:
try:
checkFn(x)
except AssertionError:
raise RuntimeError(
"The %s option has an invalid value: %s" % (varName, x)
)
setattr(self, varName, x)
# Function to parse integer from string expressed in different formats
h2b = lambda x: human2bytes(str(x))
def iC(minValue, maxValue=sys.maxint):
# Returns function that checks if a given int is in the given half-open interval
assert isinstance(minValue, int) and isinstance(maxValue, int)
return lambda x: minValue <= x < maxValue
def fC(minValue, maxValue=None):
# Returns function that checks if a given float is in the given half-open interval
assert isinstance(minValue, float)
if maxValue is None:
return lambda x: minValue <= x
else:
assert isinstance(maxValue, float)
return lambda x: minValue <= x < maxValue
# Core options
setOption(
"jobStore",
parsingFn=lambda x: os.path.abspath(x)
if options.jobStore.startswith(".")
else x,
)
# TODO: LOG LEVEL STRING
setOption("workDir")
setOption("stats")
setOption("clean")
if self.stats:
if self.clean != "never" and self.clean is not None:
raise RuntimeError(
"Contradicting options passed: Clean flag is set to %s "
"despite the stats flag requiring "
"the jobStore to be intact at the end of the run. "
"Set clean to 'never'" % self.clean
)
self.clean = "never"
elif self.clean is None:
self.clean = "onSuccess"
# Restarting the workflow options
setOption("restart")
# Batch system options
setOption("batchSystem")
setOption("scale", float, fC(0.0))
setOption("mesosMasterAddress")
setOption("parasolCommand")
setOption("parasolMaxBatches", int, iC(1))
setOption("environment", parseSetEnv)
# Resource requirements
setOption("defaultMemory", h2b, iC(1))
setOption("defaultCores", float, fC(1.0))
setOption("defaultDisk", h2b, iC(1))
setOption("defaultCache", h2b, iC(0))
setOption("maxCores", int, iC(1))
setOption("maxMemory", h2b, iC(1))
setOption("maxDisk", h2b, iC(1))
# Retrying/rescuing jobs
setOption("retryCount", int, iC(0))
setOption("maxJobDuration", int, iC(1))
setOption("rescueJobsFrequency", int, iC(1))
# Misc
setOption("maxLogFileSize", h2b, iC(1))
def checkSse(sseKey):
with open(sseKey) as f:
assert len(f.readline().rstrip()) == 32
setOption("sseKey", checkFn=checkSse)
setOption("cseKey", checkFn=checkSse)
# Debug options
setOption("badWorker", float, fC(0.0, 1.0))
setOption("badWorkerFailInterval", float, fC(0.0))
|
def setOptions(self, options):
"""
Creates a config object from the options object.
"""
from bd2k.util.humanize import human2bytes # This import is used to convert
# from human-readable quantities to integers
def setOption(varName, parsingFn=None, checkFn=None):
# If options object has the option "varName" specified
# then set the "varName" attrib to this value in the config object
x = getattr(options, varName, None)
if x != None:
if parsingFn != None:
x = parsingFn(x)
if checkFn != None:
try:
checkFn(x)
except AssertionError:
raise RuntimeError(
"The %s option has an invalid value: %s" % (varName, x)
)
setattr(self, varName, x)
# Function to parse integer from string expressed in different formats
h2b = lambda x: human2bytes(str(x))
def iC(minValue, maxValue=sys.maxint):
# Returns function that checks if a given int is in the given half-open interval
assert isinstance(minValue, int) and isinstance(maxValue, int)
return lambda x: minValue <= x < maxValue
def fC(minValue, maxValue=None):
# Returns function that checks if a given float is in the given half-open interval
assert isinstance(minValue, float)
if maxValue is None:
return lambda x: minValue <= x
else:
assert isinstance(maxValue, float)
return lambda x: minValue <= x < maxValue
# Core options
setOption(
"jobStore",
parsingFn=lambda x: os.path.abspath(x)
if options.jobStore.startswith(".")
else x,
)
# TODO: LOG LEVEL STRING
setOption("workDir")
setOption("stats")
setOption("clean")
if self.stats:
if self.clean != "never" and self.clean is not None:
raise RuntimeError(
"Contradicting options passed: Clean flag is set to %s "
"despite the stats flag requiring "
"the jobStore to be intact at the end of the run. "
"Set clean to 'never'" % self.clean
)
self.clean = "never"
elif self.clean is None:
self.clean = "onSuccess"
# Restarting the workflow options
setOption("restart")
# Batch system options
setOption("batchSystem")
setOption("scale", float, fC(0.0))
setOption("mesosMasterAddress")
setOption("parasolCommand")
setOption("parasolMaxBatches", int, iC(1))
setOption("environment", parseSetEnv)
# Resource requirements
setOption("defaultMemory", h2b, iC(1))
setOption("defaultCores", float, fC(1.0))
setOption("defaultDisk", h2b, iC(1))
setOption("defaultCache", h2b, iC(0))
setOption("maxCores", int, iC(1))
setOption("maxMemory", h2b, iC(1))
setOption("maxDisk", h2b, iC(1))
# Retrying/rescuing jobs
setOption("retryCount", int, iC(0))
setOption("maxJobDuration", int, iC(1))
setOption("rescueJobsFrequency", int, iC(1))
# Misc
setOption("maxLogFileSize", h2b, iC(1))
def checkSse(sseKey):
with open(sseKey) as f:
assert len(f.readline().rstrip()) == 32
setOption("sseKey", checkFn=checkSse)
setOption("cseKey", checkFn=checkSse)
# Debug options
setOption("badWorker", float, fC(0.0, 1.0))
setOption("badWorkerFailInterval", float, fC(0.0))
|
https://github.com/DataBiosphere/toil/issues/551
|
Traceback (most recent call last):
File "hello_world.py", line 10, in <module>
result = Job.Runner.startToil( job, options )
File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 436, in startToil
rootJob = job._serialiseFirstJob(jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 1246, in _serialiseFirstJob
predecessorNumber=0)
File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 1104, in _createEmptyJobForJob
" than the disk requirement for the job! (disk: %s)" % (cache, disk))
RuntimeError: Trying to allocate a cache (cache: 2147483648.0) larger than the disk requirement for the job! (disk: 1000000)
|
RuntimeError
|
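The "!= None" to "is not None" change above is more than style: equality operators can be overridden, while identity against the None singleton cannot. A contrived illustration:

class Weird(object):
    def __eq__(self, other):
        return True               # claims equality with everything, even None
    def __ne__(self, other):
        return False

x = Weird()
print(x != None)                  # False: the overridden operator lies
print(x is not None)              # True: identity is immune to overrides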
def setOption(varName, parsingFn=None, checkFn=None):
# If options object has the option "varName" specified
# then set the "varName" attrib to this value in the config object
x = getattr(options, varName, None)
if x is not None:
if parsingFn is not None:
x = parsingFn(x)
if checkFn is not None:
try:
checkFn(x)
except AssertionError:
raise RuntimeError(
"The %s option has an invalid value: %s" % (varName, x)
)
setattr(self, varName, x)
|
def setOption(varName, parsingFn=None, checkFn=None):
# If options object has the option "varName" specified
# then set the "varName" attrib to this value in the config object
x = getattr(options, varName, None)
if x != None:
if parsingFn != None:
x = parsingFn(x)
if checkFn != None:
try:
checkFn(x)
except AssertionError:
raise RuntimeError(
"The %s option has an invalid value: %s" % (varName, x)
)
setattr(self, varName, x)
|
https://github.com/DataBiosphere/toil/issues/551
|
Traceback (most recent call last):
File "hello_world.py", line 10, in <module>
result = Job.Runner.startToil( job, options )
File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 436, in startToil
rootJob = job._serialiseFirstJob(jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 1246, in _serialiseFirstJob
predecessorNumber=0)
File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 1104, in _createEmptyJobForJob
" than the disk requirement for the job! (disk: %s)" % (cache, disk))
RuntimeError: Trying to allocate a cache (cache: 2147483648.0) larger than the disk requirement for the job! (disk: 1000000)
|
RuntimeError
|
def _addOptions(addGroupFn, config):
#
# Core options
#
addOptionFn = addGroupFn(
"toil core options",
"Options to specify the \
location of the toil and turn on stats collation about the performance of jobs.",
)
# TODO - specify how this works when path is AWS
addOptionFn(
"jobStore",
type=str,
help=(
"Store in which to place job management files \
            and the globally accessed temporary files"
"(If this is a file path this needs to be globally accessible "
"by all machines running jobs).\n"
"If the store already exists and restart is false an"
" ExistingJobStoreException exception will be thrown."
),
)
addOptionFn(
"--workDir",
dest="workDir",
default=None,
help="Absolute path to directory where temporary files generated during the Toil run should be placed. "
"Default is determined by environmental variables (TMPDIR, TEMP, TMP) via mkdtemp",
)
addOptionFn(
"--stats",
dest="stats",
action="store_true",
default=None,
help="Records statistics about the toil workflow to be used by 'toil stats'.",
)
addOptionFn(
"--clean",
dest="clean",
choices=["always", "onError", "never", "onSuccess"],
default=None,
help=(
"Determines the deletion of the jobStore upon completion of the program. "
"Choices: 'always', 'onError','never', 'onSuccess'. The --stats option requires "
"information from the jobStore upon completion so the jobStore will never be deleted with"
"that flag. If you wish to be able to restart the run, choose 'never' or 'onSuccess'. "
"Default is 'never' if stats is enabled, and 'onSuccess' otherwise"
),
)
#
# Restarting the workflow options
#
addOptionFn = addGroupFn(
"toil options for restarting an existing workflow",
"Allows the restart of an existing workflow",
)
addOptionFn(
"--restart",
dest="restart",
default=None,
action="store_true",
help="If --restart is specified then will attempt to restart existing workflow "
"at the location pointed to by the --jobStore option. Will raise an exception if the workflow does not exist",
)
#
# Batch system options
#
addOptionFn = addGroupFn(
"toil options for specifying the batch system",
"Allows the specification of the batch system, and arguments to the batch system/big batch system (see below).",
)
addOptionFn(
"--batchSystem",
dest="batchSystem",
default=None,
help=(
"The type of batch system to run the job(s) with, currently can be one "
"of singleMachine, parasol, gridEngine, lsf or mesos'. default=%s"
% config.batchSystem
),
)
addOptionFn(
"--scale",
dest="scale",
default=None,
help=(
"A scaling factor to change the value of all submitted tasks's submitted cores. "
"Used in singleMachine batch system. default=%s" % config.scale
),
)
addOptionFn(
"--mesosMaster",
dest="mesosMasterAddress",
default=None,
help=(
"The host and port of the Mesos master separated by colon. default=%s"
% config.mesosMasterAddress
),
)
addOptionFn(
"--parasolCommand",
dest="parasolCommand",
default=None,
help="The name or path of the parasol program. Will be looked up on PATH "
"unless it starts with a slashdefault=%s" % config.parasolCommand,
)
addOptionFn(
"--parasolMaxBatches",
dest="parasolMaxBatches",
default=None,
help="Maximum number of job batches the Parasol batch is allowed to create. One "
"batch is created for jobs with a a unique set of resource requirements. "
"default=%i" % config.parasolMaxBatches,
)
#
# Resource requirements
#
addOptionFn = addGroupFn(
"toil options for cores/memory requirements",
"The options to specify default cores/memory requirements (if not "
"specified by the jobs themselves), and to limit the total amount of "
"memory/cores requested from the batch system.",
)
addOptionFn(
"--defaultMemory",
dest="defaultMemory",
default=None,
metavar="INT",
help="The default amount of memory to request for a job. Only applicable to jobs "
"that do not specify an explicit value for this requirement. Standard "
"suffixes like K, Ki, M, Mi, G or Gi are supported. Default is %s"
% bytes2human(config.defaultMemory, symbols="iec"),
)
addOptionFn(
"--defaultCores",
dest="defaultCores",
default=None,
metavar="FLOAT",
help="The default number of CPU cores to dedicate a job. Only applicable to jobs "
"that do not specify an explicit value for this requirement. Fractions of a "
"core (for example 0.1) are supported on some batch systems, namely Mesos "
"and singleMachine. Default is %.1f " % config.defaultCores,
)
addOptionFn(
"--defaultDisk",
dest="defaultDisk",
default=None,
metavar="INT",
help="The default amount of disk space to dedicate a job. Only applicable to jobs "
"that do not specify an explicit value for this requirement. Standard "
"suffixes like K, Ki, M, Mi, G or Gi are supported. Default is %s"
% bytes2human(config.defaultDisk, symbols="iec"),
)
addOptionFn(
"--defaultCache",
dest="defaultCache",
default=None,
metavar="INT",
help="The default amount of disk space to use for caching files shared between "
"jobs. Only applicable to jobs that do not specify an explicit value for "
"this requirement. Standard suffixes like K, Ki, M, Mi, G or Gi are "
"supported. Default is %s" % bytes2human(config.defaultCache, symbols="iec"),
)
addOptionFn(
"--maxCores",
dest="maxCores",
default=None,
metavar="INT",
help="The maximum number of CPU cores to request from the batch system at any one "
"time. Standard suffixes like K, Ki, M, Mi, G or Gi are supported. Default "
"is %s" % bytes2human(config.maxCores, symbols="iec"),
)
addOptionFn(
"--maxMemory",
dest="maxMemory",
default=None,
metavar="INT",
help="The maximum amount of memory to request from the batch system at any one "
"time. Standard suffixes like K, Ki, M, Mi, G or Gi are supported. Default "
"is %s" % bytes2human(config.maxMemory, symbols="iec"),
)
addOptionFn(
"--maxDisk",
dest="maxDisk",
default=None,
metavar="INT",
help="The maximum amount of disk space to request from the batch system at any "
"one time. Standard suffixes like K, Ki, M, Mi, G or Gi are supported. "
"Default is %s" % bytes2human(config.maxDisk, symbols="iec"),
)
#
# Retrying/rescuing jobs
#
addOptionFn = addGroupFn(
"toil options for rescuing/killing/restarting jobs",
"The options for jobs that either run too long/fail or get lost \
(some batch systems have issues!)",
)
addOptionFn(
"--retryCount",
dest="retryCount",
default=None,
help=(
"Number of times to retry a failing job before giving up and "
"labeling job failed. default=%s" % config.retryCount
),
)
addOptionFn(
"--maxJobDuration",
dest="maxJobDuration",
default=None,
help=(
"Maximum runtime of a job (in seconds) before we kill it "
"(this is a lower bound, and the actual time before killing "
"the job may be longer). default=%s" % config.maxJobDuration
),
)
addOptionFn(
"--rescueJobsFrequency",
dest="rescueJobsFrequency",
default=None,
help=(
"Period of time to wait (in seconds) between checking for "
"missing/overlong jobs, that is jobs which get lost by the batch system. Expert parameter. default=%s"
% config.rescueJobsFrequency
),
)
#
# Misc options
#
addOptionFn = addGroupFn("toil miscellaneous options", "Miscellaneous options")
addOptionFn(
"--maxLogFileSize",
dest="maxLogFileSize",
default=None,
help=(
"The maximum size of a job log file to keep (in bytes), log files larger "
"than this will be truncated to the last X bytes. Default is 50 "
"kilobytes, default=%s" % config.maxLogFileSize
),
)
addOptionFn(
"--sseKey",
dest="sseKey",
default=None,
help="Path to file containing 32 character key to be used for server-side encryption on awsJobStore. SSE will "
"not be used if this flag is not passed.",
)
addOptionFn(
"--cseKey",
dest="cseKey",
default=None,
help="Path to file containing 256-bit key to be used for client-side encryption on "
"azureJobStore. By default, no encryption is used.",
)
addOptionFn(
"--setEnv",
"-e",
metavar="NAME=VALUE or NAME",
dest="environment",
default=[],
action="append",
help="Set an environment variable early on in the worker. If VALUE is omitted, "
"it will be looked up in the current environment. Independently of this "
"option, the worker will try to emulate the leader's environment before "
"running a job. Using this option, a variable can be injected into the "
"worker process itself before it is started.",
)
#
# Debug options
#
addOptionFn = addGroupFn("toil debug options", "Debug options")
addOptionFn(
"--badWorker",
dest="badWorker",
default=None,
help=(
"For testing purposes randomly kill 'badWorker' proportion of jobs using SIGKILL, default=%s"
% config.badWorker
),
)
addOptionFn(
"--badWorkerFailInterval",
dest="badWorkerFailInterval",
default=None,
help=(
"When killing the job pick uniformly within the interval from 0.0 to "
"'badWorkerFailInterval' seconds after the worker starts, default=%s"
% config.badWorkerFailInterval
),
)
|
def _addOptions(addGroupFn, config):
#
# Core options
#
addOptionFn = addGroupFn(
"toil core options",
"Options to specify the \
location of the toil and turn on stats collation about the performance of jobs.",
)
# TODO - specify how this works when path is AWS
addOptionFn(
"jobStore",
type=str,
help=(
"Store in which to place job management files \
            and the globally accessed temporary files"
"(If this is a file path this needs to be globally accessible "
"by all machines running jobs).\n"
"If the store already exists and restart is false an"
" ExistingJobStoreException exception will be thrown."
),
)
addOptionFn(
"--workDir",
dest="workDir",
default=None,
help="Absolute path to directory where temporary files generated during the Toil run should be placed. "
"Default is determined by environmental variables (TMPDIR, TEMP, TMP) via mkdtemp",
)
addOptionFn(
"--stats",
dest="stats",
action="store_true",
default=None,
help="Records statistics about the toil workflow to be used by 'toil stats'.",
)
addOptionFn(
"--clean",
dest="clean",
choices=["always", "onError", "never", "onSuccess"],
default=None,
help=(
"Determines the deletion of the jobStore upon completion of the program. "
"Choices: 'always', 'onError','never', 'onSuccess'. The --stats option requires "
"information from the jobStore upon completion so the jobStore will never be deleted with"
"that flag. If you wish to be able to restart the run, choose 'never' or 'onSuccess'. "
"Default is 'never' if stats is enabled, and 'onSuccess' otherwise"
),
)
#
# Restarting the workflow options
#
addOptionFn = addGroupFn(
"toil options for restarting an existing workflow",
"Allows the restart of an existing workflow",
)
addOptionFn(
"--restart",
dest="restart",
default=None,
action="store_true",
help="If --restart is specified then will attempt to restart existing workflow "
"at the location pointed to by the --jobStore option. Will raise an exception if the workflow does not exist",
)
#
# Batch system options
#
addOptionFn = addGroupFn(
"toil options for specifying the batch system",
"Allows the specification of the batch system, and arguments to the batch system/big batch system (see below).",
)
addOptionFn(
"--batchSystem",
dest="batchSystem",
default=None,
help=(
"The type of batch system to run the job(s) with, currently can be one "
"of singleMachine, parasol, gridEngine, lsf or mesos'. default=%s"
% config.batchSystem
),
)
addOptionFn(
"--scale",
dest="scale",
default=None,
help=(
"A scaling factor to change the value of all submitted tasks's submitted cores. "
"Used in singleMachine batch system. default=%s" % config.scale
),
)
addOptionFn(
"--mesosMaster",
dest="mesosMasterAddress",
default=None,
help=(
"The host and port of the Mesos master separated by colon. default=%s"
% config.mesosMasterAddress
),
)
addOptionFn(
"--parasolCommand",
dest="parasolCommand",
default=None,
help="The name or path of the parasol program. Will be looked up on PATH "
"unless it starts with a slashdefault=%s" % config.parasolCommand,
)
addOptionFn(
"--parasolMaxBatches",
dest="parasolMaxBatches",
default=None,
help="Maximum number of job batches the Parasol batch is allowed to create. One "
"batch is created for jobs with a a unique set of resource requirements. "
"default=%i" % config.parasolMaxBatches,
)
#
# Resource requirements
#
addOptionFn = addGroupFn(
"toil options for cores/memory requirements",
"The options to specify default cores/memory requirements (if not specified by the jobs themselves), and to limit the total amount of memory/cores requested from the batch system.",
)
addOptionFn(
"--defaultMemory",
dest="defaultMemory",
default=None,
help=(
"The default amount of memory to request for a job (in bytes), "
"by default is 2^31 = 2 gigabytes, default=%s" % config.defaultMemory
),
)
addOptionFn(
"--defaultCores",
dest="defaultCores",
default=None,
help="The default number of cpu cores to dedicate a job. default=%s"
% config.defaultCores,
)
addOptionFn(
"--defaultDisk",
dest="defaultDisk",
default=None,
help="The default amount of disk space to dedicate a job (in bytes). default=%s"
% config.defaultDisk,
)
addOptionFn(
"--defaultCache",
dest="defaultCache",
default=None,
help=(
"The default amount of disk space to use in caching "
"files shared between jobs. This must be less than the disk requirement "
"for the job default=%s" % config.defaultCache
),
)
addOptionFn(
"--maxCores",
dest="maxCores",
default=None,
help=(
"The maximum number of cpu cores to request from the batch system at any "
"one time. default=%s" % config.maxCores
),
)
addOptionFn(
"--maxMemory",
dest="maxMemory",
default=None,
help=(
"The maximum amount of memory to request from the batch \
system at any one time. default=%s"
% config.maxMemory
),
)
addOptionFn(
"--maxDisk",
dest="maxDisk",
default=None,
help=(
"The maximum amount of disk space to request from the batch \
system at any one time. default=%s"
% config.maxDisk
),
)
#
# Retrying/rescuing jobs
#
addOptionFn = addGroupFn(
"toil options for rescuing/killing/restarting jobs",
"The options for jobs that either run too long/fail or get lost \
(some batch systems have issues!)",
)
addOptionFn(
"--retryCount",
dest="retryCount",
default=None,
help=(
"Number of times to retry a failing job before giving up and "
"labeling job failed. default=%s" % config.retryCount
),
)
addOptionFn(
"--maxJobDuration",
dest="maxJobDuration",
default=None,
help=(
"Maximum runtime of a job (in seconds) before we kill it "
"(this is a lower bound, and the actual time before killing "
"the job may be longer). default=%s" % config.maxJobDuration
),
)
addOptionFn(
"--rescueJobsFrequency",
dest="rescueJobsFrequency",
default=None,
help=(
"Period of time to wait (in seconds) between checking for "
"missing/overlong jobs, that is jobs which get lost by the batch system. Expert parameter. default=%s"
% config.rescueJobsFrequency
),
)
#
# Misc options
#
addOptionFn = addGroupFn("toil miscellaneous options", "Miscellaneous options")
addOptionFn(
"--maxLogFileSize",
dest="maxLogFileSize",
default=None,
help=(
"The maximum size of a job log file to keep (in bytes), log files larger "
"than this will be truncated to the last X bytes. Default is 50 "
"kilobytes, default=%s" % config.maxLogFileSize
),
)
addOptionFn(
"--sseKey",
dest="sseKey",
default=None,
help="Path to file containing 32 character key to be used for server-side encryption on awsJobStore. SSE will "
"not be used if this flag is not passed.",
)
addOptionFn(
"--cseKey",
dest="cseKey",
default=None,
help="Path to file containing 256-bit key to be used for client-side encryption on "
"azureJobStore. By default, no encryption is used.",
)
addOptionFn(
"--setEnv",
"-e",
metavar="NAME=VALUE or NAME",
dest="environment",
default=[],
action="append",
help="Set an environment variable early on in the worker. If VALUE is omitted, "
"it will be looked up in the current environment. Independently of this "
"option, the worker will try to emulate the leader's environment before "
"running a job. Using this option, a variable can be injected into the "
"worker process itself before it is started.",
)
#
# Debug options
#
addOptionFn = addGroupFn("toil debug options", "Debug options")
addOptionFn(
"--badWorker",
dest="badWorker",
default=None,
help=(
"For testing purposes randomly kill 'badWorker' proportion of jobs using SIGKILL, default=%s"
% config.badWorker
),
)
addOptionFn(
"--badWorkerFailInterval",
dest="badWorkerFailInterval",
default=None,
help=(
"When killing the job pick uniformly within the interval from 0.0 to "
"'badWorkerFailInterval' seconds after the worker starts, default=%s"
% config.badWorkerFailInterval
),
)
|
https://github.com/DataBiosphere/toil/issues/551
|
Traceback (most recent call last):
File "hello_world.py", line 10, in <module>
result = Job.Runner.startToil( job, options )
File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 436, in startToil
rootJob = job._serialiseFirstJob(jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 1246, in _serialiseFirstJob
predecessorNumber=0)
File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 1104, in _createEmptyJobForJob
" than the disk requirement for the job! (disk: %s)" % (cache, disk))
RuntimeError: Trying to allocate a cache (cache: 2147483648.0) larger than the disk requirement for the job! (disk: 1000000)
|
RuntimeError
|
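The after_merge help strings above format the byte-valued defaults with bytes2human from bd2k.util.humanize, and setOptions parses user input back with human2bytes. A rough, self-contained sketch of such a parser (an approximation, not the bd2k implementation):

def h2b(s):
    # Parse a human-readable size like "2G", "512Mi" or "1000000" into bytes.
    s = str(s).strip().upper().rstrip("B").rstrip("I")
    for suffix, factor in [("T", 1024 ** 4), ("G", 1024 ** 3),
                           ("M", 1024 ** 2), ("K", 1024)]:
        if s.endswith(suffix):
            return int(float(s[:-1]) * factor)
    return int(s)

print(h2b("2G"))     # 2147483648
print(h2b("512Mi"))  # 536870912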
def _createEmptyJobForJob(self, jobStore, command=None, predecessorNumber=0):
"""
Create an empty job for the job.
"""
requirements = self.effectiveRequirements(jobStore.config)
del requirements.cache
return jobStore.create(
command=command, predecessorNumber=predecessorNumber, **requirements
)
|
def _createEmptyJobForJob(self, jobStore, command=None, predecessorNumber=0):
"""
Create an empty job for the job.
"""
memory = (
self.memory if self.memory is not None else float(jobStore.config.defaultMemory)
)
cores = (
self.cores if self.cores is not None else float(jobStore.config.defaultCores)
)
disk = self.disk if self.disk is not None else float(jobStore.config.defaultDisk)
cache = (
self.cache if self.cache is not None else float(jobStore.config.defaultCache)
)
if cache > disk:
raise RuntimeError(
"Trying to allocate a cache (cache: %s) larger"
" than the disk requirement for the job! (disk: %s)" % (cache, disk)
)
return jobStore.create(
command=command,
memory=memory,
cores=cores,
disk=disk,
predecessorNumber=predecessorNumber,
)
|
https://github.com/DataBiosphere/toil/issues/551
|
Traceback (most recent call last):
File "hello_world.py", line 10, in <module>
result = Job.Runner.startToil( job, options )
File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 436, in startToil
rootJob = job._serialiseFirstJob(jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 1246, in _serialiseFirstJob
predecessorNumber=0)
File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 1104, in _createEmptyJobForJob
" than the disk requirement for the job! (disk: %s)" % (cache, disk))
RuntimeError: Trying to allocate a cache (cache: 2147483648.0) larger than the disk requirement for the job! (disk: 1000000)
|
RuntimeError
|
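The fix above replaces four hand-rolled defaulting expressions, and the cache-versus-disk RuntimeError from the traceback, with one effectiveRequirements call, then deletes the cache entry before jobStore.create. A hypothetical sketch of that merge-with-defaults step (the real effectiveRequirements is a method on the same class as _createEmptyJobForJob):

# Hypothetical sketch: fill unspecified requirements from the config defaults.
def effectiveRequirements(job, config):
    requirements = {}
    for name in ("memory", "cores", "disk", "cache"):
        value = job.get(name)
        requirements[name] = value if value is not None else config["default" + name.capitalize()]
    return requirements

config = {"defaultMemory": 2 ** 31, "defaultCores": 1,
          "defaultDisk": 2 ** 31, "defaultCache": 2 ** 31}
requirements = effectiveRequirements({"disk": 1000000}, config)
del requirements["cache"]  # the cache no longer accompanies the job or constrains its disk
print(requirements)        # memory/cores fall back to defaults, disk stays at 1000000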
def __init__(self, config, maxCores, maxMemory, maxDisk):
"""This method must be called.
The config object is setup by the toilSetup script and
has configuration parameters for the jobtree. You can add stuff
to that script to get parameters for your batch system.
"""
self.config = config
self.maxCores = maxCores
self.maxMemory = maxMemory
self.maxDisk = maxDisk
self.environment = {}
"""
:type dict[str,str]
"""
|
def __init__(self, config, maxCores, maxMemory, maxDisk):
"""This method must be called.
The config object is setup by the toilSetup script and
has configuration parameters for the jobtree. You can add stuff
to that script to get parameters for your batch system.
"""
self.config = config
self.maxCores = maxCores
self.maxMemory = maxMemory
self.maxDisk = maxDisk
|
https://github.com/DataBiosphere/toil/issues/421
|
DEBUG:toil.batchSystems.mesos.executor:Starting executor
I0924 22:47:08.587648 43191 exec.cpp:132] Version: 0.22.0
I0924 22:47:08.592193 43192 exec.cpp:206] Executor registered on slave 20150924-214557-83886090-5050-44087-S8
INFO:toil.batchSystems.mesos.executor:Registered with framework
DEBUG:toil.batchSystems.mesos.executor:Running task 0
DEBUG:toil.batchSystems.mesos.executor:Sending status update ...
DEBUG:toil.batchSystems.mesos.executor:Sent status update
DEBUG:toil.batchSystems.mesos.executor:Invoking command: '/usr/bin/python2.7 -E /usr/local/lib/python2.7/dist-packages/toil/worker.py azure:hgvm:tree2 683b1010_55c3_4978_a7e7_853f17ed1803'
DEBUG:toil.batchSystems.mesos.executor:Sent stats message
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 408, in <module>
main()
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 89, in main
jobStore = loadJobStore(jobStoreString)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 366, in loadJobStore
return AzureJobStore( account, namePrefix, config=config )
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 78, in __init__
account_key = _fetchAzureAccountKey(accountName)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 63, in _fetchAzureAccountKey
return configParser.get('AzureStorageCredentials', accountName)
File "/usr/lib/python2.7/ConfigParser.py", line 330, in get
raise NoSectionError(section)
ConfigParser.NoSectionError: No section: 'AzureStorageCredentials'
DEBUG:toil.batchSystems.mesos.executor:Sending status update ...
DEBUG:toil.batchSystems.mesos.executor:Sent status update
I0924 22:47:09.738494 43200 exec.cpp:379] Executor asked to shutdown
CRITICAL:toil.batchSystems.mesos.executor:Shutting down executor...
CRITICAL:toil.batchSystems.mesos.executor:Executor shut down
|
ConfigParser.NoSectionError
|
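Records E through M all serve issue 421: the worker process on the Mesos slave does not inherit the leader's environment, so the Azure credentials lookup in the traceback fails there. The fix threads an environment dict from the leader down to every subprocess launch. The essential move is merging those variables over os.environ (the variable name below is purely illustrative):

import os
import subprocess

environment = {"ILLUSTRATIVE_CREDENTIAL": "dummy-value"}  # supplied by the leader
subprocess.check_call(
    ["python", "-c", "import os; print(os.environ['ILLUSTRATIVE_CREDENTIAL'])"],
    env=dict(os.environ, **environment),  # same merge as the fixed runJob/worker code
)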
def __init__(
self,
config,
maxCores,
maxMemory,
maxDisk,
masterAddress,
userScript=None,
toilDistribution=None,
):
AbstractBatchSystem.__init__(self, config, maxCores, maxMemory, maxDisk)
# The hot-deployed resources representing the user script and the toil distribution
# respectively. Will be passed along in every Mesos task. See
# toil.common.HotDeployedResource for details.
self.userScript = userScript
self.toilDistribution = toilDistribution
# Written to when mesos kills tasks, as directed by toil
self.killedSet = set()
# Dictionary of queues, which toil assigns jobs to. Each queue represents a job type,
# defined by resource usage
self.jobQueueList = defaultdict(list)
# Address of Mesos master in the form host:port where host can be an IP or a hostname
self.masterAddress = masterAddress
# queue of jobs to kill, by jobID.
self.killSet = set()
# contains jobs on which killBatchJobs were called,
# regardless of whether or not they actually were killed or
# ended by themselves.
self.intendedKill = set()
# Dict of launched jobIDs to TaskData named tuple. Contains start time, executorID, and slaveID.
self.runningJobMap = {}
# Queue of jobs whose status has been updated, according to mesos. Req'd by toil
self.updatedJobsQueue = Queue()
# Whether to use implicit/explicit acknowledgments
self.implicitAcknowledgements = self.getImplicit()
# Reference to the Mesos driver used by this scheduler, to be instantiated in run()
self.driver = None
# FIXME: This comment makes no sense to me
# Returns Mesos executor object, which is merged into Mesos tasks as they are built
self.executor = self.buildExecutor()
self.nextJobID = 0
self.lastReconciliation = time.time()
self.reconciliationPeriod = 120
# Start the driver
self._startDriver()
|
def __init__(
self,
config,
maxCores,
maxMemory,
maxDisk,
masterAddress,
userScript=None,
toilDistribution=None,
):
AbstractBatchSystem.__init__(self, config, maxCores, maxMemory, maxDisk)
# The hot-deployed resources representing the user script and the toil distribution
# respectively. Will be passed along in every Mesos task. See
# toil.common.HotDeployedResource for details.
self.userScript = userScript
self.toilDistribution = toilDistribution
# Written to when mesos kills tasks, as directed by toil
self.killedSet = set()
# Dictionary of queues, which toil assigns jobs to. Each queue represents a job type,
# defined by resource usage
self.jobQueueList = defaultdict(list)
# Address of Mesos master in the form host:port where host can be an IP or a hostname
self.masterAddress = masterAddress
# queue of jobs to kill, by jobID.
self.killSet = set()
# contains jobs on which killBatchJobs were called,
# regardless of whether or not they actually were killed or
# ended by themselves.
self.intendedKill = set()
# Dict of launched jobIDs to TaskData named tuple. Contains start time, executorID, and slaveID.
self.runningJobMap = {}
# Queue of jobs whose status has been updated, according to mesos. Req'd by toil
self.updatedJobsQueue = Queue()
# Wether to use implicit/explicit acknowledgments
self.implicitAcknowledgements = self.getImplicit()
# Reference to the Mesos driver used by this scheduler, to be instantiated in run()
self.driver = None
# FIXME: This comment makes no sense to me
# Returns Mesos executor object, which is merged into Mesos tasks as they are built
self.executor = self.buildExecutor()
self.nextJobID = 0
self.lastReconciliation = time.time()
self.reconciliationPeriod = 120
# Start the driver
self._startDriver()
|
https://github.com/DataBiosphere/toil/issues/421
|
DEBUG:toil.batchSystems.mesos.executor:Starting executor
I0924 22:47:08.587648 43191 exec.cpp:132] Version: 0.22.0
I0924 22:47:08.592193 43192 exec.cpp:206] Executor registered on slave 20150924-214557-83886090-5050-44087-S8
INFO:toil.batchSystems.mesos.executor:Registered with framework
DEBUG:toil.batchSystems.mesos.executor:Running task 0
DEBUG:toil.batchSystems.mesos.executor:Sending status update ...
DEBUG:toil.batchSystems.mesos.executor:Sent status update
DEBUG:toil.batchSystems.mesos.executor:Invoking command: '/usr/bin/python2.7 -E /usr/local/lib/python2.7/dist-packages/toil/worker.py azure:hgvm:tree2 683b1010_55c3_4978_a7e7_853f17ed1803'
DEBUG:toil.batchSystems.mesos.executor:Sent stats message
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 408, in <module>
main()
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 89, in main
jobStore = loadJobStore(jobStoreString)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 366, in loadJobStore
return AzureJobStore( account, namePrefix, config=config )
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 78, in __init__
account_key = _fetchAzureAccountKey(accountName)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 63, in _fetchAzureAccountKey
return configParser.get('AzureStorageCredentials', accountName)
File "/usr/lib/python2.7/ConfigParser.py", line 330, in get
raise NoSectionError(section)
ConfigParser.NoSectionError: No section: 'AzureStorageCredentials'
DEBUG:toil.batchSystems.mesos.executor:Sending status update ...
DEBUG:toil.batchSystems.mesos.executor:Sent status update
I0924 22:47:09.738494 43200 exec.cpp:379] Executor asked to shutdown
CRITICAL:toil.batchSystems.mesos.executor:Shutting down executor...
CRITICAL:toil.batchSystems.mesos.executor:Executor shut down
|
ConfigParser.NoSectionError
|
def issueBatchJob(self, command, memory, cores, disk):
"""
Issues the following command returning a unique jobID. Command is the string to run, memory is an int giving
the number of bytes the job needs to run in and cores is the number of cpus needed for the job and error-file
is the path of the file to place any std-err/std-out in.
"""
# puts job into job_type_queue to be run by Mesos, AND puts jobID in current_job[]
self.checkResourceRequest(memory, cores, disk)
jobID = self.nextJobID
self.nextJobID += 1
job = ToilJob(
jobID=jobID,
resources=ResourceRequirement(memory=memory, cores=cores, disk=disk),
command=command,
userScript=self.userScript,
toilDistribution=self.toilDistribution,
environment=self.environment.copy(),
)
job_type = job.resources
log.debug(
"Queueing the job command: %s with job id: %s ..." % (command, str(jobID))
)
self.jobQueueList[job_type].append(job)
log.debug("... queued")
return jobID
|
def issueBatchJob(self, command, memory, cores, disk):
"""
Issues the following command returning a unique jobID. Command is the string to run, memory is an int giving
the number of bytes the job needs to run in and cores is the number of cpus needed for the job and error-file
is the path of the file to place any std-err/std-out in.
"""
# puts job into job_type_queue to be run by Mesos, AND puts jobID in current_job[]
self.checkResourceRequest(memory, cores, disk)
jobID = self.nextJobID
self.nextJobID += 1
job = ToilJob(
jobID=jobID,
resources=ResourceRequirement(memory=memory, cores=cores, disk=disk),
command=command,
userScript=self.userScript,
toilDistribution=self.toilDistribution,
)
job_type = job.resources
log.debug(
"Queueing the job command: %s with job id: %s ..." % (command, str(jobID))
)
self.jobQueueList[job_type].append(job)
log.debug("... queued")
return jobID
|
https://github.com/DataBiosphere/toil/issues/421
|
DEBUG:toil.batchSystems.mesos.executor:Starting executor
I0924 22:47:08.587648 43191 exec.cpp:132] Version: 0.22.0
I0924 22:47:08.592193 43192 exec.cpp:206] Executor registered on slave 20150924-214557-83886090-5050-44087-S8
INFO:toil.batchSystems.mesos.executor:Registered with framework
DEBUG:toil.batchSystems.mesos.executor:Running task 0
DEBUG:toil.batchSystems.mesos.executor:Sending status update ...
DEBUG:toil.batchSystems.mesos.executor:Sent status update
DEBUG:toil.batchSystems.mesos.executor:Invoking command: '/usr/bin/python2.7 -E /usr/local/lib/python2.7/dist-packages/toil/worker.py azure:hgvm:tree2 683b1010_55c3_4978_a7e7_853f17ed1803'
DEBUG:toil.batchSystems.mesos.executor:Sent stats message
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 408, in <module>
main()
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 89, in main
jobStore = loadJobStore(jobStoreString)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 366, in loadJobStore
return AzureJobStore( account, namePrefix, config=config )
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 78, in __init__
account_key = _fetchAzureAccountKey(accountName)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 63, in _fetchAzureAccountKey
return configParser.get('AzureStorageCredentials', accountName)
File "/usr/lib/python2.7/ConfigParser.py", line 330, in get
raise NoSectionError(section)
ConfigParser.NoSectionError: No section: 'AzureStorageCredentials'
DEBUG:toil.batchSystems.mesos.executor:Sending status update ...
DEBUG:toil.batchSystems.mesos.executor:Sent status update
I0924 22:47:09.738494 43200 exec.cpp:379] Executor asked to shutdown
CRITICAL:toil.batchSystems.mesos.executor:Shutting down executor...
CRITICAL:toil.batchSystems.mesos.executor:Executor shut down
|
ConfigParser.NoSectionError
|
def launchTask(self, driver, task):
"""
Invoked by SchedulerDriver when a Mesos task should be launched by this executor
"""
def runTask():
log.debug("Running task %s", task.task_id.value)
sendUpdate(mesos_pb2.TASK_RUNNING)
popen = runJob(pickle.loads(task.data))
self.runningTasks[task.task_id.value] = popen.pid
try:
exitStatus = popen.wait()
if 0 == exitStatus:
sendUpdate(mesos_pb2.TASK_FINISHED)
elif -9 == exitStatus:
sendUpdate(mesos_pb2.TASK_KILLED)
else:
sendUpdate(mesos_pb2.TASK_FAILED)
except:
exc_type, exc_value, exc_trace = sys.exc_info()
sendUpdate(
mesos_pb2.TASK_FAILED,
message=str(traceback.format_exception_only(exc_type, exc_value)),
)
finally:
del self.runningTasks[task.task_id.value]
def runJob(job):
"""
:type job: toil.batchSystems.mesos.ToilJob
:rtype: subprocess.Popen
"""
if job.userScript:
job.userScript.register()
log.debug("Invoking command: '%s'", job.command)
with self.popenLock:
return subprocess.Popen(
job.command, shell=True, env=dict(os.environ, **job.environment)
)
def sendUpdate(taskState, message=""):
log.debug("Sending status update ...")
status = mesos_pb2.TaskStatus()
status.task_id.value = task.task_id.value
status.message = message
status.state = taskState
driver.sendStatusUpdate(status)
log.debug("Sent status update")
thread = threading.Thread(target=runTask)
thread.start()
|
def launchTask(self, driver, task):
"""
Invoked by SchedulerDriver when a Mesos task should be launched by this executor
"""
def runTask():
log.debug("Running task %s", task.task_id.value)
sendUpdate(mesos_pb2.TASK_RUNNING)
popen = runJob(pickle.loads(task.data))
self.runningTasks[task.task_id.value] = popen.pid
try:
exitStatus = popen.wait()
if 0 == exitStatus:
sendUpdate(mesos_pb2.TASK_FINISHED)
elif -9 == exitStatus:
sendUpdate(mesos_pb2.TASK_KILLED)
else:
sendUpdate(mesos_pb2.TASK_FAILED)
except:
exc_type, exc_value, exc_trace = sys.exc_info()
sendUpdate(
mesos_pb2.TASK_FAILED,
message=str(traceback.format_exception_only(exc_type, exc_value)),
)
finally:
del self.runningTasks[task.task_id.value]
def runJob(job):
"""
:type job: toil.batchSystems.mesos.ToilJob
:rtype: subprocess.Popen
"""
if job.userScript:
job.userScript.register()
log.debug("Invoking command: '%s'", job.command)
with self.popenLock:
return subprocess.Popen(job.command, shell=True)
def sendUpdate(taskState, message=""):
log.debug("Sending status update ...")
status = mesos_pb2.TaskStatus()
status.task_id.value = task.task_id.value
status.message = message
status.state = taskState
driver.sendStatusUpdate(status)
log.debug("Sent status update")
thread = threading.Thread(target=runTask)
thread.start()
|
https://github.com/DataBiosphere/toil/issues/421
|
DEBUG:toil.batchSystems.mesos.executor:Starting executor
I0924 22:47:08.587648 43191 exec.cpp:132] Version: 0.22.0
I0924 22:47:08.592193 43192 exec.cpp:206] Executor registered on slave 20150924-214557-83886090-5050-44087-S8
INFO:toil.batchSystems.mesos.executor:Registered with framework
DEBUG:toil.batchSystems.mesos.executor:Running task 0
DEBUG:toil.batchSystems.mesos.executor:Sending status update ...
DEBUG:toil.batchSystems.mesos.executor:Sent status update
DEBUG:toil.batchSystems.mesos.executor:Invoking command: '/usr/bin/python2.7 -E /usr/local/lib/python2.7/dist-packages/toil/worker.py azure:hgvm:tree2 683b1010_55c3_4978_a7e7_853f17ed1803'
DEBUG:toil.batchSystems.mesos.executor:Sent stats message
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 408, in <module>
main()
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 89, in main
jobStore = loadJobStore(jobStoreString)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 366, in loadJobStore
return AzureJobStore( account, namePrefix, config=config )
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 78, in __init__
account_key = _fetchAzureAccountKey(accountName)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 63, in _fetchAzureAccountKey
return configParser.get('AzureStorageCredentials', accountName)
File "/usr/lib/python2.7/ConfigParser.py", line 330, in get
raise NoSectionError(section)
ConfigParser.NoSectionError: No section: 'AzureStorageCredentials'
DEBUG:toil.batchSystems.mesos.executor:Sending status update ...
DEBUG:toil.batchSystems.mesos.executor:Sent status update
I0924 22:47:09.738494 43200 exec.cpp:379] Executor asked to shutdown
CRITICAL:toil.batchSystems.mesos.executor:Shutting down executor...
CRITICAL:toil.batchSystems.mesos.executor:Executor shut down
|
ConfigParser.NoSectionError
|
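launchTask above runs each job on a thread and maps the child's exit status to a Mesos task state: 0 becomes TASK_FINISHED, -9 (killed by SIGKILL) becomes TASK_KILLED, anything else TASK_FAILED. The mapping in isolation, runnable on a POSIX system:

import signal
import subprocess

def taskStateFor(exitStatus):
    # Popen.wait() returns -N when the child died from signal N.
    if exitStatus == 0:
        return "TASK_FINISHED"
    elif exitStatus == -signal.SIGKILL:  # -9, as checked in launchTask
        return "TASK_KILLED"
    return "TASK_FAILED"

print(taskStateFor(subprocess.call(["true"])))   # TASK_FINISHED
print(taskStateFor(subprocess.call(["false"])))  # TASK_FAILED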
def runJob(job):
"""
:type job: toil.batchSystems.mesos.ToilJob
:rtype: subprocess.Popen
"""
if job.userScript:
job.userScript.register()
log.debug("Invoking command: '%s'", job.command)
with self.popenLock:
return subprocess.Popen(
job.command, shell=True, env=dict(os.environ, **job.environment)
)
|
def runJob(job):
"""
:type job: toil.batchSystems.mesos.ToilJob
:rtype: subprocess.Popen
"""
if job.userScript:
job.userScript.register()
log.debug("Invoking command: '%s'", job.command)
with self.popenLock:
return subprocess.Popen(job.command, shell=True)
|
https://github.com/DataBiosphere/toil/issues/421
|
DEBUG:toil.batchSystems.mesos.executor:Starting executor
I0924 22:47:08.587648 43191 exec.cpp:132] Version: 0.22.0
I0924 22:47:08.592193 43192 exec.cpp:206] Executor registered on slave 20150924-214557-83886090-5050-44087-S8
INFO:toil.batchSystems.mesos.executor:Registered with framework
DEBUG:toil.batchSystems.mesos.executor:Running task 0
DEBUG:toil.batchSystems.mesos.executor:Sending status update ...
DEBUG:toil.batchSystems.mesos.executor:Sent status update
DEBUG:toil.batchSystems.mesos.executor:Invoking command: '/usr/bin/python2.7 -E /usr/local/lib/python2.7/dist-packages/toil/worker.py azure:hgvm:tree2 683b1010_55c3_4978_a7e7_853f17ed1803'
DEBUG:toil.batchSystems.mesos.executor:Sent stats message
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 408, in <module>
main()
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 89, in main
jobStore = loadJobStore(jobStoreString)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 366, in loadJobStore
return AzureJobStore( account, namePrefix, config=config )
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 78, in __init__
account_key = _fetchAzureAccountKey(accountName)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 63, in _fetchAzureAccountKey
return configParser.get('AzureStorageCredentials', accountName)
File "/usr/lib/python2.7/ConfigParser.py", line 330, in get
raise NoSectionError(section)
ConfigParser.NoSectionError: No section: 'AzureStorageCredentials'
DEBUG:toil.batchSystems.mesos.executor:Sending status update ...
DEBUG:toil.batchSystems.mesos.executor:Sent status update
I0924 22:47:09.738494 43200 exec.cpp:379] Executor asked to shutdown
CRITICAL:toil.batchSystems.mesos.executor:Shutting down executor...
CRITICAL:toil.batchSystems.mesos.executor:Executor shut down
|
ConfigParser.NoSectionError
|
def worker(self, inputQueue):
while True:
args = inputQueue.get()
if args is None:
log.debug("Received queue sentinel.")
break
jobCommand, jobID, jobCores, jobMemory, jobDisk, environment = args
try:
coreFractions = int(jobCores / self.minCores)
log.debug(
"Acquiring %i bytes of memory from a pool of %s.",
jobMemory,
self.memory,
)
with self.memory.acquisitionOf(jobMemory):
log.debug(
"Acquiring %i fractional cores from a pool of %s to satisfy a "
"request of %f cores",
coreFractions,
self.coreFractions,
jobCores,
)
with self.coreFractions.acquisitionOf(coreFractions):
log.info("Executing command: '%s'.", jobCommand)
with self.popenLock:
popen = subprocess.Popen(
jobCommand, shell=True, env=dict(os.environ, **environment)
)
statusCode = None
info = Info(time.time(), popen, killIntended=False)
try:
self.runningJobs[jobID] = info
try:
statusCode = popen.wait()
if 0 != statusCode:
if statusCode != -9 or not info.killIntended:
log.error(
"Got exit code %i (indicating failure) from "
"command '%s'.",
statusCode,
jobCommand,
)
finally:
self.runningJobs.pop(jobID)
finally:
if statusCode is not None and not info.killIntended:
self.outputQueue.put((jobID, statusCode))
finally:
log.debug(
"Finished job. self.coreFractions ~ %s and self.memory ~ %s",
self.coreFractions.value,
self.memory.value,
)
log.debug("Exiting worker thread normally.")
|
def worker(self, inputQueue):
while True:
args = inputQueue.get()
if args is None:
log.debug("Received queue sentinel.")
break
jobCommand, jobID, jobCores, jobMemory, jobDisk = args
try:
coreFractions = int(jobCores / self.minCores)
log.debug(
"Acquiring %i bytes of memory from a pool of %s.",
jobMemory,
self.memory,
)
with self.memory.acquisitionOf(jobMemory):
log.debug(
"Acquiring %i fractional cores from a pool of %s to satisfy a "
"request of %f cores",
coreFractions,
self.coreFractions,
jobCores,
)
with self.coreFractions.acquisitionOf(coreFractions):
log.info("Executing command: '%s'.", jobCommand)
with self.popenLock:
popen = subprocess.Popen(jobCommand, shell=True)
statusCode = None
info = Info(time.time(), popen, killIntended=False)
try:
self.runningJobs[jobID] = info
try:
statusCode = popen.wait()
if 0 != statusCode:
if statusCode != -9 or not info.killIntended:
log.error(
"Got exit code %i (indicating failure) from "
"command '%s'.",
statusCode,
jobCommand,
)
finally:
self.runningJobs.pop(jobID)
finally:
if statusCode is not None and not info.killIntended:
self.outputQueue.put((jobID, statusCode))
finally:
log.debug(
"Finished job. self.coreFractions ~ %s and self.memory ~ %s",
self.coreFractions.value,
self.memory.value,
)
log.debug("Exiting worker thread normally.")
|
https://github.com/DataBiosphere/toil/issues/421
|
DEBUG:toil.batchSystems.mesos.executor:Starting executor
I0924 22:47:08.587648 43191 exec.cpp:132] Version: 0.22.0
I0924 22:47:08.592193 43192 exec.cpp:206] Executor registered on slave 20150924-214557-83886090-5050-44087-S8
INFO:toil.batchSystems.mesos.executor:Registered with framework
DEBUG:toil.batchSystems.mesos.executor:Running task 0
DEBUG:toil.batchSystems.mesos.executor:Sending status update ...
DEBUG:toil.batchSystems.mesos.executor:Sent status update
DEBUG:toil.batchSystems.mesos.executor:Invoking command: '/usr/bin/python2.7 -E /usr/local/lib/python2.7/dist-packages/toil/worker.py azure:hgvm:tree2 683b1010_55c3_4978_a7e7_853f17ed1803'
DEBUG:toil.batchSystems.mesos.executor:Sent stats message
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 408, in <module>
main()
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 89, in main
jobStore = loadJobStore(jobStoreString)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 366, in loadJobStore
return AzureJobStore( account, namePrefix, config=config )
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 78, in __init__
account_key = _fetchAzureAccountKey(accountName)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 63, in _fetchAzureAccountKey
return configParser.get('AzureStorageCredentials', accountName)
File "/usr/lib/python2.7/ConfigParser.py", line 330, in get
raise NoSectionError(section)
ConfigParser.NoSectionError: No section: 'AzureStorageCredentials'
DEBUG:toil.batchSystems.mesos.executor:Sending status update ...
DEBUG:toil.batchSystems.mesos.executor:Sent status update
I0924 22:47:09.738494 43200 exec.cpp:379] Executor asked to shutdown
CRITICAL:toil.batchSystems.mesos.executor:Shutting down executor...
CRITICAL:toil.batchSystems.mesos.executor:Executor shut down
|
ConfigParser.NoSectionError
|
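The single-machine worker above reserves memory and fractional cores through a context manager, acquisitionOf, on shared pools. A minimal sketch of such a blocking counting pool (hypothetical, not the toil implementation):

import threading
from contextlib import contextmanager

class ResourcePool(object):
    # A counter that blocks acquirers until the requested amount is free.
    def __init__(self, initial):
        self.value = initial
        self.condition = threading.Condition()

    @contextmanager
    def acquisitionOf(self, amount):
        with self.condition:
            while self.value < amount:
                self.condition.wait()
            self.value -= amount
        try:
            yield
        finally:
            with self.condition:
                self.value += amount
                self.condition.notify_all()

memory = ResourcePool(2 ** 31)
with memory.acquisitionOf(2 ** 30):
    print(memory.value)  # 2 ** 31 - 2 ** 30 while the job holds its reservation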
def issueBatchJob(self, command, memory, cores, disk):
"""
Adds the command and resources to a queue to be run.
"""
# Round cores to minCores and apply scale
cores = math.ceil(cores * self.scale / self.minCores) * self.minCores
assert cores <= self.maxCores, (
"job is requesting {} cores, which is greater than {} available on the machine. Scale currently set "
"to {} consider adjusting job or scale.".format(
cores, self.maxCores, self.scale
)
)
assert cores >= self.minCores
assert memory <= self.maxMemory, (
"job requests {} mem, only {} total available.".format(memory, self.maxMemory)
)
self.checkResourceRequest(memory, cores, disk)
log.debug(
"Issuing the command: %s with memory: %i, cores: %i, disk: %i"
% (command, memory, cores, disk)
)
with self.jobIndexLock:
jobID = self.jobIndex
self.jobIndex += 1
self.jobs[jobID] = command
self.inputQueue.put((command, jobID, cores, memory, disk, self.environment.copy()))
return jobID
|
def issueBatchJob(self, command, memory, cores, disk):
"""
Adds the command and resources to a queue to be run.
"""
# Round cores to minCores and apply scale
cores = math.ceil(cores * self.scale / self.minCores) * self.minCores
assert cores <= self.maxCores, (
"job is requesting {} cores, which is greater than {} available on the machine. Scale currently set "
"to {} consider adjusting job or scale.".format(
cores, self.maxCores, self.scale
)
)
assert cores >= self.minCores
assert memory <= self.maxMemory, (
"job requests {} mem, only {} total available.".format(memory, self.maxMemory)
)
self.checkResourceRequest(memory, cores, disk)
log.debug(
"Issuing the command: %s with memory: %i, cores: %i, disk: %i"
% (command, memory, cores, disk)
)
with self.jobIndexLock:
jobID = self.jobIndex
self.jobIndex += 1
self.jobs[jobID] = command
self.inputQueue.put((command, jobID, cores, memory, disk))
return jobID
|
https://github.com/DataBiosphere/toil/issues/421
|
DEBUG:toil.batchSystems.mesos.executor:Starting executor
I0924 22:47:08.587648 43191 exec.cpp:132] Version: 0.22.0
I0924 22:47:08.592193 43192 exec.cpp:206] Executor registered on slave 20150924-214557-83886090-5050-44087-S8
INFO:toil.batchSystems.mesos.executor:Registered with framework
DEBUG:toil.batchSystems.mesos.executor:Running task 0
DEBUG:toil.batchSystems.mesos.executor:Sending status update ...
DEBUG:toil.batchSystems.mesos.executor:Sent status update
DEBUG:toil.batchSystems.mesos.executor:Invoking command: '/usr/bin/python2.7 -E /usr/local/lib/python2.7/dist-packages/toil/worker.py azure:hgvm:tree2 683b1010_55c3_4978_a7e7_853f17ed1803'
DEBUG:toil.batchSystems.mesos.executor:Sent stats message
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 408, in <module>
main()
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 89, in main
jobStore = loadJobStore(jobStoreString)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 366, in loadJobStore
return AzureJobStore( account, namePrefix, config=config )
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 78, in __init__
account_key = _fetchAzureAccountKey(accountName)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 63, in _fetchAzureAccountKey
return configParser.get('AzureStorageCredentials', accountName)
File "/usr/lib/python2.7/ConfigParser.py", line 330, in get
raise NoSectionError(section)
ConfigParser.NoSectionError: No section: 'AzureStorageCredentials'
DEBUG:toil.batchSystems.mesos.executor:Sending status update ...
DEBUG:toil.batchSystems.mesos.executor:Sent status update
I0924 22:47:09.738494 43200 exec.cpp:379] Executor asked to shutdown
CRITICAL:toil.batchSystems.mesos.executor:Shutting down executor...
CRITICAL:toil.batchSystems.mesos.executor:Executor shut down
|
ConfigParser.NoSectionError
|
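issueBatchJob above normalizes each request with `cores = math.ceil(cores * scale / minCores) * minCores`, rounding the scaled request up to the next multiple of minCores before the admission asserts run. Worked out with an assumed minCores of 0.5:

import math

def roundCores(cores, scale=1.0, minCores=0.5):
    # Round the scaled request up to the next multiple of minCores.
    return math.ceil(cores * scale / minCores) * minCores

print(roundCores(0.75))            # 1.0: 0.75 cores occupies two half-core slots
print(roundCores(2.0, scale=0.5))  # 1.0: scale shrinks the request before rounding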
def __init__(self):
# Core options
self.jobStore = os.path.abspath("./toil")
self.logLevel = getLogLevelString()
self.workDir = None
self.stats = False
# Because the stats option needs the jobStore to persist past the end of the run,
        # the clean default value depends on the specified stats option and is determined in setOptions
self.clean = None
# Restarting the workflow options
self.restart = False
# Batch system options
self.batchSystem = "singleMachine"
self.scale = 1
self.mesosMasterAddress = "localhost:5050"
self.parasolCommand = "parasol"
self.parasolMaxBatches = 10000
self.environment = {}
# Resource requirements
self.defaultMemory = 2147483648
self.defaultCores = 1
self.defaultDisk = 2147483648
self.defaultCache = 2147483648 # Cache is 2GB
self.maxCores = sys.maxint
self.maxMemory = sys.maxint
self.maxDisk = sys.maxint
# Retrying/rescuing jobs
self.retryCount = 0
self.maxJobDuration = sys.maxint
self.rescueJobsFrequency = 3600
# Misc
self.maxLogFileSize = 50120
self.sseKey = None
self.cseKey = None
# Debug options
self.badWorker = 0.0
self.badWorkerFailInterval = 0.01
|
def __init__(self):
# Core options
self.jobStore = os.path.abspath("./toil")
self.logLevel = getLogLevelString()
self.workDir = None
self.stats = False
# Because the stats option needs the jobStore to persist past the end of the run,
        # the clean default value depends on the specified stats option and is determined in setOptions
self.clean = None
# Restarting the workflow options
self.restart = False
# Batch system options
self.batchSystem = "singleMachine"
self.scale = 1
self.mesosMasterAddress = "localhost:5050"
self.parasolCommand = "parasol"
self.parasolMaxBatches = 10000
# Resource requirements
self.defaultMemory = 2147483648
self.defaultCores = 1
self.defaultDisk = 2147483648
self.defaultCache = 2147483648 # Cache is 2GB
self.maxCores = sys.maxint
self.maxMemory = sys.maxint
self.maxDisk = sys.maxint
# Retrying/rescuing jobs
self.retryCount = 0
self.maxJobDuration = sys.maxint
self.rescueJobsFrequency = 3600
# Misc
self.maxLogFileSize = 50120
self.sseKey = None
self.cseKey = None
# Debug options
self.badWorker = 0.0
self.badWorkerFailInterval = 0.01
|
https://github.com/DataBiosphere/toil/issues/421
|
DEBUG:toil.batchSystems.mesos.executor:Starting executor
I0924 22:47:08.587648 43191 exec.cpp:132] Version: 0.22.0
I0924 22:47:08.592193 43192 exec.cpp:206] Executor registered on slave 20150924-214557-83886090-5050-44087-S8
INFO:toil.batchSystems.mesos.executor:Registered with framework
DEBUG:toil.batchSystems.mesos.executor:Running task 0
DEBUG:toil.batchSystems.mesos.executor:Sending status update ...
DEBUG:toil.batchSystems.mesos.executor:Sent status update
DEBUG:toil.batchSystems.mesos.executor:Invoking command: '/usr/bin/python2.7 -E /usr/local/lib/python2.7/dist-packages/toil/worker.py azure:hgvm:tree2 683b1010_55c3_4978_a7e7_853f17ed1803'
DEBUG:toil.batchSystems.mesos.executor:Sent stats message
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 408, in <module>
main()
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 89, in main
jobStore = loadJobStore(jobStoreString)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 366, in loadJobStore
return AzureJobStore( account, namePrefix, config=config )
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 78, in __init__
account_key = _fetchAzureAccountKey(accountName)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 63, in _fetchAzureAccountKey
return configParser.get('AzureStorageCredentials', accountName)
File "/usr/lib/python2.7/ConfigParser.py", line 330, in get
raise NoSectionError(section)
ConfigParser.NoSectionError: No section: 'AzureStorageCredentials'
DEBUG:toil.batchSystems.mesos.executor:Sending status update ...
DEBUG:toil.batchSystems.mesos.executor:Sent status update
I0924 22:47:09.738494 43200 exec.cpp:379] Executor asked to shutdown
CRITICAL:toil.batchSystems.mesos.executor:Shutting down executor...
CRITICAL:toil.batchSystems.mesos.executor:Executor shut down
|
ConfigParser.NoSectionError
|
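Several of the defaults above are the literal 2147483648, which is 2 GiB, while the resource caps fall back to sys.maxint (a Python-2-only constant; Python 3 code would use sys.maxsize). The literal, spelled out:

assert 2147483648 == 2 ** 31 == 2 * 1024 ** 3  # the recurring 2 GiB default
print(2147483648 // 1024 ** 2)                 # 2048 MiB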
def setOptions(self, options):
"""
Creates a config object from the options object.
"""
from bd2k.util.humanize import human2bytes # This import is used to convert
        # from human-readable quantities to integers
def setOption(varName, parsingFn=None, checkFn=None):
# If options object has the option "varName" specified
# then set the "varName" attrib to this value in the config object
x = getattr(options, varName, None)
if x != None:
if parsingFn != None:
x = parsingFn(x)
if checkFn != None:
try:
checkFn(x)
except AssertionError:
raise RuntimeError(
"The %s option has an invalid value: %s" % (varName, x)
)
setattr(self, varName, x)
h2b = lambda x: human2bytes(
str(x)
) # Function to parse integer from string expressed in different formats
def iC(minValue, maxValue=sys.maxint):
            # Returns a function to check that a parameter is in a valid range
def f(x):
assert x >= minValue and x < maxValue
return f
# Core options
setOption(
"jobStore",
parsingFn=lambda x: os.path.abspath(x)
if options.jobStore.startswith(".")
else x,
)
# TODO: LOG LEVEL STRING
setOption("workDir")
setOption("stats")
setOption("clean")
if self.stats:
if self.clean != "never" and self.clean is not None:
raise RuntimeError(
"Contradicting options passed: Clean flag is set to %s "
"despite the stats flag requiring "
"the jobStore to be intact at the end of the run. "
"Set clean to 'never'" % self.clean
)
self.clean = "never"
elif self.clean is None:
self.clean = "onSuccess"
# Restarting the workflow options
setOption("restart")
# Batch system options
setOption("batchSystem")
setOption("scale", float)
setOption("mesosMasterAddress")
setOption("parasolCommand")
setOption("parasolMaxBatches", int, iC(1))
setOption("environment", parseSetEnv)
# Resource requirements
setOption("defaultMemory", h2b, iC(1))
setOption("defaultCores", h2b, iC(1))
setOption("defaultDisk", h2b, iC(1))
setOption("defaultCache", h2b, iC(0))
setOption("maxCores", h2b, iC(1))
setOption("maxMemory", h2b, iC(1))
setOption("maxDisk", h2b, iC(1))
# Retrying/rescuing jobs
setOption("retryCount", int, iC(0))
setOption("maxJobDuration", int, iC(1))
setOption("rescueJobsFrequency", int, iC(1))
# Misc
setOption("maxLogFileSize", h2b, iC(1))
def checkSse(sseKey):
with open(sseKey) as f:
assert len(f.readline().rstrip()) == 32
setOption("sseKey", checkFn=checkSse)
setOption("cseKey", checkFn=checkSse)
# Debug options
setOption("badWorker", float, iC(0, 1))
setOption("badWorkerFailInterval", float, iC(0))
|
def setOptions(self, options):
"""
Creates a config object from the options object.
"""
from bd2k.util.humanize import human2bytes # This import is used to convert
        # from human-readable quantities to integers
def setOption(varName, parsingFn=None, checkFn=None):
# If options object has the option "varName" specified
# then set the "varName" attrib to this value in the config object
x = getattr(options, varName, None)
if x != None:
if parsingFn != None:
x = parsingFn(x)
if checkFn != None:
try:
checkFn(x)
except AssertionError:
raise RuntimeError(
"The %s option has an invalid value: %s" % (varName, x)
)
setattr(self, varName, x)
h2b = lambda x: human2bytes(
str(x)
) # Function to parse integer from string expressed in different formats
def iC(minValue, maxValue=sys.maxint):
            # Returns a function to check that a parameter is in a valid range
def f(x):
assert x >= minValue and x < maxValue
return f
# Core options
setOption(
"jobStore",
parsingFn=lambda x: os.path.abspath(x)
if options.jobStore.startswith(".")
else x,
)
# TODO: LOG LEVEL STRING
setOption("workDir")
setOption("stats")
setOption("clean")
if self.stats:
if self.clean != "never" and self.clean is not None:
raise RuntimeError(
"Contradicting options passed: Clean flag is set to %s "
"despite the stats flag requiring "
"the jobStore to be intact at the end of the run. "
"Set clean to 'never'" % self.clean
)
self.clean = "never"
elif self.clean is None:
self.clean = "onSuccess"
# Restarting the workflow options
setOption("restart")
# Batch system options
setOption("batchSystem")
setOption("scale", float)
setOption("mesosMasterAddress")
setOption("parasolCommand")
setOption("parasolMaxBatches", int, iC(1))
# Resource requirements
setOption("defaultMemory", h2b, iC(1))
setOption("defaultCores", h2b, iC(1))
setOption("defaultDisk", h2b, iC(1))
setOption("defaultCache", h2b, iC(0))
setOption("maxCores", h2b, iC(1))
setOption("maxMemory", h2b, iC(1))
setOption("maxDisk", h2b, iC(1))
# Retrying/rescuing jobs
setOption("retryCount", int, iC(0))
setOption("maxJobDuration", int, iC(1))
setOption("rescueJobsFrequency", int, iC(1))
# Misc
setOption("maxLogFileSize", h2b, iC(1))
def checkSse(sseKey):
with open(sseKey) as f:
assert len(f.readline().rstrip()) == 32
setOption("sseKey", checkFn=checkSse)
setOption("cseKey", checkFn=checkSse)
# Debug options
setOption("badWorker", float, iC(0, 1))
setOption("badWorkerFailInterval", float, iC(0))
|
https://github.com/DataBiosphere/toil/issues/421
|
DEBUG:toil.batchSystems.mesos.executor:Starting executor
I0924 22:47:08.587648 43191 exec.cpp:132] Version: 0.22.0
I0924 22:47:08.592193 43192 exec.cpp:206] Executor registered on slave 20150924-214557-83886090-5050-44087-S8
INFO:toil.batchSystems.mesos.executor:Registered with framework
DEBUG:toil.batchSystems.mesos.executor:Running task 0
DEBUG:toil.batchSystems.mesos.executor:Sending status update ...
DEBUG:toil.batchSystems.mesos.executor:Sent status update
DEBUG:toil.batchSystems.mesos.executor:Invoking command: '/usr/bin/python2.7 -E /usr/local/lib/python2.7/dist-packages/toil/worker.py azure:hgvm:tree2 683b1010_55c3_4978_a7e7_853f17ed1803'
DEBUG:toil.batchSystems.mesos.executor:Sent stats message
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 408, in <module>
main()
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 89, in main
jobStore = loadJobStore(jobStoreString)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 366, in loadJobStore
return AzureJobStore( account, namePrefix, config=config )
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 78, in __init__
account_key = _fetchAzureAccountKey(accountName)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 63, in _fetchAzureAccountKey
return configParser.get('AzureStorageCredentials', accountName)
File "/usr/lib/python2.7/ConfigParser.py", line 330, in get
raise NoSectionError(section)
ConfigParser.NoSectionError: No section: 'AzureStorageCredentials'
DEBUG:toil.batchSystems.mesos.executor:Sending status update ...
DEBUG:toil.batchSystems.mesos.executor:Sent status update
I0924 22:47:09.738494 43200 exec.cpp:379] Executor asked to shutdown
CRITICAL:toil.batchSystems.mesos.executor:Shutting down executor...
CRITICAL:toil.batchSystems.mesos.executor:Executor shut down
|
ConfigParser.NoSectionError
|
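The NoSectionError in this record fires on the worker, not the leader: _fetchAzureAccountKey reads an INI credentials file, and the worker's copy of that file lacks the [AzureStorageCredentials] section. Below is a minimal sketch of that lookup with an explicit guard; the ~/.toilAzureCredentials path and the exact file layout are assumptions for illustration, not taken from the record.

import os
try:
    from configparser import RawConfigParser          # Python 3
except ImportError:
    from ConfigParser import RawConfigParser          # Python 2, as in the traceback

def fetchAzureAccountKey(accountName,
                         credsPath=os.path.expanduser('~/.toilAzureCredentials')):
    # parser.read() silently yields an empty parser when the file is absent,
    # which is exactly the state that produces NoSectionError on a worker.
    parser = RawConfigParser()
    parser.read(credsPath)
    if not parser.has_section('AzureStorageCredentials'):
        raise RuntimeError("No [AzureStorageCredentials] section in %s on this "
                           "node; workers need the credentials file too." % credsPath)
    return parser.get('AzureStorageCredentials', accountName)

# Assumed file shape:
#   [AzureStorageCredentials]
#   hgvm=<base64-encoded storage account key>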
def _addOptions(addGroupFn, config):
#
# Core options
#
addOptionFn = addGroupFn(
"toil core options",
"Options to specify the \
location of the toil and turn on stats collation about the performance of jobs.",
)
# TODO - specify how this works when path is AWS
addOptionFn(
"jobStore",
type=str,
help=(
"Store in which to place job management files \
and the global accessed temporary files"
"(If this is a file path this needs to be globally accessible "
"by all machines running jobs).\n"
"If the store already exists and restart is false an"
" ExistingJobStoreException exception will be thrown."
),
)
addOptionFn(
"--workDir",
dest="workDir",
default=None,
help="Absolute path to directory where temporary files generated during the Toil run should be placed. "
"Default is determined by environmental variables (TMPDIR, TEMP, TMP) via mkdtemp",
)
addOptionFn(
"--stats",
dest="stats",
action="store_true",
default=None,
help="Records statistics about the toil workflow to be used by 'toil stats'.",
)
addOptionFn(
"--clean",
dest="clean",
choices=["always", "onError", "never", "onSuccess"],
default=None,
help=(
"Determines the deletion of the jobStore upon completion of the program. "
"Choices: 'always', 'onError','never', 'onSuccess'. The --stats option requires "
"information from the jobStore upon completion so the jobStore will never be deleted with"
"that flag. If you wish to be able to restart the run, choose 'never' or 'onSuccess'. "
"Default is 'never' if stats is enabled, and 'onSuccess' otherwise"
),
)
#
# Restarting the workflow options
#
addOptionFn = addGroupFn(
"toil options for restarting an existing workflow",
"Allows the restart of an existing workflow",
)
addOptionFn(
"--restart",
dest="restart",
default=None,
action="store_true",
help="If --restart is specified then will attempt to restart existing workflow "
"at the location pointed to by the --jobStore option. Will raise an exception if the workflow does not exist",
)
#
# Batch system options
#
addOptionFn = addGroupFn(
"toil options for specifying the batch system",
"Allows the specification of the batch system, and arguments to the batch system/big batch system (see below).",
)
addOptionFn(
"--batchSystem",
dest="batchSystem",
default=None,
help=(
"The type of batch system to run the job(s) with, currently can be one "
"of singleMachine, parasol, gridEngine, lsf or mesos'. default=%s"
% config.batchSystem
),
)
addOptionFn(
"--scale",
dest="scale",
default=None,
help=(
"A scaling factor to change the value of all submitted tasks's submitted cores. "
"Used in singleMachine batch system. default=%s" % config.scale
),
)
addOptionFn(
"--mesosMaster",
dest="mesosMasterAddress",
default=None,
help=(
"The host and port of the Mesos master separated by colon. default=%s"
% config.mesosMasterAddress
),
)
addOptionFn(
"--parasolCommand",
dest="parasolCommand",
default=None,
help="The path to the parasol program. default=%s" % config.parasolCommand,
)
addOptionFn(
"--parasolMaxBatches",
dest="parasolMaxBatches",
default=None,
help="Maximum number of job batches the Parasol batch is allowed to create. One "
"batch is created for jobs with a a unique set of resource requirements. "
"default=%i" % config.parasolMaxBatches,
)
#
# Resource requirements
#
addOptionFn = addGroupFn(
"toil options for cores/memory requirements",
"The options to specify default cores/memory requirements (if not specified by the jobs themselves), and to limit the total amount of memory/cores requested from the batch system.",
)
addOptionFn(
"--defaultMemory",
dest="defaultMemory",
default=None,
help=(
"The default amount of memory to request for a job (in bytes), "
"by default is 2^31 = 2 gigabytes, default=%s" % config.defaultMemory
),
)
addOptionFn(
"--defaultCores",
dest="defaultCores",
default=None,
help="The default number of cpu cores to dedicate a job. default=%s"
% config.defaultCores,
)
addOptionFn(
"--defaultDisk",
dest="defaultDisk",
default=None,
help="The default amount of disk space to dedicate a job (in bytes). default=%s"
% config.defaultDisk,
)
addOptionFn(
"--defaultCache",
dest="defaultCache",
default=None,
help=(
"The default amount of disk space to use in caching "
"files shared between jobs. This must be less than the disk requirement "
"for the job default=%s" % config.defaultCache
),
)
addOptionFn(
"--maxCores",
dest="maxCores",
default=None,
help=(
"The maximum number of cpu cores to request from the batch system at any "
"one time. default=%s" % config.maxCores
),
)
addOptionFn(
"--maxMemory",
dest="maxMemory",
default=None,
help=(
"The maximum amount of memory to request from the batch \
system at any one time. default=%s"
% config.maxMemory
),
)
addOptionFn(
"--maxDisk",
dest="maxDisk",
default=None,
help=(
"The maximum amount of disk space to request from the batch \
system at any one time. default=%s"
% config.maxDisk
),
)
#
# Retrying/rescuing jobs
#
addOptionFn = addGroupFn(
"toil options for rescuing/killing/restarting jobs",
"The options for jobs that either run too long/fail or get lost \
(some batch systems have issues!)",
)
addOptionFn(
"--retryCount",
dest="retryCount",
default=None,
help=(
"Number of times to retry a failing job before giving up and "
"labeling job failed. default=%s" % config.retryCount
),
)
addOptionFn(
"--maxJobDuration",
dest="maxJobDuration",
default=None,
help=(
"Maximum runtime of a job (in seconds) before we kill it "
"(this is a lower bound, and the actual time before killing "
"the job may be longer). default=%s" % config.maxJobDuration
),
)
addOptionFn(
"--rescueJobsFrequency",
dest="rescueJobsFrequency",
default=None,
help=(
"Period of time to wait (in seconds) between checking for "
"missing/overlong jobs, that is jobs which get lost by the batch system. Expert parameter. default=%s"
% config.rescueJobsFrequency
),
)
#
# Misc options
#
addOptionFn = addGroupFn("toil miscellaneous options", "Miscellaneous options")
addOptionFn(
"--maxLogFileSize",
dest="maxLogFileSize",
default=None,
help=(
"The maximum size of a job log file to keep (in bytes), log files larger "
"than this will be truncated to the last X bytes. Default is 50 "
"kilobytes, default=%s" % config.maxLogFileSize
),
)
addOptionFn(
"--sseKey",
dest="sseKey",
default=None,
help="Path to file containing 32 character key to be used for server-side encryption on awsJobStore. SSE will "
"not be used if this flag is not passed.",
)
addOptionFn(
"--cseKey",
dest="cseKey",
default=None,
help="Path to file containing 256-bit key to be used for client-side encryption on "
"azureJobStore. By default, no encryption is used.",
)
addOptionFn(
"--setEnv",
"-e",
metavar="NAME=VALUE or NAME",
dest="environment",
default=[],
action="append",
help="Set an environment variable early on in the worker. If VALUE is omitted, "
"it will be looked up in the current environment. Independently of this "
"option, the worker will try to emulate the leader's environment before "
"running a job. Using this option, a variable can be injected into the "
"worker process itself before it is started.",
)
#
# Debug options
#
addOptionFn = addGroupFn("toil debug options", "Debug options")
addOptionFn(
"--badWorker",
dest="badWorker",
default=None,
help=(
"For testing purposes randomly kill 'badWorker' proportion of jobs using SIGKILL, default=%s"
% config.badWorker
),
)
addOptionFn(
"--badWorkerFailInterval",
dest="badWorkerFailInterval",
default=None,
help=(
"When killing the job pick uniformly within the interval from 0.0 to "
"'badWorkerFailInterval' seconds after the worker starts, default=%s"
% config.badWorkerFailInterval
),
)
|
def _addOptions(addGroupFn, config):
#
# Core options
#
addOptionFn = addGroupFn(
"toil core options",
"Options to specify the \
location of the toil and turn on stats collation about the performance of jobs.",
)
# TODO - specify how this works when path is AWS
addOptionFn(
"jobStore",
type=str,
help=(
"Store in which to place job management files \
and the global accessed temporary files"
"(If this is a file path this needs to be globally accessible "
"by all machines running jobs).\n"
"If the store already exists and restart is false an"
" ExistingJobStoreException exception will be thrown."
),
)
addOptionFn(
"--workDir",
dest="workDir",
default=None,
help="Absolute path to directory where temporary files generated during the Toil run should be placed. "
"Default is determined by environmental variables (TMPDIR, TEMP, TMP) via mkdtemp",
)
addOptionFn(
"--stats",
dest="stats",
action="store_true",
default=None,
help="Records statistics about the toil workflow to be used by 'toil stats'.",
)
addOptionFn(
"--clean",
dest="clean",
choices=["always", "onError", "never", "onSuccess"],
default=None,
help=(
"Determines the deletion of the jobStore upon completion of the program. "
"Choices: 'always', 'onError','never', 'onSuccess'. The --stats option requires "
"information from the jobStore upon completion so the jobStore will never be deleted with"
"that flag. If you wish to be able to restart the run, choose 'never' or 'onSuccess'. "
"Default is 'never' if stats is enabled, and 'onSuccess' otherwise"
),
)
#
# Restarting the workflow options
#
addOptionFn = addGroupFn(
"toil options for restarting an existing workflow",
"Allows the restart of an existing workflow",
)
addOptionFn(
"--restart",
dest="restart",
default=None,
action="store_true",
help="If --restart is specified then will attempt to restart existing workflow "
"at the location pointed to by the --jobStore option. Will raise an exception if the workflow does not exist",
)
#
# Batch system options
#
addOptionFn = addGroupFn(
"toil options for specifying the batch system",
"Allows the specification of the batch system, and arguments to the batch system/big batch system (see below).",
)
addOptionFn(
"--batchSystem",
dest="batchSystem",
default=None,
help=(
"The type of batch system to run the job(s) with, currently can be one "
"of singleMachine, parasol, gridEngine, lsf or mesos'. default=%s"
% config.batchSystem
),
)
addOptionFn(
"--scale",
dest="scale",
default=None,
help=(
"A scaling factor to change the value of all submitted tasks's submitted cores. "
"Used in singleMachine batch system. default=%s" % config.scale
),
)
addOptionFn(
"--mesosMaster",
dest="mesosMasterAddress",
default=None,
help=(
"The host and port of the Mesos master separated by colon. default=%s"
% config.mesosMasterAddress
),
)
addOptionFn(
"--parasolCommand",
dest="parasolCommand",
default=None,
help="The path to the parasol program. default=%s" % config.parasolCommand,
)
addOptionFn(
"--parasolMaxBatches",
dest="parasolMaxBatches",
default=None,
help="Maximum number of job batches the Parasol batch is allowed to create. One "
"batch is created for jobs with a a unique set of resource requirements. "
"default=%i" % config.parasolMaxBatches,
)
#
# Resource requirements
#
addOptionFn = addGroupFn(
"toil options for cores/memory requirements",
"The options to specify default cores/memory requirements (if not specified by the jobs themselves), and to limit the total amount of memory/cores requested from the batch system.",
)
addOptionFn(
"--defaultMemory",
dest="defaultMemory",
default=None,
help=(
"The default amount of memory to request for a job (in bytes), "
"by default is 2^31 = 2 gigabytes, default=%s" % config.defaultMemory
),
)
addOptionFn(
"--defaultCores",
dest="defaultCores",
default=None,
help="The default number of cpu cores to dedicate a job. default=%s"
% config.defaultCores,
)
addOptionFn(
"--defaultDisk",
dest="defaultDisk",
default=None,
help="The default amount of disk space to dedicate a job (in bytes). default=%s"
% config.defaultDisk,
)
addOptionFn(
"--defaultCache",
dest="defaultCache",
default=None,
help=(
"The default amount of disk space to use in caching "
"files shared between jobs. This must be less than the disk requirement "
"for the job default=%s" % config.defaultCache
),
)
addOptionFn(
"--maxCores",
dest="maxCores",
default=None,
help=(
"The maximum number of cpu cores to request from the batch system at any "
"one time. default=%s" % config.maxCores
),
)
addOptionFn(
"--maxMemory",
dest="maxMemory",
default=None,
help=(
"The maximum amount of memory to request from the batch \
system at any one time. default=%s"
% config.maxMemory
),
)
addOptionFn(
"--maxDisk",
dest="maxDisk",
default=None,
help=(
"The maximum amount of disk space to request from the batch \
system at any one time. default=%s"
% config.maxDisk
),
)
#
# Retrying/rescuing jobs
#
addOptionFn = addGroupFn(
"toil options for rescuing/killing/restarting jobs",
"The options for jobs that either run too long/fail or get lost \
(some batch systems have issues!)",
)
addOptionFn(
"--retryCount",
dest="retryCount",
default=None,
help=(
"Number of times to retry a failing job before giving up and "
"labeling job failed. default=%s" % config.retryCount
),
)
addOptionFn(
"--maxJobDuration",
dest="maxJobDuration",
default=None,
help=(
"Maximum runtime of a job (in seconds) before we kill it "
"(this is a lower bound, and the actual time before killing "
"the job may be longer). default=%s" % config.maxJobDuration
),
)
addOptionFn(
"--rescueJobsFrequency",
dest="rescueJobsFrequency",
default=None,
help=(
"Period of time to wait (in seconds) between checking for "
"missing/overlong jobs, that is jobs which get lost by the batch system. Expert parameter. default=%s"
% config.rescueJobsFrequency
),
)
#
# Misc options
#
addOptionFn = addGroupFn("toil miscellaneous options", "Miscellaneous options")
addOptionFn(
"--maxLogFileSize",
dest="maxLogFileSize",
default=None,
help=(
"The maximum size of a job log file to keep (in bytes), log files larger "
"than this will be truncated to the last X bytes. Default is 50 "
"kilobytes, default=%s" % config.maxLogFileSize
),
)
addOptionFn(
"--sseKey",
dest="sseKey",
default=None,
help="Path to file containing 32 character key to be used for server-side encryption on awsJobStore. SSE will "
"not be used if this flag is not passed.",
)
addOptionFn(
"--cseKey",
dest="cseKey",
default=None,
help="Path to file containing 256-bit key to be used for client-side encryption on "
"azureJobStore. By default, no encryption is used.",
)
#
# Debug options
#
addOptionFn = addGroupFn("toil debug options", "Debug options")
addOptionFn(
"--badWorker",
dest="badWorker",
default=None,
help=(
"For testing purposes randomly kill 'badWorker' proportion of jobs using SIGKILL, default=%s"
% config.badWorker
),
)
addOptionFn(
"--badWorkerFailInterval",
dest="badWorkerFailInterval",
default=None,
help=(
"When killing the job pick uniformly within the interval from 0.0 to "
"'badWorkerFailInterval' seconds after the worker starts, default=%s"
% config.badWorkerFailInterval
),
)
|
https://github.com/DataBiosphere/toil/issues/421
|
DEBUG:toil.batchSystems.mesos.executor:Starting executor
I0924 22:47:08.587648 43191 exec.cpp:132] Version: 0.22.0
I0924 22:47:08.592193 43192 exec.cpp:206] Executor registered on slave 20150924-214557-83886090-5050-44087-S8
INFO:toil.batchSystems.mesos.executor:Registered with framework
DEBUG:toil.batchSystems.mesos.executor:Running task 0
DEBUG:toil.batchSystems.mesos.executor:Sending status update ...
DEBUG:toil.batchSystems.mesos.executor:Sent status update
DEBUG:toil.batchSystems.mesos.executor:Invoking command: '/usr/bin/python2.7 -E /usr/local/lib/python2.7/dist-packages/toil/worker.py azure:hgvm:tree2 683b1010_55c3_4978_a7e7_853f17ed1803'
DEBUG:toil.batchSystems.mesos.executor:Sent stats message
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 408, in <module>
main()
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 89, in main
jobStore = loadJobStore(jobStoreString)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 366, in loadJobStore
return AzureJobStore( account, namePrefix, config=config )
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 78, in __init__
account_key = _fetchAzureAccountKey(accountName)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 63, in _fetchAzureAccountKey
return configParser.get('AzureStorageCredentials', accountName)
File "/usr/lib/python2.7/ConfigParser.py", line 330, in get
raise NoSectionError(section)
ConfigParser.NoSectionError: No section: 'AzureStorageCredentials'
DEBUG:toil.batchSystems.mesos.executor:Sending status update ...
DEBUG:toil.batchSystems.mesos.executor:Sent status update
I0924 22:47:09.738494 43200 exec.cpp:379] Executor asked to shutdown
CRITICAL:toil.batchSystems.mesos.executor:Shutting down executor...
CRITICAL:toil.batchSystems.mesos.executor:Executor shut down
|
ConfigParser.NoSectionError
|
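The only difference between the two _addOptions versions above is the new --setEnv/-e option, which appends raw 'NAME=VALUE' or bare 'NAME' strings to config.environment. The sketch below shows how such strings could be folded into a mapping, with a bare NAME looked up in the leader's own environment; parseSetEnv is a hypothetical helper name chosen for illustration, not necessarily what Toil calls it.

import os

def parseSetEnv(items):
    # items is the list argparse appends to, e.g. ['FOO=bar', 'PATH']
    environment = {}
    for item in items:
        if '=' in item:
            name, value = item.split('=', 1)
        else:
            # bare NAME: take the value from the leader's own environment
            name, value = item, os.environ.get(item)
        if not name:
            raise ValueError("Empty name in --setEnv argument %r" % item)
        environment[name] = value
    return environment

assert parseSetEnv(['FOO=bar']) == {'FOO': 'bar'}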
def setupToil(options, userScript=None):
"""
Creates the data-structures needed for running a toil.
:type userScript: toil.resource.ModuleDescriptor
"""
# Make the default config object
config = Config()
# Get options specified by the user
config.setOptions(options)
if not options.restart: # Create for the first time
batchSystemClass, kwargs = loadBatchSystemClass(config)
# Load the jobStore
jobStore = loadJobStore(config.jobStore, config=config)
else:
# Reload the workflow
jobStore = loadJobStore(config.jobStore)
config = jobStore.config
# Update the earlier config with any options that have been set
config.setOptions(options)
# Write these new options back to disk
jobStore.writeConfigToStore()
# Get the batch system class
batchSystemClass, kwargs = loadBatchSystemClass(config)
if (
userScript is not None
and not userScript.belongsToToil
and batchSystemClass.supportsHotDeployment()
):
kwargs["userScript"] = userScript.saveAsResourceTo(jobStore)
# TODO: toil distribution
batchSystem = createBatchSystem(config, batchSystemClass, kwargs)
try:
# Set environment variables required by job store
for k, v in jobStore.getEnv().iteritems():
batchSystem.setEnv(k, v)
# Set environment variables passed on command line
for k, v in config.environment.iteritems():
batchSystem.setEnv(k, v)
serialiseEnvironment(jobStore)
yield (config, batchSystem, jobStore)
finally:
logger.debug("Shutting down batch system")
batchSystem.shutdown()
|
def setupToil(options, userScript=None):
"""
Creates the data-structures needed for running a toil.
:type userScript: toil.resource.ModuleDescriptor
"""
# Make the default config object
config = Config()
# Get options specified by the user
config.setOptions(options)
if not options.restart: # Create for the first time
batchSystemClass, kwargs = loadBatchSystemClass(config)
# Load the jobStore
jobStore = loadJobStore(config.jobStore, config=config)
else:
# Reload the workflow
jobStore = loadJobStore(config.jobStore)
config = jobStore.config
# Update the earlier config with any options that have been set
config.setOptions(options)
# Write these new options back to disk
jobStore.writeConfigToStore()
# Get the batch system class
batchSystemClass, kwargs = loadBatchSystemClass(config)
if (
userScript is not None
and not userScript.belongsToToil
and batchSystemClass.supportsHotDeployment()
):
kwargs["userScript"] = userScript.saveAsResourceTo(jobStore)
# TODO: toil distribution
batchSystem = createBatchSystem(config, batchSystemClass, kwargs)
try:
serialiseEnvironment(jobStore)
yield (config, batchSystem, jobStore)
finally:
logger.debug("Shutting down batch system")
batchSystem.shutdown()
|
https://github.com/DataBiosphere/toil/issues/421
|
DEBUG:toil.batchSystems.mesos.executor:Starting executor
I0924 22:47:08.587648 43191 exec.cpp:132] Version: 0.22.0
I0924 22:47:08.592193 43192 exec.cpp:206] Executor registered on slave 20150924-214557-83886090-5050-44087-S8
INFO:toil.batchSystems.mesos.executor:Registered with framework
DEBUG:toil.batchSystems.mesos.executor:Running task 0
DEBUG:toil.batchSystems.mesos.executor:Sending status update ...
DEBUG:toil.batchSystems.mesos.executor:Sent status update
DEBUG:toil.batchSystems.mesos.executor:Invoking command: '/usr/bin/python2.7 -E /usr/local/lib/python2.7/dist-packages/toil/worker.py azure:hgvm:tree2 683b1010_55c3_4978_a7e7_853f17ed1803'
DEBUG:toil.batchSystems.mesos.executor:Sent stats message
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 408, in <module>
main()
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 89, in main
jobStore = loadJobStore(jobStoreString)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 366, in loadJobStore
return AzureJobStore( account, namePrefix, config=config )
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 78, in __init__
account_key = _fetchAzureAccountKey(accountName)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 63, in _fetchAzureAccountKey
return configParser.get('AzureStorageCredentials', accountName)
File "/usr/lib/python2.7/ConfigParser.py", line 330, in get
raise NoSectionError(section)
ConfigParser.NoSectionError: No section: 'AzureStorageCredentials'
DEBUG:toil.batchSystems.mesos.executor:Sending status update ...
DEBUG:toil.batchSystems.mesos.executor:Sent status update
I0924 22:47:09.738494 43200 exec.cpp:379] Executor asked to shutdown
CRITICAL:toil.batchSystems.mesos.executor:Shutting down executor...
CRITICAL:toil.batchSystems.mesos.executor:Executor shut down
|
ConfigParser.NoSectionError
|
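The setupToil fix propagates two environment sources to every worker: the job store's own variables first (jobStore.getEnv()), then the user's --setEnv values (config.environment), so explicit user values win on a name clash. A toy batch system makes that ordering visible; the variable name used here is illustrative only, not one the record defines.

class RecordingBatchSystem(object):
    def __init__(self):
        self.env = {}
    def setEnv(self, name, value=None):
        self.env[name] = value

batchSystem = RecordingBatchSystem()
jobStoreEnv = {'TOIL_AZURE_CREDENTIALS': '/etc/toil/credentials'}  # stands in for jobStore.getEnv()
userEnv = {'TOIL_AZURE_CREDENTIALS': '/home/user/credentials'}     # stands in for config.environment
for k, v in jobStoreEnv.items():
    batchSystem.setEnv(k, v)
for k, v in userEnv.items():
    batchSystem.setEnv(k, v)
# Applied in the same order as setupToil, the user's value wins:
assert batchSystem.env['TOIL_AZURE_CREDENTIALS'] == '/home/user/credentials'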
def __init__(self, accountName, namePrefix, config=None, jobChunkSize=65535):
self.jobChunkSize = jobChunkSize
self.keyPath = None
self.account_key = _fetchAzureAccountKey(accountName)
# Table names have strict requirements in Azure
self.namePrefix = self._sanitizeTableName(namePrefix)
log.debug("Creating job store with name prefix '%s'" % self.namePrefix)
# These are the main API entrypoints.
self.tableService = TableService(
account_key=self.account_key, account_name=accountName
)
self.blobService = BlobService(
account_key=self.account_key, account_name=accountName
)
# Register our job-store in the global table for this storage account
self.registryTable = self._getOrCreateTable("toilRegistry")
exists = self.registryTable.get_entity(row_key=self.namePrefix)
self._checkJobStoreCreation(
config is not None, exists, accountName + ":" + self.namePrefix
)
self.registryTable.insert_or_replace_entity(
row_key=self.namePrefix, entity={"exists": True}
)
# Serialized jobs table
self.jobItems = self._getOrCreateTable(self.qualify("jobs"))
# Job<->file mapping table
self.jobFileIDs = self._getOrCreateTable(self.qualify("jobFileIDs"))
# Container for all shared and unshared files
self.files = self._getOrCreateBlobContainer(self.qualify("files"))
# Stats and logging strings
self.statsFiles = self._getOrCreateBlobContainer(self.qualify("statsfiles"))
# File IDs that contain stats and logging strings
self.statsFileIDs = self._getOrCreateTable(self.qualify("statsFileIDs"))
super(AzureJobStore, self).__init__(config=config)
if self.config.cseKey is not None:
self.keyPath = self.config.cseKey
|
def __init__(self, accountName, namePrefix, config=None, jobChunkSize=65535):
self.jobChunkSize = jobChunkSize
self.keyPath = None
account_key = _fetchAzureAccountKey(accountName)
# Table names have strict requirements in Azure
self.namePrefix = self._sanitizeTableName(namePrefix)
log.debug("Creating job store with name prefix '%s'" % self.namePrefix)
# These are the main API entrypoints.
self.tableService = TableService(account_key=account_key, account_name=accountName)
self.blobService = BlobService(account_key=account_key, account_name=accountName)
# Register our job-store in the global table for this storage account
self.registryTable = self._getOrCreateTable("toilRegistry")
exists = self.registryTable.get_entity(row_key=self.namePrefix)
self._checkJobStoreCreation(
config is not None, exists, accountName + ":" + self.namePrefix
)
self.registryTable.insert_or_replace_entity(
row_key=self.namePrefix, entity={"exists": True}
)
# Serialized jobs table
self.jobItems = self._getOrCreateTable(self.qualify("jobs"))
# Job<->file mapping table
self.jobFileIDs = self._getOrCreateTable(self.qualify("jobFileIDs"))
# Container for all shared and unshared files
self.files = self._getOrCreateBlobContainer(self.qualify("files"))
# Stats and logging strings
self.statsFiles = self._getOrCreateBlobContainer(self.qualify("statsfiles"))
# File IDs that contain stats and logging strings
self.statsFileIDs = self._getOrCreateTable(self.qualify("statsFileIDs"))
super(AzureJobStore, self).__init__(config=config)
if self.config.cseKey is not None:
self.keyPath = self.config.cseKey
|
https://github.com/DataBiosphere/toil/issues/421
|
DEBUG:toil.batchSystems.mesos.executor:Starting executor
I0924 22:47:08.587648 43191 exec.cpp:132] Version: 0.22.0
I0924 22:47:08.592193 43192 exec.cpp:206] Executor registered on slave 20150924-214557-83886090-5050-44087-S8
INFO:toil.batchSystems.mesos.executor:Registered with framework
DEBUG:toil.batchSystems.mesos.executor:Running task 0
DEBUG:toil.batchSystems.mesos.executor:Sending status update ...
DEBUG:toil.batchSystems.mesos.executor:Sent status update
DEBUG:toil.batchSystems.mesos.executor:Invoking command: '/usr/bin/python2.7 -E /usr/local/lib/python2.7/dist-packages/toil/worker.py azure:hgvm:tree2 683b1010_55c3_4978_a7e7_853f17ed1803'
DEBUG:toil.batchSystems.mesos.executor:Sent stats message
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 408, in <module>
main()
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 89, in main
jobStore = loadJobStore(jobStoreString)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 366, in loadJobStore
return AzureJobStore( account, namePrefix, config=config )
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 78, in __init__
account_key = _fetchAzureAccountKey(accountName)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 63, in _fetchAzureAccountKey
return configParser.get('AzureStorageCredentials', accountName)
File "/usr/lib/python2.7/ConfigParser.py", line 330, in get
raise NoSectionError(section)
ConfigParser.NoSectionError: No section: 'AzureStorageCredentials'
DEBUG:toil.batchSystems.mesos.executor:Sending status update ...
DEBUG:toil.batchSystems.mesos.executor:Sent status update
I0924 22:47:09.738494 43200 exec.cpp:379] Executor asked to shutdown
CRITICAL:toil.batchSystems.mesos.executor:Shutting down executor...
CRITICAL:toil.batchSystems.mesos.executor:Executor shut down
|
ConfigParser.NoSectionError
|
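The one-line change in this record keeps the account key on the instance (self.account_key) instead of in a local variable. A plausible motive, consistent with the traceback, is that later methods can then reuse the key without re-reading a credentials file that may only exist on the leader. A toy illustration with stand-in names:

def fetchKeyFromLocalFile(accountName):
    # stand-in for _fetchAzureAccountKey; imagine a read that only works
    # on nodes where the credentials file exists
    return 'secret-key-for-' + accountName

class Store(object):
    def __init__(self, accountName):
        # read once and keep the key on the instance ...
        self.account_key = fetchKeyFromLocalFile(accountName)
    def makeClient(self):
        # ... so later calls never touch the credentials file again
        return ('client', self.account_key)

assert Store('hgvm').makeClient() == ('client', 'secret-key-for-hgvm')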
def worker(self, inputQueue):
while True:
args = inputQueue.get()
if args is None:
log.debug("Received queue sentinel.")
break
jobCommand, jobID, jobCores, jobMemory, jobDisk, environment = args
try:
coreFractions = int(jobCores / self.minCores)
log.debug(
"Acquiring %i bytes of memory from a pool of %s.",
jobMemory,
self.memory,
)
with self.memory.acquisitionOf(jobMemory):
log.debug(
"Acquiring %i fractional cores from a pool of %s to satisfy a "
"request of %f cores",
coreFractions,
self.coreFractions,
jobCores,
)
with self.coreFractions.acquisitionOf(coreFractions):
log.info("Executing command: '%s'.", jobCommand)
with self.popenLock:
popen = subprocess.Popen(
jobCommand, shell=True, env=dict(os.environ, **environment)
)
statusCode = None
info = Info(time.time(), popen, killIntended=False)
try:
self.runningJobs[jobID] = info
try:
statusCode = popen.wait()
if 0 != statusCode:
if statusCode != -9 or not info.killIntended:
log.error(
"Got exit code %i (indicating failure) from "
"command '%s'.",
statusCode,
jobCommand,
)
finally:
self.runningJobs.pop(jobID)
finally:
if statusCode is not None and not info.killIntended:
self.outputQueue.put((jobID, statusCode))
finally:
log.debug(
"Finished job. self.coreFractions ~ %s and self.memory ~ %s",
self.coreFractions.value,
self.memory.value,
)
log.debug("Exiting worker thread normally.")
|
def worker(self, inputQueue):
while True:
args = inputQueue.get()
if args is None:
log.debug("Received queue sentinel.")
break
jobCommand, jobID, jobCores, jobMemory, jobDisk = args
try:
coreFractions = int(jobCores / self.minCores)
log.debug(
"Acquiring %i bytes of memory from a pool of %s.",
jobMemory,
self.memory,
)
with self.memory.acquisitionOf(jobMemory):
log.debug(
"Acquiring %i fractional cores from a pool of %s to satisfy a "
"request of %f cores",
coreFractions,
self.coreFractions,
jobCores,
)
with self.coreFractions.acquisitionOf(coreFractions):
log.info("Executing command: '%s'.", jobCommand)
with self.popenLock:
popen = subprocess.Popen(jobCommand, shell=True)
info = Info(time.time(), popen, killIntended=False)
self.runningJobs[jobID] = info
try:
statusCode = popen.wait()
if 0 != statusCode:
if statusCode != -9 or not info.killIntended:
log.error(
"Got exit code %i (indicating failure) from command '%s'.",
statusCode,
jobCommand,
)
finally:
self.runningJobs.pop(jobID)
finally:
log.debug(
"Finished job. self.coreFractions ~ %s and self.memory ~ %s",
self.coreFractions.value,
self.memory.value,
)
if not info.killIntended:
self.outputQueue.put((jobID, statusCode))
log.debug("Exiting worker thread normally.")
|
https://github.com/DataBiosphere/toil/issues/421
|
DEBUG:toil.batchSystems.mesos.executor:Starting executor
I0924 22:47:08.587648 43191 exec.cpp:132] Version: 0.22.0
I0924 22:47:08.592193 43192 exec.cpp:206] Executor registered on slave 20150924-214557-83886090-5050-44087-S8
INFO:toil.batchSystems.mesos.executor:Registered with framework
DEBUG:toil.batchSystems.mesos.executor:Running task 0
DEBUG:toil.batchSystems.mesos.executor:Sending status update ...
DEBUG:toil.batchSystems.mesos.executor:Sent status update
DEBUG:toil.batchSystems.mesos.executor:Invoking command: '/usr/bin/python2.7 -E /usr/local/lib/python2.7/dist-packages/toil/worker.py azure:hgvm:tree2 683b1010_55c3_4978_a7e7_853f17ed1803'
DEBUG:toil.batchSystems.mesos.executor:Sent stats message
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 408, in <module>
main()
File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 89, in main
jobStore = loadJobStore(jobStoreString)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 366, in loadJobStore
return AzureJobStore( account, namePrefix, config=config )
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 78, in __init__
account_key = _fetchAzureAccountKey(accountName)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 63, in _fetchAzureAccountKey
return configParser.get('AzureStorageCredentials', accountName)
File "/usr/lib/python2.7/ConfigParser.py", line 330, in get
raise NoSectionError(section)
ConfigParser.NoSectionError: No section: 'AzureStorageCredentials'
DEBUG:toil.batchSystems.mesos.executor:Sending status update ...
DEBUG:toil.batchSystems.mesos.executor:Sent status update
I0924 22:47:09.738494 43200 exec.cpp:379] Executor asked to shutdown
CRITICAL:toil.batchSystems.mesos.executor:Shutting down executor...
CRITICAL:toil.batchSystems.mesos.executor:Executor shut down
|
ConfigParser.NoSectionError
|
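The single-machine worker above draws memory and fractional cores from pools through a context manager called acquisitionOf. A minimal thread-safe pool with that interface might look as follows; this is a sketch for orientation, not Toil's implementation.

import threading
from contextlib import contextmanager

class ResourcePool(object):
    def __init__(self, initialValue):
        self.value = initialValue
        self._lock = threading.Condition()

    @contextmanager
    def acquisitionOf(self, amount):
        with self._lock:
            while self.value < amount:     # block until enough resource is free
                self._lock.wait()
            self.value -= amount
        try:
            yield
        finally:
            with self._lock:
                self.value += amount       # return the resource on exit
                self._lock.notify_all()

pool = ResourcePool(8)
with pool.acquisitionOf(2):
    assert pool.value == 6
assert pool.value == 8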
def _serialiseJob(self, jobStore, jobsToJobWrappers, rootJobWrapper):
"""
Pickle a job and its jobWrapper to disk.
"""
# Pickle the job so that its run method can be run at a later time.
# Drop out the children/followOns/predecessors/services - which are
# all recorded within the jobStore and do not need to be stored within
# the job
self._children = []
self._followOns = []
self._services = []
self._directPredecessors = set()
# The pickled job is "run" as the command of the job, see worker
# for the mechanism which unpickles the job and executes the Job.run
# method.
with jobStore.writeFileStream(rootJobWrapper.jobStoreID) as (
fileHandle,
fileStoreID,
):
cPickle.dump(self, fileHandle, cPickle.HIGHEST_PROTOCOL)
# Note that getUserScript() may have been overridden. This is intended. If we used
# self.userModule directly, we'd be getting a reference to job.py if the job was
# specified as a function (as opposed to a class) since that is where FunctionWrappingJob
# is defined. What we really want is the module that was loaded as __main__,
# and FunctionWrappingJob overrides getUserScript() to give us just that. Only then can
# filter_main() in _unpickle() do its job of resolving any user-defined type or function.
userScript = self.getUserScript().globalize()
jobsToJobWrappers[self].command = " ".join(("_toil", fileStoreID) + userScript)
# Update the status of the jobWrapper on disk
jobStore.update(jobsToJobWrappers[self])
|
def _serialiseJob(self, jobStore, jobsToJobWrappers, rootJobWrapper):
"""
Pickle a job and its jobWrapper to disk.
"""
# Pickle the job so that its run method can be run at a later time.
# Drop out the children/followOns/predecessors/services - which are
# all recorded within the jobStore and do not need to be stored within
# the job
self._children = []
self._followOns = []
self._services = []
self._directPredecessors = set()
# The pickled job is "run" as the command of the job, see worker
# for the mechanism which unpickles the job and executes the Job.run
# method.
with jobStore.writeFileStream(rootJobWrapper.jobStoreID) as (
fileHandle,
fileStoreID,
):
cPickle.dump(self, fileHandle, cPickle.HIGHEST_PROTOCOL)
jobsToJobWrappers[self].command = " ".join(
("_toil", fileStoreID) + self.userModule.globalize()
)
# Update the status of the jobWrapper on disk
jobStore.update(jobsToJobWrappers[self])
|
https://github.com/DataBiosphere/toil/issues/423
|
WARNING:toil.leader:732fd38e_d585_4405_8d6b_c37a982f1ba5: ---TOIL WORKER OUTPUT LOG---
WARNING:toil.leader:732fd38e_d585_4405_8d6b_c37a982f1ba5: Next available file descriptor: 5
WARNING:toil.leader:732fd38e_d585_4405_8d6b_c37a982f1ba5: DEBUG:__main__:Next available file descriptor: 5
WARNING:toil.leader:732fd38e_d585_4405_8d6b_c37a982f1ba5: Parsed job
WARNING:toil.leader:732fd38e_d585_4405_8d6b_c37a982f1ba5: DEBUG:__main__:Parsed job
WARNING:toil.leader:732fd38e_d585_4405_8d6b_c37a982f1ba5: Traceback (most recent call last):
WARNING:toil.leader:732fd38e_d585_4405_8d6b_c37a982f1ba5: File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 238, in main
WARNING:toil.leader:732fd38e_d585_4405_8d6b_c37a982f1ba5: jobStore)._execute( jobWrapper=job,
WARNING:toil.leader:732fd38e_d585_4405_8d6b_c37a982f1ba5: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 555, in _loadJob
WARNING:toil.leader:732fd38e_d585_4405_8d6b_c37a982f1ba5: return cls._unpickle(userModule, fileHandle)
WARNING:toil.leader:732fd38e_d585_4405_8d6b_c37a982f1ba5: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 576, in _unpickle
WARNING:toil.leader:732fd38e_d585_4405_8d6b_c37a982f1ba5: return unpickler.load()
WARNING:toil.leader:732fd38e_d585_4405_8d6b_c37a982f1ba5: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 571, in filter_main
WARNING:toil.leader:732fd38e_d585_4405_8d6b_c37a982f1ba5: return getattr(userModule, class_name)
WARNING:toil.leader:732fd38e_d585_4405_8d6b_c37a982f1ba5: AttributeError: 'module' object has no attribute 'AzureIOStore'
WARNING:toil.leader:732fd38e_d585_4405_8d6b_c37a982f1ba5: Exiting the worker because of a failed job on host hgvmclusteragent1
WARNING:toil.leader:732fd38e_d585_4405_8d6b_c37a982f1ba5: ERROR:__main__:Exiting the worker because of a failed job on host hgvmclusteragent1
WARNING:toil.leader:732fd38e_d585_4405_8d6b_c37a982f1ba5: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job 732fd38e_d585_4405_8d6b_c37a982f1ba5 to 0
|
AttributeError
|
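The AttributeError in this record comes from resolving a pickled '__main__' reference against the wrong user module, which is the lookup filter_main performs inside _unpickle. The redirection idea is sketched below with Python 3's subclassable pickle.Unpickler; the Python 2 code in the traceback hooks the same lookup through the unpickler's find_global/filter_main instead. Names here are illustrative.

import io
import pickle
import sys
import types

class Thing(object):
    pass
Thing.__module__ = '__main__'                       # simulate a class from a user script
setattr(sys.modules['__main__'], 'Thing', Thing)    # so pickling the instance succeeds

userModule = types.ModuleType('usermod')            # the worker's re-import of that script
userModule.Thing = Thing

class RedirectingUnpickler(pickle.Unpickler):
    def find_class(self, module, name):
        if module == '__main__':
            # This getattr is where the record's AttributeError surfaces when
            # the worker's copy of the user module lacks the class.
            return getattr(userModule, name)
        return super(RedirectingUnpickler, self).find_class(module, name)

obj = RedirectingUnpickler(io.BytesIO(pickle.dumps(Thing()))).load()
assert isinstance(obj, Thing)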
def __init__(
self,
jobStore,
jobWrapper,
localTempDir,
inputBlockFn,
jobStoreFileIDToCacheLocation,
terminateEvent,
):
"""
This constructor should not be called by the user,
FileStore instances are only provided as arguments
to the run function.
"""
self.jobStore = jobStore
self.jobWrapper = jobWrapper
self.localTempDir = os.path.abspath(localTempDir)
self.loggingMessages = []
self.filesToDelete = set()
self.jobsToDelete = set()
# Asynchronous writes stuff
self.workerNumber = 2
self.queue = Queue()
self.updateSemaphore = Semaphore()
self.terminateEvent = terminateEvent
# Function to write files to job store
def asyncWrite():
try:
while True:
try:
# Block for up to two seconds waiting for a file
args = self.queue.get(timeout=2)
except Empty:
# Check if termination event is signaled
# (set in the event of an exception in the worker)
if terminateEvent.isSet():
raise RuntimeError("The termination flag is set, exiting")
continue
# Normal termination condition is getting None from queue
if args == None:
break
inputFileHandle, jobStoreFileID = args
# We pass in a fileHandle, rather than the file-name, in case
# the file itself is deleted. The fileHandle itself should persist
# while we maintain the open file handle
with jobStore.updateFileStream(jobStoreFileID) as outputFileHandle:
bufferSize = 1000000 # TODO: This buffer number probably needs to be modified/tuned
while 1:
copyBuffer = inputFileHandle.read(bufferSize)
if not copyBuffer:
break
outputFileHandle.write(copyBuffer)
inputFileHandle.close()
except:
terminateEvent.set()
raise
self.workers = map(lambda i: Thread(target=asyncWrite), range(self.workerNumber))
for worker in self.workers:
worker.start()
self.inputBlockFn = inputBlockFn
# Caching
#
# For files in jobStore that are on the local disk,
# map of jobStoreFileIDs to locations in localTempDir.
self.jobStoreFileIDToCacheLocation = jobStoreFileIDToCacheLocation
|
def __init__(
self,
jobStore,
jobWrapper,
localTempDir,
inputBlockFn,
jobStoreFileIDToCacheLocation,
terminateEvent,
):
"""
This constructor should not be called by the user,
FileStore instances are only provided as arguments
to the run function.
"""
self.jobStore = jobStore
self.jobWrapper = jobWrapper
self.localTempDir = localTempDir
self.loggingMessages = []
self.filesToDelete = set()
self.jobsToDelete = set()
# Asynchronous writes stuff
self.workerNumber = 2
self.queue = Queue()
self.updateSemaphore = Semaphore()
self.terminateEvent = terminateEvent
# Function to write files to job store
def asyncWrite():
try:
while True:
try:
# Block for up to two seconds waiting for a file
args = self.queue.get(timeout=2)
except Empty:
# Check if termination event is signaled
# (set in the event of an exception in the worker)
if terminateEvent.isSet():
raise RuntimeError("The termination flag is set, exiting")
continue
# Normal termination condition is getting None from queue
if args == None:
break
inputFileHandle, jobStoreFileID = args
# We pass in a fileHandle, rather than the file-name, in case
# the file itself is deleted. The fileHandle itself should persist
# while we maintain the open file handle
with jobStore.updateFileStream(jobStoreFileID) as outputFileHandle:
bufferSize = 1000000 # TODO: This buffer number probably needs to be modified/tuned
while 1:
copyBuffer = inputFileHandle.read(bufferSize)
if not copyBuffer:
break
outputFileHandle.write(copyBuffer)
inputFileHandle.close()
except:
terminateEvent.set()
raise
self.workers = map(lambda i: Thread(target=asyncWrite), range(self.workerNumber))
for worker in self.workers:
worker.start()
self.inputBlockFn = inputBlockFn
# Caching
#
# For files in jobStore that are on the local disk,
# map of jobStoreFileIDs to locations in localTempDir.
self.jobStoreFileIDToCacheLocation = jobStoreFileIDToCacheLocation
|
https://github.com/DataBiosphere/toil/issues/484
|
[centos@arjun-fake-podk toil_tests]$ python module_tests.py
No handlers could be found for logger "toil.resource"
INFO:toil.lib.bioio:Logging set at level: DEBUG
INFO:toil.lib.bioio:Logging to file: /data1/star_log
INFO:toil.common:Using the single machine batch system
INFO:toil.jobStores.fileJobStore:Jobstore directory is: /data1/toil_tempdir
WARNING:toil.batchSystems.singleMachine:Limiting maxCores to CPU count of system (8).
INFO:toil.batchSystems.singleMachine:Setting up the thread pool with 80 workers, given a minimum CPU fraction of 0.100000 and a maximum CPU value of 8.
INFO:toil.common:Written the environment for the jobs to the environment file
WARNING:toil.resource:Can't globalize module ModuleDescriptor(dirPath='/usr/local/lib/python2.7/site-packages', name='toil.job', extension='.pyc').
WARNING:toil.resource:Can't globalize module ModuleDescriptor(dirPath='/usr/local/lib/python2.7/site-packages', name='toil.job', extension='.pyc').
WARNING:toil.resource:Can't globalize module ModuleDescriptor(dirPath='/usr/local/lib/python2.7/site-packages', name='toil.job', extension='.pyc').
INFO:toil.leader:Checked batch system has no running jobs and no updated jobs
INFO:toil.leader:Found 1 jobs to start and 0 jobs with successors to run
INFO:toil.leader:Starting the main loop
DEBUG:toil.leader:Built the jobs list, currently have 1 jobs to update and 0 jobs issued
DEBUG:toil.batchSystems.singleMachine:Issuing the command: /usr/local/bin/python -E /usr/local/lib/python2.7/site-packages/toil/worker.py /data1/toil_tempdir e/j/jobaSjV4A with memory: 2147483648, cores: 1, disk: 2147483648
DEBUG:toil.leader:Issued job with job store ID: e/j/jobaSjV4A and job batch system ID: 0 and cores: 1, disk: 2147483648, and memory: 2147483648
DEBUG:toil.batchSystems.singleMachine:Acquiring 2147483648 bytes of memory from a pool of 9223372036854775807.
DEBUG:toil.batchSystems.singleMachine:Acquiring 10 fractional cores from a pool of 80 to satisfy a request of 1.000000 cores
INFO:toil.batchSystems.singleMachine:Executing command: '/usr/local/bin/python -E /usr/local/lib/python2.7/site-packages/toil/worker.py /data1/toil_tempdir e/j/jobaSjV4A'.
Exception in thread Thread-13:
Traceback (most recent call last):
File "/usr/local/lib/python2.7/threading.py", line 810, in __bootstrap_inner
self.run()
File "/usr/local/lib/python2.7/threading.py", line 763, in run
self.__target(*self.__args, **self.__kwargs)
File "/usr/local/lib/python2.7/site-packages/toil/job.py", line 398, in asyncWrite
raise RuntimeError("The termination flag is set, exiting")
RuntimeError: The termination flag is set, exiting
Exception in thread Thread-14:
Traceback (most recent call last):
File "/usr/local/lib/python2.7/threading.py", line 810, in __bootstrap_inner
self.run()
File "/usr/local/lib/python2.7/threading.py", line 763, in run
self.__target(*self.__args, **self.__kwargs)
File "/usr/local/lib/python2.7/site-packages/toil/job.py", line 398, in asyncWrite
raise RuntimeError("The termination flag is set, exiting")
RuntimeError: The termination flag is set, exiting
Exception RuntimeError: RuntimeError('cannot join current thread',) in <bound method FileStore.__del__ of <toil.job.FileStore object at 0x7f5142899f90>> ignored
|
RuntimeError
|
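The asyncWrite loop above combines three shutdown signals: a two-second queue timeout, a shared termination Event set when any worker fails, and a None sentinel for normal exit. Reduced to its control flow, the protocol looks like this sketch:

import threading
try:
    from queue import Queue, Empty    # Python 3
except ImportError:
    from Queue import Queue, Empty    # Python 2

writeQueue = Queue()
terminateEvent = threading.Event()

def writer():
    while True:
        try:
            args = writeQueue.get(timeout=2)
        except Empty:
            # Woken by the timeout: bail out if another thread failed.
            if terminateEvent.is_set():
                raise RuntimeError("The termination flag is set, exiting")
            continue
        if args is None:              # sentinel: orderly shutdown
            break
        # ... copy the queued file handle into the job store here ...

worker = threading.Thread(target=writer)
worker.start()
writeQueue.put(None)                  # one sentinel per writer thread
worker.join()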
def writeGlobalFile(self, localFileName, cleanup=False):
"""
Takes a file (as a path) and uploads it to the global file store,
returns an ID that can be used to retrieve the file.
If cleanup is True then the global file will be deleted once the job
and all its successors have completed running. If not, the global file
must be deleted manually.
If the local file is a file returned by Job.FileStore.getLocalTempFile
or is in a directory, or, recursively, a subdirectory, returned by
Job.FileStore.getLocalTempDir then the write is asynchronous,
so further modifications during execution to the file pointed by
localFileName will result in undetermined behavior. Otherwise, the
method will block until the file is written to the file store.
"""
# Put the file into the cache if it is a path within localTempDir
absLocalFileName = os.path.abspath(localFileName)
cleanupID = None if not cleanup else self.jobWrapper.jobStoreID
if absLocalFileName.startswith(self.localTempDir):
jobStoreFileID = self.jobStore.getEmptyFileStoreID(cleanupID)
self.queue.put((open(absLocalFileName, "r"), jobStoreFileID))
# Chmod to make file read only to try to prevent accidental user modification
os.chmod(absLocalFileName, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
self.jobStoreFileIDToCacheLocation[jobStoreFileID] = absLocalFileName
else:
# Write the file directly to the file store
jobStoreFileID = self.jobStore.writeFile(localFileName, cleanupID)
return jobStoreFileID
|
def writeGlobalFile(self, localFileName, cleanup=False):
"""
Takes a file (as a path) and uploads it to the global file store,
returns an ID that can be used to retrieve the file.
If cleanup is True then the global file will be deleted once the job
and all its successors have completed running. If not, the global file
must be deleted manually.
The write is asynchronous, so further modifications during execution
to the file pointed by localFileName will result in undetermined behavior.
"""
jobStoreFileID = self.jobStore.getEmptyFileStoreID(
None if not cleanup else self.jobWrapper.jobStoreID
)
self.queue.put((open(localFileName, "r"), jobStoreFileID))
# Now put the file into the cache if it is a path within localTempDir
absLocalFileName = os.path.abspath(localFileName)
if absLocalFileName.startswith(self.localTempDir):
# Chmod to make file read only
os.chmod(absLocalFileName, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
self.jobStoreFileIDToCacheLocation[jobStoreFileID] = absLocalFileName
return jobStoreFileID
|
https://github.com/DataBiosphere/toil/issues/484
|
[centos@arjun-fake-podk toil_tests]$ python module_tests.py
No handlers could be found for logger "toil.resource"
INFO:toil.lib.bioio:Logging set at level: DEBUG
INFO:toil.lib.bioio:Logging to file: /data1/star_log
INFO:toil.common:Using the single machine batch system
INFO:toil.jobStores.fileJobStore:Jobstore directory is: /data1/toil_tempdir
WARNING:toil.batchSystems.singleMachine:Limiting maxCores to CPU count of system (8).
INFO:toil.batchSystems.singleMachine:Setting up the thread pool with 80 workers, given a minimum CPU fraction of 0.100000 and a maximum CPU value of 8.
INFO:toil.common:Written the environment for the jobs to the environment file
WARNING:toil.resource:Can't globalize module ModuleDescriptor(dirPath='/usr/local/lib/python2.7/site-packages', name='toil.job', extension='.pyc').
WARNING:toil.resource:Can't globalize module ModuleDescriptor(dirPath='/usr/local/lib/python2.7/site-packages', name='toil.job', extension='.pyc').
WARNING:toil.resource:Can't globalize module ModuleDescriptor(dirPath='/usr/local/lib/python2.7/site-packages', name='toil.job', extension='.pyc').
INFO:toil.leader:Checked batch system has no running jobs and no updated jobs
INFO:toil.leader:Found 1 jobs to start and 0 jobs with successors to run
INFO:toil.leader:Starting the main loop
DEBUG:toil.leader:Built the jobs list, currently have 1 jobs to update and 0 jobs issued
DEBUG:toil.batchSystems.singleMachine:Issuing the command: /usr/local/bin/python -E /usr/local/lib/python2.7/site-packages/toil/worker.py /data1/toil_tempdir e/j/jobaSjV4A with memory: 2147483648, cores: 1, disk: 2147483648
DEBUG:toil.leader:Issued job with job store ID: e/j/jobaSjV4A and job batch system ID: 0 and cores: 1, disk: 2147483648, and memory: 2147483648
DEBUG:toil.batchSystems.singleMachine:Acquiring 2147483648 bytes of memory from a pool of 9223372036854775807.
DEBUG:toil.batchSystems.singleMachine:Acquiring 10 fractional cores from a pool of 80 to satisfy a request of 1.000000 cores
INFO:toil.batchSystems.singleMachine:Executing command: '/usr/local/bin/python -E /usr/local/lib/python2.7/site-packages/toil/worker.py /data1/toil_tempdir e/j/jobaSjV4A'.
Exception in thread Thread-13:
Traceback (most recent call last):
File "/usr/local/lib/python2.7/threading.py", line 810, in __bootstrap_inner
self.run()
File "/usr/local/lib/python2.7/threading.py", line 763, in run
self.__target(*self.__args, **self.__kwargs)
File "/usr/local/lib/python2.7/site-packages/toil/job.py", line 398, in asyncWrite
raise RuntimeError("The termination flag is set, exiting")
RuntimeError: The termination flag is set, exiting
Exception in thread Thread-14:
Traceback (most recent call last):
File "/usr/local/lib/python2.7/threading.py", line 810, in __bootstrap_inner
self.run()
File "/usr/local/lib/python2.7/threading.py", line 763, in run
self.__target(*self.__args, **self.__kwargs)
File "/usr/local/lib/python2.7/site-packages/toil/job.py", line 398, in asyncWrite
raise RuntimeError("The termination flag is set, exiting")
RuntimeError: The termination flag is set, exiting
Exception RuntimeError: RuntimeError('cannot join current thread',) in <bound method FileStore.__del__ of <toil.job.FileStore object at 0x7f5142899f90>> ignored
|
RuntimeError
|
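After the fix, writeGlobalFile only takes the asynchronous, cached path when the file lives under the job's localTempDir; anything else is written synchronously. The decision reduces to a prefix test on absolute paths. The sketch below appends os.sep before comparing, a small hardening over a bare startswith (which would also match a sibling like localTempDir + '2'); that guard is this sketch's addition, not the record's.

import os

def isUnderTempDir(localFileName, localTempDir):
    absLocal = os.path.abspath(localFileName)
    # Appending os.sep prevents '/tmp/toil-job2' from matching '/tmp/toil-job'.
    return absLocal.startswith(os.path.abspath(localTempDir) + os.sep)

assert isUnderTempDir('/tmp/toil-job/out.bam', '/tmp/toil-job')
assert not isUnderTempDir('/home/user/out.bam', '/tmp/toil-job')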
def readGlobalFile(self, fileStoreID, userPath=None, cache=True):
"""
Returns an absolute path to a local, temporary copy of the file
keyed by fileStoreID.
:param userPath: a path to the name of file to which the global file will be
copied or hard-linked (see below).
:param cache: a boolean to switch on caching (see below). Caching will
attempt to keep copies of files between sequences of jobs run on the same
worker.
If cache=True and userPath is either:
(1) a file path contained within a directory or,
recursively, a subdirectory of a temporary directory returned by
Job.FileStore.getLocalTempDir(), or (2) a file path returned by
Job.FileStore.getLocalTempFile() then the file will be cached and
returned file will be read only (have permissions 444).
If userPath is specified and the file is already cached,
the userPath file will be a hard link to the actual location, else it
will be an actual copy of the file.
If the cache=False or userPath is not either of the above the file
will not be cached and will have default permissions. Note, if the file
is already cached this will result in two copies of the file on the system.
:rtype: the absolute path to the read file
"""
if fileStoreID in self.filesToDelete:
raise RuntimeError(
"Trying to access a file in the jobStore you've deleted: %s" % fileStoreID
)
if userPath != None:
userPath = os.path.abspath(userPath) # Make an absolute path
# Turn off caching if user file is not in localTempDir
if cache and not userPath.startswith(self.localTempDir):
cache = False
# When requesting a new file from the jobStore first check if fileStoreID
# is a key in jobStoreFileIDToCacheLocation.
if fileStoreID in self.jobStoreFileIDToCacheLocation:
cachedAbsFilePath = self.jobStoreFileIDToCacheLocation[fileStoreID]
if cache:
# If the user specifies a location and it is not the current location
# return a hardlink to the location, else return the original location
if userPath == None or userPath == cachedAbsFilePath:
return cachedAbsFilePath
# Chmod to make file read only
if os.path.exists(userPath):
os.remove(userPath)
os.link(cachedAbsFilePath, userPath)
return userPath
else:
# If caching is not true then make a copy of the file
localFilePath = userPath if userPath != None else self.getLocalTempFile()
shutil.copyfile(cachedAbsFilePath, localFilePath)
return localFilePath
else:
# If it is not in the cache read it from the jobStore to the
# desired location
localFilePath = userPath if userPath != None else self.getLocalTempFile()
self.jobStore.readFile(fileStoreID, localFilePath)
# If caching is enabled and the file is in local temp dir then
# add to cache and make read only
if cache:
assert localFilePath.startswith(self.localTempDir)
self.jobStoreFileIDToCacheLocation[fileStoreID] = localFilePath
os.chmod(localFilePath, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
return localFilePath
|
def readGlobalFile(self, fileStoreID, userPath=None):
"""
Returns an absolute path to a local, temporary copy of the file
keyed by fileStoreID.
        *The returned file will be read-only (permissions 444).*
        :param userPath: a path to the name of the file to which the global
        file will be copied or hard-linked (see below). userPath must be
        either (1) a file path contained within a directory or, recursively,
        a subdirectory of a temporary directory returned by
        Job.FileStore.getLocalTempDir(), or (2) a file path returned by
        Job.FileStore.getLocalTempFile(). If userPath is specified and
        neither condition holds, a RuntimeError will be raised. If userPath
        is specified and the file is already cached, the userPath file will
        be a hard link to the actual location, else it will be an actual
        copy of the file.
"""
if fileStoreID in self.filesToDelete:
raise RuntimeError(
"Trying to access a file in the jobStore you've deleted: %s" % fileStoreID
)
if userPath != None:
userPath = os.path.abspath(userPath) # Make an absolute path
# Check it is a valid location
if not userPath.startswith(self.localTempDir):
raise RuntimeError(
"The user path is not contained within the"
" temporary file hierarchy created by the job."
" User path: %s, temporary file root path: %s"
% (userPath, self.localTempDir)
)
# When requesting a new file from the jobStore first check if fileStoreID
# is a key in jobStoreFileIDToCacheLocation.
if fileStoreID in self.jobStoreFileIDToCacheLocation:
cachedAbsFilePath = self.jobStoreFileIDToCacheLocation[fileStoreID]
# If the user specifies a location and it is not the current location
# return a hardlink to the location, else return the original location
if userPath == None or userPath == cachedAbsFilePath:
return cachedAbsFilePath
if os.path.exists(userPath):
os.remove(userPath)
os.link(cachedAbsFilePath, userPath)
return userPath
else:
# If it is not in the cache read it from the jobStore to the
# desired location
localFilePath = userPath if userPath != None else self.getLocalTempFile()
self.jobStore.readFile(fileStoreID, localFilePath)
self.jobStoreFileIDToCacheLocation[fileStoreID] = localFilePath
# Chmod to make file read only
os.chmod(localFilePath, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
return localFilePath
|
https://github.com/DataBiosphere/toil/issues/484
|
[centos@arjun-fake-podk toil_tests]$ python module_tests.py
No handlers could be found for logger "toil.resource"
INFO:toil.lib.bioio:Logging set at level: DEBUG
INFO:toil.lib.bioio:Logging to file: /data1/star_log
INFO:toil.common:Using the single machine batch system
INFO:toil.jobStores.fileJobStore:Jobstore directory is: /data1/toil_tempdir
WARNING:toil.batchSystems.singleMachine:Limiting maxCores to CPU count of system (8).
INFO:toil.batchSystems.singleMachine:Setting up the thread pool with 80 workers, given a minimum CPU fraction of 0.100000 and a maximum CPU value of 8.
INFO:toil.common:Written the environment for the jobs to the environment file
WARNING:toil.resource:Can't globalize module ModuleDescriptor(dirPath='/usr/local/lib/python2.7/site-packages', name='toil.job', extension='.pyc').
WARNING:toil.resource:Can't globalize module ModuleDescriptor(dirPath='/usr/local/lib/python2.7/site-packages', name='toil.job', extension='.pyc').
WARNING:toil.resource:Can't globalize module ModuleDescriptor(dirPath='/usr/local/lib/python2.7/site-packages', name='toil.job', extension='.pyc').
INFO:toil.leader:Checked batch system has no running jobs and no updated jobs
INFO:toil.leader:Found 1 jobs to start and 0 jobs with successors to run
INFO:toil.leader:Starting the main loop
DEBUG:toil.leader:Built the jobs list, currently have 1 jobs to update and 0 jobs issued
DEBUG:toil.batchSystems.singleMachine:Issuing the command: /usr/local/bin/python -E /usr/local/lib/python2.7/site-packages/toil/worker.py /data1/toil_tempdir e/j/jobaSjV4A with memory: 2147483648, cores: 1, disk: 2147483648
DEBUG:toil.leader:Issued job with job store ID: e/j/jobaSjV4A and job batch system ID: 0 and cores: 1, disk: 2147483648, and memory: 2147483648
DEBUG:toil.batchSystems.singleMachine:Acquiring 2147483648 bytes of memory from a pool of 9223372036854775807.
DEBUG:toil.batchSystems.singleMachine:Acquiring 10 fractional cores from a pool of 80 to satisfy a request of 1.000000 cores
INFO:toil.batchSystems.singleMachine:Executing command: '/usr/local/bin/python -E /usr/local/lib/python2.7/site-packages/toil/worker.py /data1/toil_tempdir e/j/jobaSjV4A'.
Exception in thread Thread-13:
Traceback (most recent call last):
File "/usr/local/lib/python2.7/threading.py", line 810, in __bootstrap_inner
self.run()
File "/usr/local/lib/python2.7/threading.py", line 763, in run
self.__target(*self.__args, **self.__kwargs)
File "/usr/local/lib/python2.7/site-packages/toil/job.py", line 398, in asyncWrite
raise RuntimeError("The termination flag is set, exiting")
RuntimeError: The termination flag is set, exiting
Exception in thread Thread-14:
Traceback (most recent call last):
File "/usr/local/lib/python2.7/threading.py", line 810, in __bootstrap_inner
self.run()
File "/usr/local/lib/python2.7/threading.py", line 763, in run
self.__target(*self.__args, **self.__kwargs)
File "/usr/local/lib/python2.7/site-packages/toil/job.py", line 398, in asyncWrite
raise RuntimeError("The termination flag is set, exiting")
RuntimeError: The termination flag is set, exiting
Exception RuntimeError: RuntimeError('cannot join current thread',) in <bound method FileStore.__del__ of <toil.job.FileStore object at 0x7f5142899f90>> ignored
|
RuntimeError
|
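The mechanism shared by both versions of readGlobalFile above is serving repeat reads through the cache, with os.link used so a cache hit costs no extra disk space. Below is a minimal, self-contained Python 3 sketch of that idea; readCached, fetch, and the module-level cache dict are illustrative stand-ins, not Toil's actual FileStore API.

# A minimal Python 3 sketch of the hard-link caching idea shown above.
# readCached, fetch, and the cache dict are illustrative stand-ins, not
# Toil's actual FileStore API.
import os
import stat
import tempfile

cache = {}  # fileStoreID -> absolute path of the read-only cached copy

def readCached(fileStoreID, fetch, tempDir, userPath=None):
    """fetch(fileStoreID, path) must download the file to path."""
    if fileStoreID in cache:
        cached = cache[fileStoreID]
        if userPath is None or userPath == cached:
            return cached
        if os.path.exists(userPath):
            os.remove(userPath)
        os.link(cached, userPath)  # hard link: no second copy on disk
        return userPath
    if userPath is None:
        fd, userPath = tempfile.mkstemp(dir=tempDir)
        os.close(fd)
    fetch(fileStoreID, userPath)
    if userPath.startswith(tempDir):  # only cache files under the temp dir
        cache[fileStoreID] = userPath
        os.chmod(userPath, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)  # 444
    return userPath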
def _updateJobWhenDone(self):
"""
Asynchronously update the status of the job on the disk, first waiting
until the writing threads have finished and the inputBlockFn has stopped
blocking.
"""
def asyncUpdate():
try:
# Wait till all file writes have completed
for i in xrange(len(self.workers)):
self.queue.put(None)
for thread in self.workers:
thread.join()
# Wait till input block-fn returns - in the event of an exception
# this will eventually terminate
self.inputBlockFn()
                # Check the terminate event; if it is set we cannot guarantee
                # that the workers ended correctly, so we exit without
                # completing the update
if self.terminateEvent.isSet():
raise RuntimeError("The termination flag is set, exiting before update")
# Indicate any files that should be deleted once the update of
# the job wrapper is completed.
self.jobWrapper.filesToDelete = list(self.filesToDelete)
# Complete the job
self.jobStore.update(self.jobWrapper)
# Delete any remnant jobs
map(self.jobStore.delete, self.jobsToDelete)
# Delete any remnant files
map(self.jobStore.deleteFile, self.filesToDelete)
# Remove the files to delete list, having successfully removed the files
if len(self.filesToDelete) > 0:
self.jobWrapper.filesToDelete = []
                # Update again, persisting the emptied files-to-delete list
self.jobStore.update(self.jobWrapper)
except:
self.terminateEvent.set()
raise
finally:
# Indicate that _blockFn can return
# This code will always run
self.updateSemaphore.release()
# The update semaphore is held while the jobWrapper is written to disk
try:
self.updateSemaphore.acquire()
t = Thread(target=asyncUpdate)
t.start()
except: # This is to ensure that the semaphore is released in a crash to stop a deadlock scenario
self.updateSemaphore.release()
raise
|
def _updateJobWhenDone(self):
"""
Asynchronously update the status of the job on the disk, first waiting
until the writing threads have finished and the inputBlockFn has stopped
blocking.
"""
def asyncUpdate():
try:
# Wait till all file writes have completed
for i in xrange(len(self.workers)):
self.queue.put(None)
for thread in self.workers:
thread.join()
# Wait till input block-fn returns - in the event of an exception
# this will eventually terminate
self.inputBlockFn()
                # Check the terminate event; if it is set we cannot guarantee
                # that the workers ended correctly, so we exit without
                # completing the update
if self.terminateEvent.isSet():
raise RuntimeError("The termination flag is set, exiting before update")
# Indicate any files that should be deleted once the update of
# the job wrapper is completed.
self.jobWrapper.filesToDelete = len(self.filesToDelete)
# Complete the job
self.jobStore.update(self.jobWrapper)
# Delete any remnant jobs
map(self.jobStore.delete, self.jobsToDelete)
# Delete any remnant files
map(self.jobStore.deleteFile, self.filesToDelete)
# Remove the files to delete list, having successfully removed the files
if len(self.filesToDelete) > 0:
self.jobWrapper.filesToDelete = []
                # Update again, persisting the emptied files-to-delete list
self.jobStore.update(self.jobWrapper)
except:
self.terminateEvent.set()
raise
finally:
# Indicate that _blockFn can return
# This code will always run
self.updateSemaphore.release()
# The update semaphore is held while the jobWrapper is written to disk
try:
self.updateSemaphore.acquire()
t = Thread(target=asyncUpdate)
t.start()
except: # This is to ensure that the semaphore is released in a crash to stop a deadlock scenario
self.updateSemaphore.release()
raise
|
https://github.com/DataBiosphere/toil/issues/484
|
[centos@arjun-fake-podk toil_tests]$ python module_tests.py
No handlers could be found for logger "toil.resource"
INFO:toil.lib.bioio:Logging set at level: DEBUG
INFO:toil.lib.bioio:Logging to file: /data1/star_log
INFO:toil.common:Using the single machine batch system
INFO:toil.jobStores.fileJobStore:Jobstore directory is: /data1/toil_tempdir
WARNING:toil.batchSystems.singleMachine:Limiting maxCores to CPU count of system (8).
INFO:toil.batchSystems.singleMachine:Setting up the thread pool with 80 workers, given a minimum CPU fraction of 0.100000 and a maximum CPU value of 8.
INFO:toil.common:Written the environment for the jobs to the environment file
WARNING:toil.resource:Can't globalize module ModuleDescriptor(dirPath='/usr/local/lib/python2.7/site-packages', name='toil.job', extension='.pyc').
WARNING:toil.resource:Can't globalize module ModuleDescriptor(dirPath='/usr/local/lib/python2.7/site-packages', name='toil.job', extension='.pyc').
WARNING:toil.resource:Can't globalize module ModuleDescriptor(dirPath='/usr/local/lib/python2.7/site-packages', name='toil.job', extension='.pyc').
INFO:toil.leader:Checked batch system has no running jobs and no updated jobs
INFO:toil.leader:Found 1 jobs to start and 0 jobs with successors to run
INFO:toil.leader:Starting the main loop
DEBUG:toil.leader:Built the jobs list, currently have 1 jobs to update and 0 jobs issued
DEBUG:toil.batchSystems.singleMachine:Issuing the command: /usr/local/bin/python -E /usr/local/lib/python2.7/site-packages/toil/worker.py /data1/toil_tempdir e/j/jobaSjV4A with memory: 2147483648, cores: 1, disk: 2147483648
DEBUG:toil.leader:Issued job with job store ID: e/j/jobaSjV4A and job batch system ID: 0 and cores: 1, disk: 2147483648, and memory: 2147483648
DEBUG:toil.batchSystems.singleMachine:Acquiring 2147483648 bytes of memory from a pool of 9223372036854775807.
DEBUG:toil.batchSystems.singleMachine:Acquiring 10 fractional cores from a pool of 80 to satisfy a request of 1.000000 cores
INFO:toil.batchSystems.singleMachine:Executing command: '/usr/local/bin/python -E /usr/local/lib/python2.7/site-packages/toil/worker.py /data1/toil_tempdir e/j/jobaSjV4A'.
Exception in thread Thread-13:
Traceback (most recent call last):
File "/usr/local/lib/python2.7/threading.py", line 810, in __bootstrap_inner
self.run()
File "/usr/local/lib/python2.7/threading.py", line 763, in run
self.__target(*self.__args, **self.__kwargs)
File "/usr/local/lib/python2.7/site-packages/toil/job.py", line 398, in asyncWrite
raise RuntimeError("The termination flag is set, exiting")
RuntimeError: The termination flag is set, exiting
Exception in thread Thread-14:
Traceback (most recent call last):
File "/usr/local/lib/python2.7/threading.py", line 810, in __bootstrap_inner
self.run()
File "/usr/local/lib/python2.7/threading.py", line 763, in run
self.__target(*self.__args, **self.__kwargs)
File "/usr/local/lib/python2.7/site-packages/toil/job.py", line 398, in asyncWrite
raise RuntimeError("The termination flag is set, exiting")
RuntimeError: The termination flag is set, exiting
Exception RuntimeError: RuntimeError('cannot join current thread',) in <bound method FileStore.__del__ of <toil.job.FileStore object at 0x7f5142899f90>> ignored
|
RuntimeError
|
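The only functional difference between the two versions above is list(self.filesToDelete) versus len(self.filesToDelete): storing the count instead of the IDs leaves an int in a field that downstream cleanup code iterates over. A minimal sketch of the failure, with JobWrapper as a hypothetical stand-in:

# A minimal sketch of why the len(...) assignment breaks downstream
# consumers of filesToDelete; JobWrapper here is a hypothetical stand-in.
class JobWrapper:
    def __init__(self):
        self.filesToDelete = []

filesToDelete = {"fileA", "fileB"}
jw = JobWrapper()

jw.filesToDelete = len(filesToDelete)   # "before": stores the int 2
try:
    for fileID in jw.filesToDelete:     # cleanup code iterates this field
        pass
except TypeError as e:
    print(e)                            # 'int' object is not iterable

jw.filesToDelete = list(filesToDelete)  # "after": stores the IDs themselves
for fileID in jw.filesToDelete:
    print("would delete", fileID)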
def asyncUpdate():
try:
# Wait till all file writes have completed
for i in xrange(len(self.workers)):
self.queue.put(None)
for thread in self.workers:
thread.join()
# Wait till input block-fn returns - in the event of an exception
# this will eventually terminate
self.inputBlockFn()
            # Check the terminate event; if it is set we cannot guarantee
            # that the workers ended correctly, so we exit without
            # completing the update
if self.terminateEvent.isSet():
raise RuntimeError("The termination flag is set, exiting before update")
# Indicate any files that should be deleted once the update of
# the job wrapper is completed.
self.jobWrapper.filesToDelete = list(self.filesToDelete)
# Complete the job
self.jobStore.update(self.jobWrapper)
# Delete any remnant jobs
map(self.jobStore.delete, self.jobsToDelete)
# Delete any remnant files
map(self.jobStore.deleteFile, self.filesToDelete)
# Remove the files to delete list, having successfully removed the files
if len(self.filesToDelete) > 0:
self.jobWrapper.filesToDelete = []
            # Update again, persisting the emptied files-to-delete list
self.jobStore.update(self.jobWrapper)
except:
self.terminateEvent.set()
raise
finally:
# Indicate that _blockFn can return
# This code will always run
self.updateSemaphore.release()
|
def asyncUpdate():
try:
# Wait till all file writes have completed
for i in xrange(len(self.workers)):
self.queue.put(None)
for thread in self.workers:
thread.join()
# Wait till input block-fn returns - in the event of an exception
# this will eventually terminate
self.inputBlockFn()
            # Check the terminate event; if it is set we cannot guarantee
            # that the workers ended correctly, so we exit without
            # completing the update
if self.terminateEvent.isSet():
raise RuntimeError("The termination flag is set, exiting before update")
# Indicate any files that should be deleted once the update of
# the job wrapper is completed.
self.jobWrapper.filesToDelete = len(self.filesToDelete)
# Complete the job
self.jobStore.update(self.jobWrapper)
# Delete any remnant jobs
map(self.jobStore.delete, self.jobsToDelete)
# Delete any remnant files
map(self.jobStore.deleteFile, self.filesToDelete)
# Remove the files to delete list, having successfully removed the files
if len(self.filesToDelete) > 0:
self.jobWrapper.filesToDelete = []
            # Update again, persisting the emptied files-to-delete list
self.jobStore.update(self.jobWrapper)
except:
self.terminateEvent.set()
raise
finally:
# Indicate that _blockFn can return
# This code will always run
self.updateSemaphore.release()
|
https://github.com/DataBiosphere/toil/issues/484
|
[centos@arjun-fake-podk toil_tests]$ python module_tests.py
No handlers could be found for logger "toil.resource"
INFO:toil.lib.bioio:Logging set at level: DEBUG
INFO:toil.lib.bioio:Logging to file: /data1/star_log
INFO:toil.common:Using the single machine batch system
INFO:toil.jobStores.fileJobStore:Jobstore directory is: /data1/toil_tempdir
WARNING:toil.batchSystems.singleMachine:Limiting maxCores to CPU count of system (8).
INFO:toil.batchSystems.singleMachine:Setting up the thread pool with 80 workers, given a minimum CPU fraction of 0.100000 and a maximum CPU value of 8.
INFO:toil.common:Written the environment for the jobs to the environment file
WARNING:toil.resource:Can't globalize module ModuleDescriptor(dirPath='/usr/local/lib/python2.7/site-packages', name='toil.job', extension='.pyc').
WARNING:toil.resource:Can't globalize module ModuleDescriptor(dirPath='/usr/local/lib/python2.7/site-packages', name='toil.job', extension='.pyc').
WARNING:toil.resource:Can't globalize module ModuleDescriptor(dirPath='/usr/local/lib/python2.7/site-packages', name='toil.job', extension='.pyc').
INFO:toil.leader:Checked batch system has no running jobs and no updated jobs
INFO:toil.leader:Found 1 jobs to start and 0 jobs with successors to run
INFO:toil.leader:Starting the main loop
DEBUG:toil.leader:Built the jobs list, currently have 1 jobs to update and 0 jobs issued
DEBUG:toil.batchSystems.singleMachine:Issuing the command: /usr/local/bin/python -E /usr/local/lib/python2.7/site-packages/toil/worker.py /data1/toil_tempdir e/j/jobaSjV4A with memory: 2147483648, cores: 1, disk: 2147483648
DEBUG:toil.leader:Issued job with job store ID: e/j/jobaSjV4A and job batch system ID: 0 and cores: 1, disk: 2147483648, and memory: 2147483648
DEBUG:toil.batchSystems.singleMachine:Acquiring 2147483648 bytes of memory from a pool of 9223372036854775807.
DEBUG:toil.batchSystems.singleMachine:Acquiring 10 fractional cores from a pool of 80 to satisfy a request of 1.000000 cores
INFO:toil.batchSystems.singleMachine:Executing command: '/usr/local/bin/python -E /usr/local/lib/python2.7/site-packages/toil/worker.py /data1/toil_tempdir e/j/jobaSjV4A'.
Exception in thread Thread-13:
Traceback (most recent call last):
File "/usr/local/lib/python2.7/threading.py", line 810, in __bootstrap_inner
self.run()
File "/usr/local/lib/python2.7/threading.py", line 763, in run
self.__target(*self.__args, **self.__kwargs)
File "/usr/local/lib/python2.7/site-packages/toil/job.py", line 398, in asyncWrite
raise RuntimeError("The termination flag is set, exiting")
RuntimeError: The termination flag is set, exiting
Exception in thread Thread-14:
Traceback (most recent call last):
File "/usr/local/lib/python2.7/threading.py", line 810, in __bootstrap_inner
self.run()
File "/usr/local/lib/python2.7/threading.py", line 763, in run
self.__target(*self.__args, **self.__kwargs)
File "/usr/local/lib/python2.7/site-packages/toil/job.py", line 398, in asyncWrite
raise RuntimeError("The termination flag is set, exiting")
RuntimeError: The termination flag is set, exiting
Exception RuntimeError: RuntimeError('cannot join current thread',) in <bound method FileStore.__del__ of <toil.job.FileStore object at 0x7f5142899f90>> ignored
|
RuntimeError
|
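The shutdown protocol at the top of asyncUpdate is the standard sentinel pattern for draining asynchronous writers: enqueue one None per worker thread, then join them all. A self-contained Python 3 sketch (thread counts and names are illustrative):

# A Python 3 sketch of the sentinel-based shutdown used in asyncUpdate:
# one None per worker tells each consumer to exit; join() then blocks
# until every pending write has been processed.
import queue
import threading

q = queue.Queue()

def worker():
    while True:
        item = q.get()
        if item is None:      # sentinel: no more work is coming
            return
        print("writing", item)

workers = [threading.Thread(target=worker) for _ in range(2)]
for t in workers:
    t.start()
for name in ("a", "b", "c"):
    q.put(name)
for _ in workers:             # exactly one sentinel per worker
    q.put(None)
for t in workers:
    t.join()                  # all file writes have completed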
def _cleanLocalTempDir(self, cacheSize):
"""
        At the end of the job, remove all localTempDir files except those
        that appear as values in jobStoreFileIDToCacheLocation.
:param cacheSize: the total number of bytes of files allowed in the cache.
"""
# Remove files so that the total cached files are smaller than a cacheSize
# List of pairs of (fileSize, fileStoreID) for cached files
cachedFileSizes = map(
lambda x: (os.stat(self.jobStoreFileIDToCacheLocation[x]).st_size, x),
self.jobStoreFileIDToCacheLocation.keys(),
)
# Total number of bytes stored in cached files
totalCachedFileSizes = sum(map(lambda x: x[0], cachedFileSizes))
# Remove smallest files first - this is not obviously best, could do it a different
# way
cachedFileSizes.sort()
cachedFileSizes.reverse()
# Now do the actual file removal
while totalCachedFileSizes > cacheSize:
fileSize, fileStoreID = cachedFileSizes.pop()
filePath = self.jobStoreFileIDToCacheLocation[fileStoreID]
self.jobStoreFileIDToCacheLocation.pop(fileStoreID)
os.remove(filePath)
totalCachedFileSizes -= fileSize
assert totalCachedFileSizes >= 0
# Iterate from the base of localTempDir and remove all
# files/empty directories, recursively
cachedFiles = set(self.jobStoreFileIDToCacheLocation.values())
def clean(dirOrFile):
canRemove = True
if os.path.isdir(dirOrFile):
for f in os.listdir(dirOrFile):
canRemove = canRemove and clean(os.path.join(dirOrFile, f))
if canRemove:
os.rmdir(dirOrFile) # Dir should be empty if canRemove is true
return canRemove
if dirOrFile in cachedFiles:
return False
os.remove(dirOrFile)
return True
clean(self.localTempDir)
|
def _cleanLocalTempDir(self, cacheSize):
"""
        At the end of the job, remove all localTempDir files except those
        that appear as values in jobStoreFileIDToCacheLocation.
The param cacheSize is the total number of bytes of files allowed in the cache.
"""
# Remove files so that the total cached files are smaller than a cacheSize
# List of pairs of (fileSize, fileStoreID) for cached files
cachedFileSizes = map(
lambda x: (os.stat(self.jobStoreFileIDToCacheLocation[x]).st_size, x),
self.jobStoreFileIDToCacheLocation.keys(),
)
# Total number of bytes stored in cached files
totalCachedFileSizes = sum(map(lambda x: x[0], cachedFileSizes))
# Remove smallest files first - this is not obviously best, could do it a different
# way
cachedFileSizes.sort()
cachedFileSizes.reverse()
# Now do the actual file removal
while totalCachedFileSizes > cacheSize:
fileSize, fileStoreID = cachedFileSizes.pop()
filePath = self.jobStoreFileIDToCacheLocation[fileStoreID]
self.jobStoreFileIDToCacheLocation.pop(fileStoreID)
os.remove(filePath)
totalCachedFileSizes -= fileSize
assert totalCachedFileSizes >= 0
# Iterate from the base of localTempDir and remove all
# files/empty directories, recursively
cachedFiles = set(self.jobStoreFileIDToCacheLocation.values())
def clean(dirOrFile):
canRemove = True
if os.path.isdir(dirOrFile):
for f in os.listdir(dirOrFile):
canRemove = canRemove and clean(os.path.join(dirOrFile, f))
if canRemove:
os.rmdir(dirOrFile) # Dir should be empty if canRemove is true
return canRemove
if dirOrFile in cachedFiles:
return False
os.remove(dirOrFile)
return True
clean(self.localTempDir)
|
https://github.com/DataBiosphere/toil/issues/484
|
[centos@arjun-fake-podk toil_tests]$ python module_tests.py
No handlers could be found for logger "toil.resource"
INFO:toil.lib.bioio:Logging set at level: DEBUG
INFO:toil.lib.bioio:Logging to file: /data1/star_log
INFO:toil.common:Using the single machine batch system
INFO:toil.jobStores.fileJobStore:Jobstore directory is: /data1/toil_tempdir
WARNING:toil.batchSystems.singleMachine:Limiting maxCores to CPU count of system (8).
INFO:toil.batchSystems.singleMachine:Setting up the thread pool with 80 workers, given a minimum CPU fraction of 0.100000 and a maximum CPU value of 8.
INFO:toil.common:Written the environment for the jobs to the environment file
WARNING:toil.resource:Can't globalize module ModuleDescriptor(dirPath='/usr/local/lib/python2.7/site-packages', name='toil.job', extension='.pyc').
WARNING:toil.resource:Can't globalize module ModuleDescriptor(dirPath='/usr/local/lib/python2.7/site-packages', name='toil.job', extension='.pyc').
WARNING:toil.resource:Can't globalize module ModuleDescriptor(dirPath='/usr/local/lib/python2.7/site-packages', name='toil.job', extension='.pyc').
INFO:toil.leader:Checked batch system has no running jobs and no updated jobs
INFO:toil.leader:Found 1 jobs to start and 0 jobs with successors to run
INFO:toil.leader:Starting the main loop
DEBUG:toil.leader:Built the jobs list, currently have 1 jobs to update and 0 jobs issued
DEBUG:toil.batchSystems.singleMachine:Issuing the command: /usr/local/bin/python -E /usr/local/lib/python2.7/site-packages/toil/worker.py /data1/toil_tempdir e/j/jobaSjV4A with memory: 2147483648, cores: 1, disk: 2147483648
DEBUG:toil.leader:Issued job with job store ID: e/j/jobaSjV4A and job batch system ID: 0 and cores: 1, disk: 2147483648, and memory: 2147483648
DEBUG:toil.batchSystems.singleMachine:Acquiring 2147483648 bytes of memory from a pool of 9223372036854775807.
DEBUG:toil.batchSystems.singleMachine:Acquiring 10 fractional cores from a pool of 80 to satisfy a request of 1.000000 cores
INFO:toil.batchSystems.singleMachine:Executing command: '/usr/local/bin/python -E /usr/local/lib/python2.7/site-packages/toil/worker.py /data1/toil_tempdir e/j/jobaSjV4A'.
Exception in thread Thread-13:
Traceback (most recent call last):
File "/usr/local/lib/python2.7/threading.py", line 810, in __bootstrap_inner
self.run()
File "/usr/local/lib/python2.7/threading.py", line 763, in run
self.__target(*self.__args, **self.__kwargs)
File "/usr/local/lib/python2.7/site-packages/toil/job.py", line 398, in asyncWrite
raise RuntimeError("The termination flag is set, exiting")
RuntimeError: The termination flag is set, exiting
Exception in thread Thread-14:
Traceback (most recent call last):
File "/usr/local/lib/python2.7/threading.py", line 810, in __bootstrap_inner
self.run()
File "/usr/local/lib/python2.7/threading.py", line 763, in run
self.__target(*self.__args, **self.__kwargs)
File "/usr/local/lib/python2.7/site-packages/toil/job.py", line 398, in asyncWrite
raise RuntimeError("The termination flag is set, exiting")
RuntimeError: The termination flag is set, exiting
Exception RuntimeError: RuntimeError('cannot join current thread',) in <bound method FileStore.__del__ of <toil.job.FileStore object at 0x7f5142899f90>> ignored
|
RuntimeError
|
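The eviction policy in _cleanLocalTempDir above removes the smallest cached files first until the cache fits the byte budget. A compact sketch of just that pass (evict and cacheDict are illustrative names, not Toil's API):

# A sketch of the smallest-first, size-budgeted eviction above.
# cacheDict maps fileStoreID -> path; evicted files are deleted from disk.
import os

def evict(cacheDict, cacheSize):
    sizes = sorted(
        ((os.stat(p).st_size, fid) for fid, p in cacheDict.items()),
        reverse=True,  # largest first, so pop() yields the smallest
    )
    total = sum(size for size, _ in sizes)
    while total > cacheSize:
        size, fid = sizes.pop()
        os.remove(cacheDict.pop(fid))
        total -= size
    assert total >= 0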
def clean(self, rootJobWrapper):
"""
        Function to clean up the state of a jobStore after a restart.
Fixes jobs that might have been partially updated.
Resets the try counts.
Removes jobs that are not successors of the rootJobWrapper.
"""
# Iterate from the root jobWrapper and collate all jobs that are reachable from it
# All other jobs returned by self.jobs() are orphaned and can be removed
reachableFromRoot = set()
def getConnectedJobs(jobWrapper):
if jobWrapper.jobStoreID in reachableFromRoot:
return
reachableFromRoot.add(jobWrapper.jobStoreID)
for jobs in jobWrapper.stack:
for successorJobStoreID in map(lambda x: x[0], jobs):
if successorJobStoreID not in reachableFromRoot and self.exists(
successorJobStoreID
):
getConnectedJobs(self.load(successorJobStoreID))
getConnectedJobs(rootJobWrapper)
# Cleanup the state of each jobWrapper
for jobWrapper in self.jobs():
changed = False # Flag to indicate if we need to update the jobWrapper
# on disk
if len(jobWrapper.filesToDelete) != 0:
# Delete any files that should already be deleted
for fileID in jobWrapper.filesToDelete:
logger.critical(
"Removing file in job store: %s that was marked for deletion but not previously removed"
% fileID
)
self.deleteFile(fileID)
jobWrapper.filesToDelete = []
changed = True
# Delete a jobWrapper if it is not reachable from the rootJobWrapper
if jobWrapper.jobStoreID not in reachableFromRoot:
logger.critical(
"Removing job: %s that is not a successor of the root job in cleanup"
% jobWrapper.jobStoreID
)
self.delete(jobWrapper.jobStoreID)
continue
            # While the jobs at the end of the stack are already deleted, remove
            # them from the stack (this handles the case where the jobWrapper
            # had successors to run but had not yet been updated to reflect this)
while len(jobWrapper.stack) > 0:
jobs = [
command for command in jobWrapper.stack[-1] if self.exists(command[0])
]
if len(jobs) < len(jobWrapper.stack[-1]):
changed = True
if len(jobs) > 0:
jobWrapper.stack[-1] = jobs
break
else:
jobWrapper.stack.pop()
else:
break
# Reset the retry count of the jobWrapper
if jobWrapper.remainingRetryCount != self._defaultTryCount():
jobWrapper.remainingRetryCount = self._defaultTryCount()
changed = True
# This cleans the old log file which may
# have been left if the jobWrapper is being retried after a jobWrapper failure.
if jobWrapper.logJobStoreFileID != None:
self.delete(jobWrapper.logJobStoreFileID)
jobWrapper.logJobStoreFileID = None
changed = True
if changed: # Update, but only if a change has occurred
self.update(jobWrapper)
# Remove any crufty stats/logging files from the previous run
self.readStatsAndLogging(lambda x: None)
|
def clean(self, rootJobWrapper):
"""
        Function to clean up the state of a jobStore after a restart.
Fixes jobs that might have been partially updated.
Resets the try counts.
Removes jobs that are not successors of the rootJobWrapper.
"""
# Iterate from the root jobWrapper and collate all jobs that are reachable from it
# All other jobs returned by self.jobs() are orphaned and can be removed
reachableFromRoot = set()
def getConnectedJobs(jobWrapper):
if jobWrapper.jobStoreID in reachableFromRoot:
return
reachableFromRoot.add(jobWrapper.jobStoreID)
for jobs in jobWrapper.stack:
for successorJobStoreID in map(lambda x: x[0], jobs):
if successorJobStoreID not in reachableFromRoot and self.exists(
successorJobStoreID
):
getConnectedJobs(self.load(successorJobStoreID))
getConnectedJobs(rootJobWrapper)
# Cleanup the state of each jobWrapper
for jobWrapper in self.jobs():
changed = False # Flag to indicate if we need to update the jobWrapper
# on disk
if len(jobWrapper.filesToDelete) != 0:
# Delete any files that should already be deleted
for fileID in jobWrapper.filesToDelete:
logger.critical(
"Removing file in job store: %s that was marked for deletion but not previously removed"
% fileID
)
self.deleteFile(fileID)
jobWrapper.filesToDelete = set()
changed = True
# Delete a jobWrapper if it is not reachable from the rootJobWrapper
if jobWrapper.jobStoreID not in reachableFromRoot:
logger.critical(
"Removing job: %s that is not a successor of the root job in cleanup"
% jobWrapper.jobStoreID
)
self.delete(jobWrapper.jobStoreID)
continue
            # While the jobs at the end of the stack are already deleted, remove
            # them from the stack (this handles the case where the jobWrapper
            # had successors to run but had not yet been updated to reflect this)
while len(jobWrapper.stack) > 0:
jobs = [
command for command in jobWrapper.stack[-1] if self.exists(command[0])
]
if len(jobs) < len(jobWrapper.stack[-1]):
changed = True
if len(jobs) > 0:
jobWrapper.stack[-1] = jobs
break
else:
jobWrapper.stack.pop()
else:
break
# Reset the retry count of the jobWrapper
if jobWrapper.remainingRetryCount != self._defaultTryCount():
jobWrapper.remainingRetryCount = self._defaultTryCount()
changed = True
# This cleans the old log file which may
# have been left if the jobWrapper is being retried after a jobWrapper failure.
if jobWrapper.logJobStoreFileID != None:
self.delete(jobWrapper.logJobStoreFileID)
jobWrapper.logJobStoreFileID = None
changed = True
if changed: # Update, but only if a change has occurred
self.update(jobWrapper)
# Remove any crufty stats/logging files from the previous run
self.readStatsAndLogging(lambda x: None)
|
https://github.com/DataBiosphere/toil/issues/484
|
[centos@arjun-fake-podk toil_tests]$ python module_tests.py
No handlers could be found for logger "toil.resource"
INFO:toil.lib.bioio:Logging set at level: DEBUG
INFO:toil.lib.bioio:Logging to file: /data1/star_log
INFO:toil.common:Using the single machine batch system
INFO:toil.jobStores.fileJobStore:Jobstore directory is: /data1/toil_tempdir
WARNING:toil.batchSystems.singleMachine:Limiting maxCores to CPU count of system (8).
INFO:toil.batchSystems.singleMachine:Setting up the thread pool with 80 workers, given a minimum CPU fraction of 0.100000 and a maximum CPU value of 8.
INFO:toil.common:Written the environment for the jobs to the environment file
WARNING:toil.resource:Can't globalize module ModuleDescriptor(dirPath='/usr/local/lib/python2.7/site-packages', name='toil.job', extension='.pyc').
WARNING:toil.resource:Can't globalize module ModuleDescriptor(dirPath='/usr/local/lib/python2.7/site-packages', name='toil.job', extension='.pyc').
WARNING:toil.resource:Can't globalize module ModuleDescriptor(dirPath='/usr/local/lib/python2.7/site-packages', name='toil.job', extension='.pyc').
INFO:toil.leader:Checked batch system has no running jobs and no updated jobs
INFO:toil.leader:Found 1 jobs to start and 0 jobs with successors to run
INFO:toil.leader:Starting the main loop
DEBUG:toil.leader:Built the jobs list, currently have 1 jobs to update and 0 jobs issued
DEBUG:toil.batchSystems.singleMachine:Issuing the command: /usr/local/bin/python -E /usr/local/lib/python2.7/site-packages/toil/worker.py /data1/toil_tempdir e/j/jobaSjV4A with memory: 2147483648, cores: 1, disk: 2147483648
DEBUG:toil.leader:Issued job with job store ID: e/j/jobaSjV4A and job batch system ID: 0 and cores: 1, disk: 2147483648, and memory: 2147483648
DEBUG:toil.batchSystems.singleMachine:Acquiring 2147483648 bytes of memory from a pool of 9223372036854775807.
DEBUG:toil.batchSystems.singleMachine:Acquiring 10 fractional cores from a pool of 80 to satisfy a request of 1.000000 cores
INFO:toil.batchSystems.singleMachine:Executing command: '/usr/local/bin/python -E /usr/local/lib/python2.7/site-packages/toil/worker.py /data1/toil_tempdir e/j/jobaSjV4A'.
Exception in thread Thread-13:
Traceback (most recent call last):
File "/usr/local/lib/python2.7/threading.py", line 810, in __bootstrap_inner
self.run()
File "/usr/local/lib/python2.7/threading.py", line 763, in run
self.__target(*self.__args, **self.__kwargs)
File "/usr/local/lib/python2.7/site-packages/toil/job.py", line 398, in asyncWrite
raise RuntimeError("The termination flag is set, exiting")
RuntimeError: The termination flag is set, exiting
Exception in thread Thread-14:
Traceback (most recent call last):
File "/usr/local/lib/python2.7/threading.py", line 810, in __bootstrap_inner
self.run()
File "/usr/local/lib/python2.7/threading.py", line 763, in run
self.__target(*self.__args, **self.__kwargs)
File "/usr/local/lib/python2.7/site-packages/toil/job.py", line 398, in asyncWrite
raise RuntimeError("The termination flag is set, exiting")
RuntimeError: The termination flag is set, exiting
Exception RuntimeError: RuntimeError('cannot join current thread',) in <bound method FileStore.__del__ of <toil.job.FileStore object at 0x7f5142899f90>> ignored
|
RuntimeError
|
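The orphan-removal half of clean() above is a plain reachability computation: walk the successor stacks depth-first from the root and keep everything visited. A minimal sketch, under the assumption that load(id) returns an object with jobStoreID and stack attributes and exists(id) reports whether a job is still stored:

# A sketch of the reachability pass in clean(): every jobStoreID not in
# the returned set is an orphan and can be deleted.
def reachable_ids(root, load, exists):
    seen = set()
    def visit(job):
        if job.jobStoreID in seen:
            return
        seen.add(job.jobStoreID)
        for level in job.stack:          # stack of lists of successor entries
            for entry in level:
                succ_id = entry[0]       # first element of each entry is the ID
                if succ_id not in seen and exists(succ_id):
                    visit(load(succ_id))
    visit(root)
    return seen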
def getIssuedBatchJobIDs(self):
"""
A list of jobs (as jobIDs) currently issued (may be running, or maybe just waiting).
"""
# TODO: Ensure jobList holds jobs that have been "launched" from Mesos
jobList = set()
for queue in self.jobQueueList.values():
for item in queue:
jobList.add(item.jobID)
for key in self.runningJobMap.keys():
jobList.add(key)
return list(jobList)
|
def getIssuedBatchJobIDs(self):
"""
A list of jobs (as jobIDs) currently issued (may be running, or maybe just waiting).
"""
# TODO: Ensure jobList holds jobs that have been "launched" from Mesos
jobList = []
for k, queue in self.jobQueueList.iteritems():
for item in queue:
jobList.append(item.jobID)
for k, v in self.runningJobMap.iteritems():
jobList.append(k)
return jobList
|
https://github.com/DataBiosphere/toil/issues/433
|
Failed to call scheduler's resourceOffer
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/toil/batchSystems/mesos/batchSystem.py", line 359, in resourceOffers
self._updateStateToRunning(offer, task)
File "/usr/local/lib/python2.7/dist-packages/toil/batchSystems/mesos/batchSystem.py", line 310, in _updateStateToRunning
self._deleteByJobID(int(task.task_id.value))
File "/usr/local/lib/python2.7/dist-packages/toil/batchSystems/mesos/batchSystem.py", line 301, in _deleteByJobID
for key, jobType in self.jobQueueList.iteritems():
RuntimeError: dictionary changed size during iteration
I0929 00:22:16.111704 31507 sched.cpp:1623] Asked to abort the driver
INFO:realtime:Aligned /tmp/tmpvAfao6/localTempDir/output.gam
I0929 00:22:16.117781 31507 sched.cpp:856] Aborting framework '20150928-184937-83886090-5050-35771-0006'
|
RuntimeError
|
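The traceback above is the classic symptom this family of changes addresses: iterating a live dict view while the dict is mutated (by the loop body or another thread) raises RuntimeError, and snapshotting first avoids it. A small Python 3 reproduction:

# A minimal reproduction of "dictionary changed size during iteration"
# and the snapshot fix used in the "after" version above.
d = {"a": 1, "b": 2}
try:
    for k in d:        # live view over the dict
        d.pop(k)       # mutation while iterating
except RuntimeError as e:
    print(e)           # dictionary changed size during iteration

d = {"a": 1, "b": 2}
for k in list(d):      # snapshot: safe to mutate d inside the loop
    d.pop(k)
print(d)               # {}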
def getRunningBatchJobIDs(self):
"""
        Gets a map of jobs (as jobIDs) currently running (not just waiting) to how long
        they have been running (in seconds).
"""
currentTime = dict()
for jobID, data in self.runningJobMap.items():
currentTime[jobID] = time.time() - data.startTime
return currentTime
|
def getRunningBatchJobIDs(self):
"""
        Gets a map of jobs (as jobIDs) currently running (not just waiting) to how long
        they have been running (in seconds).
"""
currentTime = dict()
for jobID, data in self.runningJobMap.iteritems():
currentTime[jobID] = time.time() - data.startTime
return currentTime
|
https://github.com/DataBiosphere/toil/issues/433
|
Failed to call scheduler's resourceOffer
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/toil/batchSystems/mesos/batchSystem.py", line 359, in resourceOffers
self._updateStateToRunning(offer, task)
File "/usr/local/lib/python2.7/dist-packages/toil/batchSystems/mesos/batchSystem.py", line 310, in _updateStateToRunning
self._deleteByJobID(int(task.task_id.value))
File "/usr/local/lib/python2.7/dist-packages/toil/batchSystems/mesos/batchSystem.py", line 301, in _deleteByJobID
for key, jobType in self.jobQueueList.iteritems():
RuntimeError: dictionary changed size during iteration
I0929 00:22:16.111704 31507 sched.cpp:1623] Asked to abort the driver
INFO:realtime:Aligned /tmp/tmpvAfao6/localTempDir/output.gam
I0929 00:22:16.117781 31507 sched.cpp:856] Aborting framework '20150928-184937-83886090-5050-35771-0006'
|
RuntimeError
|
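Same theme here: building the elapsed-time map from a snapshot of items() keeps getRunningBatchJobIDs safe against callback threads mutating runningJobMap mid-loop. A sketch, with TaskData as a hypothetical stand-in for the value type:

# A sketch of the elapsed-time map above, built from a snapshot so a
# concurrent thread may add or remove entries while we iterate.
import time
from collections import namedtuple

TaskData = namedtuple("TaskData", "startTime")
runningJobMap = {7: TaskData(startTime=time.time() - 5.0)}

currentTime = {
    jobID: time.time() - data.startTime
    for jobID, data in list(runningJobMap.items())  # snapshot, not a live view
}
print(currentTime)  # roughly {7: 5.0}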
def _sortJobsByResourceReq(self):
job_types = self.jobQueueList.keys()
# sorts from largest to smallest core usage
# TODO: add a size() method to ResourceSummary and use it as the key. Ask me why.
job_types.sort(key=lambda resourceRequirement: ResourceRequirement.cores)
job_types.reverse()
return job_types
|
def _sortJobsByResourceReq(self):
job_types = list(self.jobQueueList.keys())
# sorts from largest to smallest core usage
# TODO: add a size() method to ResourceSummary and use it as the key. Ask me why.
job_types.sort(key=lambda resourceRequirement: ResourceRequirement.cores)
job_types.reverse()
return job_types
|
https://github.com/DataBiosphere/toil/issues/433
|
Failed to call scheduler's resourceOffer
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/toil/batchSystems/mesos/batchSystem.py", line 359, in resourceOffers
self._updateStateToRunning(offer, task)
File "/usr/local/lib/python2.7/dist-packages/toil/batchSystems/mesos/batchSystem.py", line 310, in _updateStateToRunning
self._deleteByJobID(int(task.task_id.value))
File "/usr/local/lib/python2.7/dist-packages/toil/batchSystems/mesos/batchSystem.py", line 301, in _deleteByJobID
for key, jobType in self.jobQueueList.iteritems():
RuntimeError: dictionary changed size during iteration
I0929 00:22:16.111704 31507 sched.cpp:1623] Asked to abort the driver
INFO:realtime:Aligned /tmp/tmpvAfao6/localTempDir/output.gam
I0929 00:22:16.117781 31507 sched.cpp:856] Aborting framework '20150928-184937-83886090-5050-35771-0006'
|
RuntimeError
|
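Note that in both versions above the sort key, lambda resourceRequirement: ResourceRequirement.cores, ignores its argument and evaluates a class attribute, so every element gets the same key and the sort is effectively a no-op. What it presumably intends is to order job types by their own core requirement, largest first; a sketch of that intent, with ResourceRequirement as a hypothetical namedtuple stand-in:

# A sketch of the presumably intended largest-cores-first ordering.
from collections import namedtuple

ResourceRequirement = namedtuple("ResourceRequirement", "memory cores disk")

job_types = [
    ResourceRequirement(memory=1 << 30, cores=2, disk=1 << 30),
    ResourceRequirement(memory=2 << 30, cores=8, disk=1 << 30),
]
job_types.sort(key=lambda req: req.cores, reverse=True)
print([jt.cores for jt in job_types])  # [8, 2]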
def _deleteByJobID(
self,
jobID,
):
# FIXME: not efficient, I'm sure.
for jobType in self.jobQueueList.values():
for job in jobType:
if jobID == job.jobID:
jobType.remove(job)
|
def _deleteByJobID(
self,
jobID,
):
# FIXME: not efficient, I'm sure.
for key, jobType in self.jobQueueList.iteritems():
for job in jobType:
if jobID == job.jobID:
jobType.remove(job)
|
https://github.com/DataBiosphere/toil/issues/433
|
Failed to call scheduler's resourceOffer
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/toil/batchSystems/mesos/batchSystem.py", line 359, in resourceOffers
self._updateStateToRunning(offer, task)
File "/usr/local/lib/python2.7/dist-packages/toil/batchSystems/mesos/batchSystem.py", line 310, in _updateStateToRunning
self._deleteByJobID(int(task.task_id.value))
File "/usr/local/lib/python2.7/dist-packages/toil/batchSystems/mesos/batchSystem.py", line 301, in _deleteByJobID
for key, jobType in self.jobQueueList.iteritems():
RuntimeError: dictionary changed size during iteration
I0929 00:22:16.111704 31507 sched.cpp:1623] Asked to abort the driver
INFO:realtime:Aligned /tmp/tmpvAfao6/localTempDir/output.gam
I0929 00:22:16.117781 31507 sched.cpp:856] Aborting framework '20150928-184937-83886090-5050-35771-0006'
|
RuntimeError
|
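_deleteByJobID is the frame that actually raised in the traceback above: another thread mutated jobQueueList while iteritems() was walking it. Beyond the values() snapshot in the fix, note that queue.remove(job) inside a loop over that same queue has its own skip-an-element hazard, so a defensive version snapshots both levels. A sketch with illustrative names:

# A defensive sketch of delete-by-ID over a dict of queues: snapshot the
# dict's values and each queue before mutating either.
def delete_by_job_id(jobQueueList, jobID):
    for jobQueue in list(jobQueueList.values()):  # snapshot of the queues
        for job in list(jobQueue):                # snapshot of each queue
            if job.jobID == jobID:
                jobQueue.remove(job)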
def __reconcile(self, driver):
"""
Queries the master about a list of running tasks. If the master has no knowledge of them, their state will be
updated to LOST.
"""
        # FIXME: we need additional reconciliation. What about tasks the master knows about but that haven't been updated?
now = time.time()
if now > self.lastReconciliation + self.reconciliationPeriod:
self.lastReconciliation = now
driver.reconcileTasks(self.runningJobMap.keys())
|
def __reconcile(self, driver):
"""
Queries the master about a list of running tasks. If the master has no knowledge of them, their state will be
updated to LOST.
"""
        # FIXME: we need additional reconciliation. What about tasks the master knows about but that haven't been updated?
now = time.time()
if now > self.lastReconciliation + self.reconciliationPeriod:
self.lastReconciliation = now
driver.reconcileTasks(list(self.runningJobMap.keys()))
|
https://github.com/DataBiosphere/toil/issues/433
|
Failed to call scheduler's resourceOffer
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/toil/batchSystems/mesos/batchSystem.py", line 359, in resourceOffers
self._updateStateToRunning(offer, task)
File "/usr/local/lib/python2.7/dist-packages/toil/batchSystems/mesos/batchSystem.py", line 310, in _updateStateToRunning
self._deleteByJobID(int(task.task_id.value))
File "/usr/local/lib/python2.7/dist-packages/toil/batchSystems/mesos/batchSystem.py", line 301, in _deleteByJobID
for key, jobType in self.jobQueueList.iteritems():
RuntimeError: dictionary changed size during iteration
I0929 00:22:16.111704 31507 sched.cpp:1623] Asked to abort the driver
INFO:realtime:Aligned /tmp/tmpvAfao6/localTempDir/output.gam
I0929 00:22:16.117781 31507 sched.cpp:856] Aborting framework '20150928-184937-83886090-5050-35771-0006'
|
RuntimeError
|
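__reconcile above is a throttled periodic action: do nothing until reconciliationPeriod seconds have elapsed since the last run, then hand the driver a snapshot of the running task IDs. A sketch, where driver stands in for a Mesos SchedulerDriver-like object (an assumption, not part of the record):

# A sketch of the throttled reconciliation above; driver.reconcileTasks
# is assumed to accept an iterable of task IDs.
import time

class Reconciler:
    def __init__(self, period):
        self.period = period
        self.lastReconciliation = 0.0

    def maybe_reconcile(self, driver, runningJobMap):
        now = time.time()
        if now > self.lastReconciliation + self.period:
            self.lastReconciliation = now
            driver.reconcileTasks(list(runningJobMap.keys()))  # snapshot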
def _data_path(
path=None,
force_update=False,
update_path=True,
download=True,
name=None,
check_version=False,
return_version=False,
archive_name=None,
accept=False,
):
"""Aux function."""
key = {
"fake": "MNE_DATASETS_FAKE_PATH",
"misc": "MNE_DATASETS_MISC_PATH",
"sample": "MNE_DATASETS_SAMPLE_PATH",
"spm": "MNE_DATASETS_SPM_FACE_PATH",
"somato": "MNE_DATASETS_SOMATO_PATH",
"brainstorm": "MNE_DATASETS_BRAINSTORM_PATH",
"testing": "MNE_DATASETS_TESTING_PATH",
"multimodal": "MNE_DATASETS_MULTIMODAL_PATH",
"fnirs_motor": "MNE_DATASETS_FNIRS_MOTOR_PATH",
"opm": "MNE_DATASETS_OPM_PATH",
"visual_92_categories": "MNE_DATASETS_VISUAL_92_CATEGORIES_PATH",
"kiloword": "MNE_DATASETS_KILOWORD_PATH",
"mtrf": "MNE_DATASETS_MTRF_PATH",
"fieldtrip_cmc": "MNE_DATASETS_FIELDTRIP_CMC_PATH",
"phantom_4dbti": "MNE_DATASETS_PHANTOM_4DBTI_PATH",
"limo": "MNE_DATASETS_LIMO_PATH",
"refmeg_noise": "MNE_DATASETS_REFMEG_NOISE_PATH",
}[name]
path = _get_path(path, key, name)
# To update the testing or misc dataset, push commits, then make a new
# release on GitHub. Then update the "releases" variable:
releases = dict(testing="0.116", misc="0.8")
# And also update the "md5_hashes['testing']" variable below.
# To update any other dataset, update the data archive itself (upload
# an updated version) and update the md5 hash.
# try to match url->archive_name->folder_name
urls = dict( # the URLs to use
brainstorm=dict(
bst_auditory="https://osf.io/5t9n8/download?version=1",
bst_phantom_ctf="https://osf.io/sxr8y/download?version=1",
bst_phantom_elekta="https://osf.io/dpcku/download?version=1",
bst_raw="https://osf.io/9675n/download?version=2",
bst_resting="https://osf.io/m7bd3/download?version=3",
),
fake="https://github.com/mne-tools/mne-testing-data/raw/master/"
"datasets/foo.tgz",
misc="https://codeload.github.com/mne-tools/mne-misc-data/"
"tar.gz/%s" % releases["misc"],
sample="https://osf.io/86qa2/download?version=5",
somato="https://osf.io/tp4sg/download?version=7",
spm="https://osf.io/je4s8/download?version=2",
testing="https://codeload.github.com/mne-tools/mne-testing-data/"
"tar.gz/%s" % releases["testing"],
multimodal="https://ndownloader.figshare.com/files/5999598",
fnirs_motor="https://osf.io/dj3eh/download?version=1",
opm="https://osf.io/p6ae7/download?version=2",
visual_92_categories=[
"https://osf.io/8ejrs/download?version=1",
"https://osf.io/t4yjp/download?version=1",
],
mtrf="https://osf.io/h85s2/download?version=1",
kiloword="https://osf.io/qkvf9/download?version=1",
fieldtrip_cmc="https://osf.io/j9b6s/download?version=1",
phantom_4dbti="https://osf.io/v2brw/download?version=2",
refmeg_noise="https://osf.io/drt6v/download?version=1",
)
# filename of the resulting downloaded archive (only needed if the URL
# name does not match resulting filename)
archive_names = dict(
fieldtrip_cmc="SubjectCMC.zip",
kiloword="MNE-kiloword-data.tar.gz",
misc="mne-misc-data-%s.tar.gz" % releases["misc"],
mtrf="mTRF_1.5.zip",
multimodal="MNE-multimodal-data.tar.gz",
fnirs_motor="MNE-fNIRS-motor-data.tgz",
opm="MNE-OPM-data.tar.gz",
sample="MNE-sample-data-processed.tar.gz",
somato="MNE-somato-data.tar.gz",
spm="MNE-spm-face.tar.gz",
testing="mne-testing-data-%s.tar.gz" % releases["testing"],
visual_92_categories=[
"MNE-visual_92_categories-data-part1.tar.gz",
"MNE-visual_92_categories-data-part2.tar.gz",
],
phantom_4dbti="MNE-phantom-4DBTi.zip",
refmeg_noise="sample_reference_MEG_noise-raw.zip",
)
# original folder names that get extracted (only needed if the
# archive does not extract the right folder name; e.g., usually GitHub)
folder_origs = dict( # not listed means None (no need to move)
misc="mne-misc-data-%s" % releases["misc"],
testing="mne-testing-data-%s" % releases["testing"],
)
# finally, where we want them to extract to (only needed if the folder name
# is not the same as the last bit of the archive name without the file
# extension)
folder_names = dict(
brainstorm="MNE-brainstorm-data",
fake="foo",
misc="MNE-misc-data",
mtrf="mTRF_1.5",
sample="MNE-sample-data",
testing="MNE-testing-data",
visual_92_categories="MNE-visual_92_categories-data",
fieldtrip_cmc="MNE-fieldtrip_cmc-data",
phantom_4dbti="MNE-phantom-4DBTi",
refmeg_noise="MNE-refmeg-noise-data",
)
md5_hashes = dict(
brainstorm=dict(
bst_auditory="fa371a889a5688258896bfa29dd1700b",
bst_phantom_ctf="80819cb7f5b92d1a5289db3fb6acb33c",
bst_phantom_elekta="1badccbe17998d18cc373526e86a7aaf",
bst_raw="fa2efaaec3f3d462b319bc24898f440c",
bst_resting="70fc7bf9c3b97c4f2eab6260ee4a0430",
),
fake="3194e9f7b46039bb050a74f3e1ae9908",
misc="0f88194266121dd9409be94184231f25",
sample="12b75d1cb7df9dfb4ad73ed82f61094f",
somato="32fd2f6c8c7eb0784a1de6435273c48b",
spm="9f43f67150e3b694b523a21eb929ea75",
testing="ee36633fa9872aa434cc91f836beaa45",
multimodal="26ec847ae9ab80f58f204d09e2c08367",
fnirs_motor="c4935d19ddab35422a69f3326a01fef8",
opm="370ad1dcfd5c47e029e692c85358a374",
visual_92_categories=[
"74f50bbeb65740903eadc229c9fa759f",
"203410a98afc9df9ae8ba9f933370e20",
],
kiloword="3a124170795abbd2e48aae8727e719a8",
mtrf="273a390ebbc48da2c3184b01a82e4636",
fieldtrip_cmc="6f9fd6520f9a66e20994423808d2528c",
phantom_4dbti="938a601440f3ffa780d20a17bae039ff",
refmeg_noise="779fecd890d98b73a4832e717d7c7c45",
)
assert set(md5_hashes.keys()) == set(urls.keys())
url = urls[name]
hash_ = md5_hashes[name]
folder_orig = folder_origs.get(name, None)
if name == "brainstorm":
assert archive_name is not None
url = [url[archive_name.split(".")[0]]]
folder_path = [op.join(path, folder_names[name], archive_name.split(".")[0])]
hash_ = [hash_[archive_name.split(".")[0]]]
archive_name = [archive_name]
else:
url = [url] if not isinstance(url, list) else url
hash_ = [hash_] if not isinstance(hash_, list) else hash_
archive_name = archive_names.get(name)
if archive_name is None:
archive_name = [u.split("/")[-1] for u in url]
if not isinstance(archive_name, list):
archive_name = [archive_name]
folder_path = [
op.join(path, folder_names.get(name, a.split(".")[0])) for a in archive_name
]
if not isinstance(folder_orig, list):
folder_orig = [folder_orig] * len(url)
folder_path = [op.abspath(f) for f in folder_path]
assert hash_ is not None
assert all(isinstance(x, list) for x in (url, archive_name, hash_, folder_path))
assert len(url) == len(archive_name) == len(hash_) == len(folder_path)
logger.debug("URL: %s" % (url,))
logger.debug("archive_name: %s" % (archive_name,))
logger.debug("hash: %s" % (hash_,))
logger.debug("folder_path: %s" % (folder_path,))
need_download = any(not op.exists(f) for f in folder_path)
# additional condition: check for version.txt and parse it
want_version = releases.get(name, None)
want_version = _FAKE_VERSION if name == "fake" else want_version
if not need_download and want_version is not None:
data_version = _dataset_version(folder_path[0], name)
need_download = LooseVersion(data_version) < LooseVersion(want_version)
if need_download:
logger.info(
f"Dataset {name} version {data_version} out of date, "
f"latest version is {want_version}"
)
if need_download and not download:
return ""
if need_download or force_update:
logger.debug(
"Downloading: need_download=%s, force_update=%s"
% (need_download, force_update)
)
for f in folder_path:
logger.debug(" Exists: %s: %s" % (f, op.exists(f)))
if name == "brainstorm":
if accept or "--accept-brainstorm-license" in sys.argv:
answer = "y"
else:
# If they don't have stdin, just accept the license
# https://github.com/mne-tools/mne-python/issues/8513#issuecomment-726823724 # noqa: E501
answer = _safe_input("%sAgree (y/[n])? " % _bst_license_text, use="y")
if answer.lower() != "y":
raise RuntimeError("You must agree to the license to use this dataset")
assert len(url) == len(hash_)
assert len(url) == len(archive_name)
assert len(url) == len(folder_orig)
assert len(url) == len(folder_path)
assert len(url) > 0
# 1. Get all the archives
full_name = list()
for u, an, h, fo in zip(url, archive_name, hash_, folder_orig):
remove_archive, full = _download(path, u, an, h)
full_name.append(full)
del archive_name
# 2. Extract all of the files
remove_dir = True
for u, fp, an, h, fo in zip(url, folder_path, full_name, hash_, folder_orig):
_extract(path, name, fp, an, fo, remove_dir)
remove_dir = False # only do on first iteration
# 3. Remove all of the archives
if remove_archive:
for an in full_name:
os.remove(op.join(path, an))
logger.info("Successfully extracted to: %s" % folder_path)
_do_path_update(path, update_path, key, name)
path = folder_path[0]
# compare the version of the dataset and mne
data_version = _dataset_version(path, name)
# 0.7 < 0.7.git should be False, therefore strip
if check_version and (
LooseVersion(data_version) < LooseVersion(mne_version.strip(".git"))
):
warn(
"The {name} dataset (version {current}) is older than "
"mne-python (version {newest}). If the examples fail, "
"you may need to update the {name} dataset by using "
"mne.datasets.{name}.data_path(force_update=True)".format(
name=name, current=data_version, newest=mne_version
)
)
return (path, data_version) if return_version else path
|
def _data_path(
path=None,
force_update=False,
update_path=True,
download=True,
name=None,
check_version=False,
return_version=False,
archive_name=None,
accept=False,
):
"""Aux function."""
key = {
"fake": "MNE_DATASETS_FAKE_PATH",
"misc": "MNE_DATASETS_MISC_PATH",
"sample": "MNE_DATASETS_SAMPLE_PATH",
"spm": "MNE_DATASETS_SPM_FACE_PATH",
"somato": "MNE_DATASETS_SOMATO_PATH",
"brainstorm": "MNE_DATASETS_BRAINSTORM_PATH",
"testing": "MNE_DATASETS_TESTING_PATH",
"multimodal": "MNE_DATASETS_MULTIMODAL_PATH",
"fnirs_motor": "MNE_DATASETS_FNIRS_MOTOR_PATH",
"opm": "MNE_DATASETS_OPM_PATH",
"visual_92_categories": "MNE_DATASETS_VISUAL_92_CATEGORIES_PATH",
"kiloword": "MNE_DATASETS_KILOWORD_PATH",
"mtrf": "MNE_DATASETS_MTRF_PATH",
"fieldtrip_cmc": "MNE_DATASETS_FIELDTRIP_CMC_PATH",
"phantom_4dbti": "MNE_DATASETS_PHANTOM_4DBTI_PATH",
"limo": "MNE_DATASETS_LIMO_PATH",
"refmeg_noise": "MNE_DATASETS_REFMEG_NOISE_PATH",
}[name]
path = _get_path(path, key, name)
# To update the testing or misc dataset, push commits, then make a new
# release on GitHub. Then update the "releases" variable:
releases = dict(testing="0.115", misc="0.8")
# And also update the "md5_hashes['testing']" variable below.
# To update any other dataset, update the data archive itself (upload
# an updated version) and update the md5 hash.
# try to match url->archive_name->folder_name
urls = dict( # the URLs to use
brainstorm=dict(
bst_auditory="https://osf.io/5t9n8/download?version=1",
bst_phantom_ctf="https://osf.io/sxr8y/download?version=1",
bst_phantom_elekta="https://osf.io/dpcku/download?version=1",
bst_raw="https://osf.io/9675n/download?version=2",
bst_resting="https://osf.io/m7bd3/download?version=3",
),
fake="https://github.com/mne-tools/mne-testing-data/raw/master/"
"datasets/foo.tgz",
misc="https://codeload.github.com/mne-tools/mne-misc-data/"
"tar.gz/%s" % releases["misc"],
sample="https://osf.io/86qa2/download?version=5",
somato="https://osf.io/tp4sg/download?version=7",
spm="https://osf.io/je4s8/download?version=2",
testing="https://codeload.github.com/mne-tools/mne-testing-data/"
"tar.gz/%s" % releases["testing"],
multimodal="https://ndownloader.figshare.com/files/5999598",
fnirs_motor="https://osf.io/dj3eh/download?version=1",
opm="https://osf.io/p6ae7/download?version=2",
visual_92_categories=[
"https://osf.io/8ejrs/download?version=1",
"https://osf.io/t4yjp/download?version=1",
],
mtrf="https://osf.io/h85s2/download?version=1",
kiloword="https://osf.io/qkvf9/download?version=1",
fieldtrip_cmc="https://osf.io/j9b6s/download?version=1",
phantom_4dbti="https://osf.io/v2brw/download?version=2",
refmeg_noise="https://osf.io/drt6v/download?version=1",
)
# filename of the resulting downloaded archive (only needed if the URL
# name does not match resulting filename)
archive_names = dict(
fieldtrip_cmc="SubjectCMC.zip",
kiloword="MNE-kiloword-data.tar.gz",
misc="mne-misc-data-%s.tar.gz" % releases["misc"],
mtrf="mTRF_1.5.zip",
multimodal="MNE-multimodal-data.tar.gz",
fnirs_motor="MNE-fNIRS-motor-data.tgz",
opm="MNE-OPM-data.tar.gz",
sample="MNE-sample-data-processed.tar.gz",
somato="MNE-somato-data.tar.gz",
spm="MNE-spm-face.tar.gz",
testing="mne-testing-data-%s.tar.gz" % releases["testing"],
visual_92_categories=[
"MNE-visual_92_categories-data-part1.tar.gz",
"MNE-visual_92_categories-data-part2.tar.gz",
],
phantom_4dbti="MNE-phantom-4DBTi.zip",
refmeg_noise="sample_reference_MEG_noise-raw.zip",
)
# original folder names that get extracted (only needed if the
# archive does not extract the right folder name; e.g., usually GitHub)
folder_origs = dict( # not listed means None (no need to move)
misc="mne-misc-data-%s" % releases["misc"],
testing="mne-testing-data-%s" % releases["testing"],
)
# finally, where we want them to extract to (only needed if the folder name
# is not the same as the last bit of the archive name without the file
# extension)
folder_names = dict(
brainstorm="MNE-brainstorm-data",
fake="foo",
misc="MNE-misc-data",
mtrf="mTRF_1.5",
sample="MNE-sample-data",
testing="MNE-testing-data",
visual_92_categories="MNE-visual_92_categories-data",
fieldtrip_cmc="MNE-fieldtrip_cmc-data",
phantom_4dbti="MNE-phantom-4DBTi",
refmeg_noise="MNE-refmeg-noise-data",
)
md5_hashes = dict(
brainstorm=dict(
bst_auditory="fa371a889a5688258896bfa29dd1700b",
bst_phantom_ctf="80819cb7f5b92d1a5289db3fb6acb33c",
bst_phantom_elekta="1badccbe17998d18cc373526e86a7aaf",
bst_raw="fa2efaaec3f3d462b319bc24898f440c",
bst_resting="70fc7bf9c3b97c4f2eab6260ee4a0430",
),
fake="3194e9f7b46039bb050a74f3e1ae9908",
misc="0f88194266121dd9409be94184231f25",
sample="12b75d1cb7df9dfb4ad73ed82f61094f",
somato="32fd2f6c8c7eb0784a1de6435273c48b",
spm="9f43f67150e3b694b523a21eb929ea75",
testing="731f4ce20f0cb439c04c719a67ccf4d5",
multimodal="26ec847ae9ab80f58f204d09e2c08367",
fnirs_motor="c4935d19ddab35422a69f3326a01fef8",
opm="370ad1dcfd5c47e029e692c85358a374",
visual_92_categories=[
"74f50bbeb65740903eadc229c9fa759f",
"203410a98afc9df9ae8ba9f933370e20",
],
kiloword="3a124170795abbd2e48aae8727e719a8",
mtrf="273a390ebbc48da2c3184b01a82e4636",
fieldtrip_cmc="6f9fd6520f9a66e20994423808d2528c",
phantom_4dbti="938a601440f3ffa780d20a17bae039ff",
refmeg_noise="779fecd890d98b73a4832e717d7c7c45",
)
assert set(md5_hashes.keys()) == set(urls.keys())
url = urls[name]
hash_ = md5_hashes[name]
folder_orig = folder_origs.get(name, None)
if name == "brainstorm":
assert archive_name is not None
url = [url[archive_name.split(".")[0]]]
folder_path = [op.join(path, folder_names[name], archive_name.split(".")[0])]
hash_ = [hash_[archive_name.split(".")[0]]]
archive_name = [archive_name]
else:
url = [url] if not isinstance(url, list) else url
hash_ = [hash_] if not isinstance(hash_, list) else hash_
archive_name = archive_names.get(name)
if archive_name is None:
archive_name = [u.split("/")[-1] for u in url]
if not isinstance(archive_name, list):
archive_name = [archive_name]
folder_path = [
op.join(path, folder_names.get(name, a.split(".")[0])) for a in archive_name
]
if not isinstance(folder_orig, list):
folder_orig = [folder_orig] * len(url)
folder_path = [op.abspath(f) for f in folder_path]
assert hash_ is not None
assert all(isinstance(x, list) for x in (url, archive_name, hash_, folder_path))
assert len(url) == len(archive_name) == len(hash_) == len(folder_path)
logger.debug("URL: %s" % (url,))
logger.debug("archive_name: %s" % (archive_name,))
logger.debug("hash: %s" % (hash_,))
logger.debug("folder_path: %s" % (folder_path,))
need_download = any(not op.exists(f) for f in folder_path)
# additional condition: check for version.txt and parse it
want_version = releases.get(name, None)
want_version = _FAKE_VERSION if name == "fake" else want_version
if not need_download and want_version is not None:
data_version = _dataset_version(folder_path[0], name)
need_download = LooseVersion(data_version) < LooseVersion(want_version)
if need_download:
logger.info(
f"Dataset {name} version {data_version} out of date, "
f"latest version is {want_version}"
)
if need_download and not download:
return ""
if need_download or force_update:
logger.debug(
"Downloading: need_download=%s, force_update=%s"
% (need_download, force_update)
)
for f in folder_path:
logger.debug(" Exists: %s: %s" % (f, op.exists(f)))
if name == "brainstorm":
if accept or "--accept-brainstorm-license" in sys.argv:
answer = "y"
else:
# If they don't have stdin, just accept the license
# https://github.com/mne-tools/mne-python/issues/8513#issuecomment-726823724 # noqa: E501
answer = _safe_input("%sAgree (y/[n])? " % _bst_license_text, use="y")
if answer.lower() != "y":
raise RuntimeError("You must agree to the license to use this dataset")
assert len(url) == len(hash_)
assert len(url) == len(archive_name)
assert len(url) == len(folder_orig)
assert len(url) == len(folder_path)
assert len(url) > 0
# 1. Get all the archives
full_name = list()
for u, an, h, fo in zip(url, archive_name, hash_, folder_orig):
remove_archive, full = _download(path, u, an, h)
full_name.append(full)
del archive_name
# 2. Extract all of the files
remove_dir = True
for u, fp, an, h, fo in zip(url, folder_path, full_name, hash_, folder_orig):
_extract(path, name, fp, an, fo, remove_dir)
remove_dir = False # only do on first iteration
# 3. Remove all of the archives
if remove_archive:
for an in full_name:
os.remove(op.join(path, an))
logger.info("Successfully extracted to: %s" % folder_path)
_do_path_update(path, update_path, key, name)
path = folder_path[0]
# compare the version of the dataset and mne
data_version = _dataset_version(path, name)
# 0.7 < 0.7.git should be False, therefore strip
if check_version and (
LooseVersion(data_version) < LooseVersion(mne_version.strip(".git"))
):
warn(
"The {name} dataset (version {current}) is older than "
"mne-python (version {newest}). If the examples fail, "
"you may need to update the {name} dataset by using "
"mne.datasets.{name}.data_path(force_update=True)".format(
name=name, current=data_version, newest=mne_version
)
)
return (path, data_version) if return_version else path
|
https://github.com/mne-tools/mne-python/issues/8864
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-17-1519c0d84577> in <module>
----> 1 raw = mne.io.read_epochs_eeglab(input_fname = data_path + '/'+ subject_name + '_play_merged.set')
~/.pyenv/versions/anaconda3-5.3.1/lib/python3.7/site-packages/mne/io/eeglab/eeglab.py in read_epochs_eeglab(input_fname, events, event_id, eog, verbose, uint16_codec)
277 epochs = EpochsEEGLAB(input_fname=input_fname, events=events, eog=eog,
278 event_id=event_id, verbose=verbose,
--> 279 uint16_codec=uint16_codec)
280 return epochs
281
<decorator-gen-198> in __init__(self, input_fname, events, event_id, tmin, baseline, reject, flat, reject_tmin, reject_tmax, montage, eog, verbose, uint16_codec)
~/.pyenv/versions/anaconda3-5.3.1/lib/python3.7/site-packages/mne/io/eeglab/eeglab.py in __init__(self, input_fname, events, event_id, tmin, baseline, reject, flat, reject_tmin, reject_tmax, montage, eog, verbose, uint16_codec)
449 uint16_codec=None): # noqa: D102
450 eeg = _check_load_mat(input_fname, uint16_codec)
--> 451
452 if not ((events is None and event_id is None) or
453 (events is not None and event_id is not None)):
~/.pyenv/versions/anaconda3-5.3.1/lib/python3.7/site-packages/mne/io/eeglab/eeglab.py in _check_load_mat(fname, uint16_codec)
63 'mne-python developers for more information.')
64 if 'EEG' not in eeg:
---> 65 raise ValueError('Could not find EEG array in the .set file.')
66 else:
67 eeg = eeg['EEG']
ValueError: Could not find EEG array in the .set file.
|
ValueError
|
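A minimal sketch of the version gate used by the dataset fetcher above, assuming the version string lives in a version.txt file at the dataset root (the file name and layout are assumptions for illustration; _dataset_version is the real helper, not shown here):

from distutils.version import LooseVersion
import os.path as op

def _needs_refetch(folder_path, want_version):
    # Return True when the on-disk dataset is older than want_version.
    version_file = op.join(folder_path, "version.txt")  # assumed layout
    if not op.isfile(version_file):
        return True  # no version marker -> treat as out of date
    with open(version_file) as fid:
        data_version = fid.read().strip()
    # LooseVersion orders dotted numeric versions, e.g. "0.7" < "0.7.1"
    return LooseVersion(data_version) < LooseVersion(want_version)

Note a subtlety in the comparison against mne_version above: str.strip(".git") removes any of the characters '.', 'g', 'i', 't' from both ends of the string rather than a literal ".git" suffix; it happens to produce the intended result for strings like "0.7.git".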
def _check_load_mat(fname, uint16_codec):
"""Check if the mat struct contains 'EEG'."""
from ...externals.pymatreader import read_mat
eeg = read_mat(fname, uint16_codec=uint16_codec)
if "ALLEEG" in eeg:
raise NotImplementedError(
"Loading an ALLEEG array is not supported. Please contact"
"mne-python developers for more information."
)
if "EEG" in eeg: # fields are contained in EEG structure
eeg = eeg["EEG"]
eeg = eeg.get("EEG", eeg) # handle nested EEG structure
eeg = Bunch(**eeg)
eeg.trials = int(eeg.trials)
eeg.nbchan = int(eeg.nbchan)
eeg.pnts = int(eeg.pnts)
return eeg
|
def _check_load_mat(fname, uint16_codec):
"""Check if the mat struct contains 'EEG'."""
from ...externals.pymatreader import read_mat
eeg = read_mat(fname, uint16_codec=uint16_codec)
if "ALLEEG" in eeg:
raise NotImplementedError(
"Loading an ALLEEG array is not supported. Please contact"
"mne-python developers for more information."
)
if "EEG" not in eeg:
raise ValueError("Could not find EEG array in the .set file.")
else:
eeg = eeg["EEG"]
eeg = eeg.get("EEG", eeg) # handle nested EEG structure
eeg = Bunch(**eeg)
eeg.trials = int(eeg.trials)
eeg.nbchan = int(eeg.nbchan)
eeg.pnts = int(eeg.pnts)
return eeg
|
https://github.com/mne-tools/mne-python/issues/8864
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-17-1519c0d84577> in <module>
----> 1 raw = mne.io.read_epochs_eeglab(input_fname = data_path + '/'+ subject_name + '_play_merged.set')
~/.pyenv/versions/anaconda3-5.3.1/lib/python3.7/site-packages/mne/io/eeglab/eeglab.py in read_epochs_eeglab(input_fname, events, event_id, eog, verbose, uint16_codec)
277 epochs = EpochsEEGLAB(input_fname=input_fname, events=events, eog=eog,
278 event_id=event_id, verbose=verbose,
--> 279 uint16_codec=uint16_codec)
280 return epochs
281
<decorator-gen-198> in __init__(self, input_fname, events, event_id, tmin, baseline, reject, flat, reject_tmin, reject_tmax, montage, eog, verbose, uint16_codec)
~/.pyenv/versions/anaconda3-5.3.1/lib/python3.7/site-packages/mne/io/eeglab/eeglab.py in __init__(self, input_fname, events, event_id, tmin, baseline, reject, flat, reject_tmin, reject_tmax, montage, eog, verbose, uint16_codec)
449 uint16_codec=None): # noqa: D102
450 eeg = _check_load_mat(input_fname, uint16_codec)
--> 451
452 if not ((events is None and event_id is None) or
453 (events is not None and event_id is not None)):
~/.pyenv/versions/anaconda3-5.3.1/lib/python3.7/site-packages/mne/io/eeglab/eeglab.py in _check_load_mat(fname, uint16_codec)
63 'mne-python developers for more information.')
64 if 'EEG' not in eeg:
---> 65 raise ValueError('Could not find EEG array in the .set file.')
66 else:
67 eeg = eeg['EEG']
ValueError: Could not find EEG array in the .set file.
|
ValueError
|
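The before/after pair above differs in how a missing top-level 'EEG' key is handled; the .get fallback additionally tolerates one extra level of nesting in the loaded mat struct. A hypothetical illustration (the nested layout shown is an assumption about how some .set files are saved, not data taken from the issue):

flat = {"EEG": {"trials": 1, "nbchan": 64, "pnts": 512}}
nested = {"EEG": {"EEG": {"trials": 1, "nbchan": 64, "pnts": 512}}}

for mat in (flat, nested):
    eeg = mat["EEG"]
    eeg = eeg.get("EEG", eeg)  # unwrap one more level when present
    assert set(eeg) == {"trials", "nbchan", "pnts"}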
def _get_and_verify_data_sizes(data, sfreq, n_signals=None, n_times=None, times=None):
"""Get and/or verify the data sizes and time scales."""
if not isinstance(data, (list, tuple)):
raise ValueError("data has to be a list or tuple")
n_signals_tot = 0
for this_data in data:
this_n_signals, this_n_times = this_data.shape
if n_times is not None:
if this_n_times != n_times:
raise ValueError(
"all input time series must have the same number of time points"
)
else:
n_times = this_n_times
n_signals_tot += this_n_signals
if hasattr(this_data, "times"):
assert isinstance(this_data, _BaseSourceEstimate)
this_times = this_data.times
if times is not None:
if np.any(times != this_times):
warn("time scales of input time series do not match")
else:
times = this_times
elif times is None:
times = _arange_div(n_times, sfreq)
if n_signals is not None:
if n_signals != n_signals_tot:
raise ValueError(
"the number of time series has to be the same in each epoch"
)
n_signals = n_signals_tot
return n_signals, n_times, times
|
def _get_and_verify_data_sizes(data, n_signals=None, n_times=None, times=None):
"""Get and/or verify the data sizes and time scales."""
if not isinstance(data, (list, tuple)):
raise ValueError("data has to be a list or tuple")
n_signals_tot = 0
for this_data in data:
this_n_signals, this_n_times = this_data.shape
if n_times is not None:
if this_n_times != n_times:
raise ValueError(
"all input time series must have the same number of time points"
)
else:
n_times = this_n_times
n_signals_tot += this_n_signals
if hasattr(this_data, "times"):
this_times = this_data.times
if times is not None:
if np.any(times != this_times):
warn("time scales of input time series do not match")
else:
times = this_times
if n_signals is not None:
if n_signals != n_signals_tot:
raise ValueError(
"the number of time series has to be the same in each epoch"
)
n_signals = n_signals_tot
return n_signals, n_times, times
|
https://github.com/mne-tools/mne-python/issues/8824
|
Connectivity computation...
Traceback (most recent call last):
File "/home/ap/PycharmProjects/eeg_processing/conn_example.py", line 30, in <module>
faverage=True, tmin=tmin, tmax=tmax, mt_adaptive=False, n_jobs=1)
File "<decorator-gen-387>", line 21, in spectral_connectivity
File "/home/ap/miniconda3/envs/MNE/lib/python3.7/site-packages/mne/connectivity/spectral.py", line 789, in spectral_connectivity
cwt_freqs=cwt_freqs, faverage=faverage)
File "/home/ap/miniconda3/envs/MNE/lib/python3.7/site-packages/mne/connectivity/spectral.py", line 948, in _prepare_connectivity
mask = _time_mask(times_in, tmin, tmax, sfreq=sfreq)
File "/home/ap/miniconda3/envs/MNE/lib/python3.7/site-packages/mne/utils/numerics.py", line 499, in _time_mask
% (orig_tmin, orig_tmax, extra, times[0], times[-1]))
ValueError: No samples remain when using tmin=-0.5 and tmax=-0.1 (original time bounds are [0.0, 2.4974401644798485])
|
ValueError
|
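The after_merge version threads sfreq into _get_and_verify_data_sizes so that a default time axis can be built for plain arrays, which carry no .times attribute. A sketch of what _arange_div(n_times, sfreq) is assumed to compute (sample k sits at k / sfreq seconds, so the first sample is at t=0):

import numpy as np

def _arange_div_sketch(n_times, sfreq):
    # assumed equivalent of the real _arange_div helper
    return np.arange(n_times) / float(sfreq)

_arange_div_sketch(5, 250.0)  # -> array([0., 0.004, 0.008, 0.012, 0.016])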
def spectral_connectivity(
data,
method="coh",
indices=None,
sfreq=2 * np.pi,
mode="multitaper",
fmin=None,
fmax=np.inf,
fskip=0,
faverage=False,
tmin=None,
tmax=None,
mt_bandwidth=None,
mt_adaptive=False,
mt_low_bias=True,
cwt_freqs=None,
cwt_n_cycles=7,
block_size=1000,
n_jobs=1,
verbose=None,
):
"""Compute frequency- and time-frequency-domain connectivity measures.
The connectivity method(s) are specified using the "method" parameter.
All methods are based on estimates of the cross- and power spectral
densities (CSD/PSD) Sxy and Sxx, Syy.
Parameters
----------
data : array-like, shape=(n_epochs, n_signals, n_times) | Epochs
The data from which to compute connectivity. Note that it is also
possible to combine multiple signals by providing a list of tuples,
e.g., data = [(arr_0, stc_0), (arr_1, stc_1), (arr_2, stc_2)],
corresponds to 3 epochs, and arr_* could be an array with the same
number of time points as stc_*. The array-like object can also
be a list/generator of array, shape =(n_signals, n_times),
or a list/generator of SourceEstimate or VolSourceEstimate objects.
method : str | list of str
Connectivity measure(s) to compute.
indices : tuple of array | None
Two arrays with indices of connections for which to compute
connectivity. If None, all connections are computed.
sfreq : float
The sampling frequency.
mode : str
Spectrum estimation mode can be either: 'multitaper', 'fourier', or
'cwt_morlet'.
fmin : float | tuple of float
The lower frequency of interest. Multiple bands are defined using
a tuple, e.g., (8., 20.) for two bands with 8Hz and 20Hz lower freq.
If None the frequency corresponding to an epoch length of 5 cycles
is used.
fmax : float | tuple of float
        The upper frequency of interest. Multiple bands are defined using
        a tuple, e.g. (13., 30.) for two bands with 13Hz and 30Hz upper freq.
fskip : int
Omit every "(fskip + 1)-th" frequency bin to decimate in frequency
domain.
faverage : bool
Average connectivity scores for each frequency band. If True,
the output freqs will be a list with arrays of the frequencies
that were averaged.
tmin : float | None
Time to start connectivity estimation. Note: when "data" is an array,
the first sample is assumed to be at time 0. For other types
(Epochs, etc.), the time information contained in the object is used
to compute the time indices.
tmax : float | None
Time to end connectivity estimation. Note: when "data" is an array,
the first sample is assumed to be at time 0. For other types
(Epochs, etc.), the time information contained in the object is used
to compute the time indices.
mt_bandwidth : float | None
The bandwidth of the multitaper windowing function in Hz.
Only used in 'multitaper' mode.
mt_adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD.
Only used in 'multitaper' mode.
mt_low_bias : bool
Only use tapers with more than 90%% spectral concentration within
bandwidth. Only used in 'multitaper' mode.
cwt_freqs : array
Array of frequencies of interest. Only used in 'cwt_morlet' mode.
cwt_n_cycles : float | array of float
Number of cycles. Fixed number or one per frequency. Only used in
'cwt_morlet' mode.
block_size : int
How many connections to compute at once (higher numbers are faster
but require more memory).
n_jobs : int
How many epochs to process in parallel.
%(verbose)s
Returns
-------
con : array | list of array
Computed connectivity measure(s). The shape of each array is either
(n_signals, n_signals, n_freqs) mode: 'multitaper' or 'fourier'
(n_signals, n_signals, n_freqs, n_times) mode: 'cwt_morlet'
when "indices" is None, or
(n_con, n_freqs) mode: 'multitaper' or 'fourier'
(n_con, n_freqs, n_times) mode: 'cwt_morlet'
when "indices" is specified and "n_con = len(indices[0])".
freqs : array
Frequency points at which the connectivity was computed.
times : array
Time points for which the connectivity was computed.
n_epochs : int
Number of epochs used for computation.
n_tapers : int
The number of DPSS tapers used. Only defined in 'multitaper' mode.
Otherwise None is returned.
Notes
-----
The spectral densities can be estimated using a multitaper method with
digital prolate spheroidal sequence (DPSS) windows, a discrete Fourier
transform with Hanning windows, or a continuous wavelet transform using
Morlet wavelets. The spectral estimation mode is specified using the
"mode" parameter.
By default, the connectivity between all signals is computed (only
connections corresponding to the lower-triangular part of the
connectivity matrix). If one is only interested in the connectivity
between some signals, the "indices" parameter can be used. For example,
to compute the connectivity between the signal with index 0 and signals
"2, 3, 4" (a total of 3 connections) one can use the following::
indices = (np.array([0, 0, 0]), # row indices
np.array([2, 3, 4])) # col indices
con_flat = spectral_connectivity(data, method='coh',
indices=indices, ...)
In this case con_flat.shape = (3, n_freqs). The connectivity scores are
    in the same order as the defined indices.
**Supported Connectivity Measures**
The connectivity method(s) is specified using the "method" parameter. The
following methods are supported (note: ``E[]`` denotes average over
epochs). Multiple measures can be computed at once by using a list/tuple,
e.g., ``['coh', 'pli']`` to compute coherence and PLI.
'coh' : Coherence given by::
| E[Sxy] |
C = ---------------------
sqrt(E[Sxx] * E[Syy])
'cohy' : Coherency given by::
E[Sxy]
C = ---------------------
sqrt(E[Sxx] * E[Syy])
'imcoh' : Imaginary coherence [1]_ given by::
Im(E[Sxy])
C = ----------------------
sqrt(E[Sxx] * E[Syy])
'plv' : Phase-Locking Value (PLV) [2]_ given by::
PLV = |E[Sxy/|Sxy|]|
    'ciplv' : corrected imaginary PLV (ciPLV) [3]_ given by::
|E[Im(Sxy/|Sxy|)]|
ciPLV = ------------------------------------
sqrt(1 - |E[real(Sxy/|Sxy|)]| ** 2)
'ppc' : Pairwise Phase Consistency (PPC), an unbiased estimator
of squared PLV [4]_.
'pli' : Phase Lag Index (PLI) [5]_ given by::
PLI = |E[sign(Im(Sxy))]|
'pli2_unbiased' : Unbiased estimator of squared PLI [6]_.
'wpli' : Weighted Phase Lag Index (WPLI) [6]_ given by::
|E[Im(Sxy)]|
WPLI = ------------------
E[|Im(Sxy)|]
'wpli2_debiased' : Debiased estimator of squared WPLI [6]_.
References
----------
.. [1] Nolte et al. "Identifying true brain interaction from EEG data using
the imaginary part of coherency" Clinical neurophysiology, vol. 115,
no. 10, pp. 2292-2307, Oct. 2004.
.. [2] Lachaux et al. "Measuring phase synchrony in brain signals" Human
brain mapping, vol. 8, no. 4, pp. 194-208, Jan. 1999.
.. [3] Bruña et al. "Phase locking value revisited: teaching new tricks to
an old dog" Journal of Neural Engineering, vol. 15, no. 5, pp.
056011 , Jul. 2018.
.. [4] Vinck et al. "The pairwise phase consistency: a bias-free measure of
rhythmic neuronal synchronization" NeuroImage, vol. 51, no. 1,
pp. 112-122, May 2010.
.. [5] Stam et al. "Phase lag index: assessment of functional connectivity
from multi channel EEG and MEG with diminished bias from common
sources" Human brain mapping, vol. 28, no. 11, pp. 1178-1193,
Nov. 2007.
.. [6] Vinck et al. "An improved index of phase-synchronization for
electro-physiological data in the presence of volume-conduction,
noise and sample-size bias" NeuroImage, vol. 55, no. 4,
pp. 1548-1565, Apr. 2011.
"""
if n_jobs != 1:
parallel, my_epoch_spectral_connectivity, _ = parallel_func(
_epoch_spectral_connectivity, n_jobs, verbose=verbose
)
# format fmin and fmax and check inputs
if fmin is None:
fmin = -np.inf # set it to -inf, so we can adjust it later
fmin = np.array((fmin,), dtype=float).ravel()
fmax = np.array((fmax,), dtype=float).ravel()
if len(fmin) != len(fmax):
raise ValueError("fmin and fmax must have the same length")
if np.any(fmin > fmax):
raise ValueError("fmax must be larger than fmin")
n_bands = len(fmin)
# assign names to connectivity methods
if not isinstance(method, (list, tuple)):
method = [method] # make it a list so we can iterate over it
# handle connectivity estimators
(con_method_types, n_methods, accumulate_psd, n_comp_args) = _check_estimators(
method=method, mode=mode
)
if isinstance(data, BaseEpochs):
times_in = data.times # input times for Epochs input type
sfreq = data.info["sfreq"]
else:
times_in = None
# loop over data; it could be a generator that returns
# (n_signals x n_times) arrays or SourceEstimates
epoch_idx = 0
logger.info("Connectivity computation...")
for epoch_block in _get_n_epochs(data, n_jobs):
if epoch_idx == 0:
            # initialize everything: times and frequencies
(
n_cons,
times,
n_times,
times_in,
n_times_in,
tmin_idx,
tmax_idx,
n_freqs,
freq_mask,
freqs,
freqs_bands,
freq_idx_bands,
n_signals,
indices_use,
) = _prepare_connectivity(
epoch_block=epoch_block,
times_in=times_in,
tmin=tmin,
tmax=tmax,
fmin=fmin,
fmax=fmax,
sfreq=sfreq,
indices=indices,
mode=mode,
fskip=fskip,
n_bands=n_bands,
cwt_freqs=cwt_freqs,
faverage=faverage,
)
# get the window function, wavelets, etc for different modes
(spectral_params, mt_adaptive, n_times_spectrum, n_tapers) = (
_assemble_spectral_params(
mode=mode,
n_times=n_times,
mt_adaptive=mt_adaptive,
mt_bandwidth=mt_bandwidth,
sfreq=sfreq,
mt_low_bias=mt_low_bias,
cwt_n_cycles=cwt_n_cycles,
cwt_freqs=cwt_freqs,
freqs=freqs,
freq_mask=freq_mask,
)
)
# unique signals for which we actually need to compute PSD etc.
sig_idx = np.unique(np.r_[indices_use[0], indices_use[1]])
# map indices to unique indices
idx_map = [np.searchsorted(sig_idx, ind) for ind in indices_use]
# allocate space to accumulate PSD
if accumulate_psd:
if n_times_spectrum == 0:
psd_shape = (len(sig_idx), n_freqs)
else:
psd_shape = (len(sig_idx), n_freqs, n_times_spectrum)
psd = np.zeros(psd_shape)
else:
psd = None
# create instances of the connectivity estimators
con_methods = [
mtype(n_cons, n_freqs, n_times_spectrum) for mtype in con_method_types
]
sep = ", "
metrics_str = sep.join([meth.name for meth in con_methods])
logger.info(" the following metrics will be computed: %s" % metrics_str)
# check dimensions and time scale
for this_epoch in epoch_block:
_get_and_verify_data_sizes(
this_epoch, sfreq, n_signals, n_times_in, times_in
)
call_params = dict(
sig_idx=sig_idx,
tmin_idx=tmin_idx,
tmax_idx=tmax_idx,
sfreq=sfreq,
mode=mode,
freq_mask=freq_mask,
idx_map=idx_map,
block_size=block_size,
psd=psd,
accumulate_psd=accumulate_psd,
mt_adaptive=mt_adaptive,
con_method_types=con_method_types,
con_methods=con_methods if n_jobs == 1 else None,
n_signals=n_signals,
n_times=n_times,
accumulate_inplace=True if n_jobs == 1 else False,
)
call_params.update(**spectral_params)
if n_jobs == 1:
# no parallel processing
for this_epoch in epoch_block:
logger.info(" computing connectivity for epoch %d" % (epoch_idx + 1))
# con methods and psd are updated inplace
_epoch_spectral_connectivity(data=this_epoch, **call_params)
epoch_idx += 1
else:
# process epochs in parallel
logger.info(
" computing connectivity for epochs %d..%d"
% (epoch_idx + 1, epoch_idx + len(epoch_block))
)
out = parallel(
my_epoch_spectral_connectivity(data=this_epoch, **call_params)
for this_epoch in epoch_block
)
# do the accumulation
for this_out in out:
for method, parallel_method in zip(con_methods, this_out[0]):
method.combine(parallel_method)
if accumulate_psd:
psd += this_out[1]
epoch_idx += len(epoch_block)
# normalize
n_epochs = epoch_idx
if accumulate_psd:
psd /= n_epochs
# compute final connectivity scores
con = list()
for method, n_args in zip(con_methods, n_comp_args):
# future estimators will need to be handled here
if n_args == 3:
# compute all scores at once
method.compute_con(slice(0, n_cons), n_epochs)
elif n_args == 5:
# compute scores block-wise to save memory
for i in range(0, n_cons, block_size):
con_idx = slice(i, i + block_size)
psd_xx = psd[idx_map[0][con_idx]]
psd_yy = psd[idx_map[1][con_idx]]
method.compute_con(con_idx, n_epochs, psd_xx, psd_yy)
else:
raise RuntimeError("This should never happen.")
# get the connectivity scores
this_con = method.con_scores
if this_con.shape[0] != n_cons:
raise ValueError(
"First dimension of connectivity scores must be "
"the same as the number of connections"
)
if faverage:
if this_con.shape[1] != n_freqs:
raise ValueError(
"2nd dimension of connectivity scores must "
"be the same as the number of frequencies"
)
con_shape = (n_cons, n_bands) + this_con.shape[2:]
this_con_bands = np.empty(con_shape, dtype=this_con.dtype)
for band_idx in range(n_bands):
this_con_bands[:, band_idx] = np.mean(
this_con[:, freq_idx_bands[band_idx]], axis=1
)
this_con = this_con_bands
con.append(this_con)
if indices is None:
# return all-to-all connectivity matrices
logger.info(
" assembling connectivity matrix "
"(filling the upper triangular region of the matrix)"
)
con_flat = con
con = list()
for this_con_flat in con_flat:
this_con = np.zeros(
(n_signals, n_signals) + this_con_flat.shape[1:],
dtype=this_con_flat.dtype,
)
this_con[indices_use] = this_con_flat
con.append(this_con)
logger.info("[Connectivity computation done]")
if n_methods == 1:
# for a single method return connectivity directly
con = con[0]
if faverage:
# for each band we return the frequencies that were averaged
freqs = freqs_bands
return con, freqs, times, n_epochs, n_tapers
|
def spectral_connectivity(
data,
method="coh",
indices=None,
sfreq=2 * np.pi,
mode="multitaper",
fmin=None,
fmax=np.inf,
fskip=0,
faverage=False,
tmin=None,
tmax=None,
mt_bandwidth=None,
mt_adaptive=False,
mt_low_bias=True,
cwt_freqs=None,
cwt_n_cycles=7,
block_size=1000,
n_jobs=1,
verbose=None,
):
"""Compute frequency- and time-frequency-domain connectivity measures.
The connectivity method(s) are specified using the "method" parameter.
All methods are based on estimates of the cross- and power spectral
densities (CSD/PSD) Sxy and Sxx, Syy.
Parameters
----------
data : array-like, shape=(n_epochs, n_signals, n_times) | Epochs
The data from which to compute connectivity. Note that it is also
possible to combine multiple signals by providing a list of tuples,
e.g., data = [(arr_0, stc_0), (arr_1, stc_1), (arr_2, stc_2)],
corresponds to 3 epochs, and arr_* could be an array with the same
number of time points as stc_*. The array-like object can also
be a list/generator of array, shape =(n_signals, n_times),
or a list/generator of SourceEstimate or VolSourceEstimate objects.
method : str | list of str
Connectivity measure(s) to compute.
indices : tuple of array | None
Two arrays with indices of connections for which to compute
connectivity. If None, all connections are computed.
sfreq : float
The sampling frequency.
mode : str
Spectrum estimation mode can be either: 'multitaper', 'fourier', or
'cwt_morlet'.
fmin : float | tuple of float
The lower frequency of interest. Multiple bands are defined using
a tuple, e.g., (8., 20.) for two bands with 8Hz and 20Hz lower freq.
If None the frequency corresponding to an epoch length of 5 cycles
is used.
fmax : float | tuple of float
        The upper frequency of interest. Multiple bands are defined using
        a tuple, e.g. (13., 30.) for two bands with 13Hz and 30Hz upper freq.
fskip : int
Omit every "(fskip + 1)-th" frequency bin to decimate in frequency
domain.
faverage : bool
Average connectivity scores for each frequency band. If True,
the output freqs will be a list with arrays of the frequencies
that were averaged.
tmin : float | None
Time to start connectivity estimation. Note: when "data" is an array,
the first sample is assumed to be at time 0. For other types
(Epochs, etc.), the time information contained in the object is used
to compute the time indices.
tmax : float | None
Time to end connectivity estimation. Note: when "data" is an array,
the first sample is assumed to be at time 0. For other types
(Epochs, etc.), the time information contained in the object is used
to compute the time indices.
mt_bandwidth : float | None
The bandwidth of the multitaper windowing function in Hz.
Only used in 'multitaper' mode.
mt_adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD.
Only used in 'multitaper' mode.
mt_low_bias : bool
Only use tapers with more than 90%% spectral concentration within
bandwidth. Only used in 'multitaper' mode.
cwt_freqs : array
Array of frequencies of interest. Only used in 'cwt_morlet' mode.
cwt_n_cycles : float | array of float
Number of cycles. Fixed number or one per frequency. Only used in
'cwt_morlet' mode.
block_size : int
How many connections to compute at once (higher numbers are faster
but require more memory).
n_jobs : int
How many epochs to process in parallel.
%(verbose)s
Returns
-------
con : array | list of array
Computed connectivity measure(s). The shape of each array is either
(n_signals, n_signals, n_freqs) mode: 'multitaper' or 'fourier'
(n_signals, n_signals, n_freqs, n_times) mode: 'cwt_morlet'
when "indices" is None, or
(n_con, n_freqs) mode: 'multitaper' or 'fourier'
(n_con, n_freqs, n_times) mode: 'cwt_morlet'
when "indices" is specified and "n_con = len(indices[0])".
freqs : array
Frequency points at which the connectivity was computed.
times : array
Time points for which the connectivity was computed.
n_epochs : int
Number of epochs used for computation.
n_tapers : int
The number of DPSS tapers used. Only defined in 'multitaper' mode.
Otherwise None is returned.
Notes
-----
The spectral densities can be estimated using a multitaper method with
digital prolate spheroidal sequence (DPSS) windows, a discrete Fourier
transform with Hanning windows, or a continuous wavelet transform using
Morlet wavelets. The spectral estimation mode is specified using the
"mode" parameter.
By default, the connectivity between all signals is computed (only
connections corresponding to the lower-triangular part of the
connectivity matrix). If one is only interested in the connectivity
between some signals, the "indices" parameter can be used. For example,
to compute the connectivity between the signal with index 0 and signals
"2, 3, 4" (a total of 3 connections) one can use the following::
indices = (np.array([0, 0, 0]), # row indices
np.array([2, 3, 4])) # col indices
con_flat = spectral_connectivity(data, method='coh',
indices=indices, ...)
In this case con_flat.shape = (3, n_freqs). The connectivity scores are
    in the same order as the defined indices.
**Supported Connectivity Measures**
The connectivity method(s) is specified using the "method" parameter. The
following methods are supported (note: ``E[]`` denotes average over
epochs). Multiple measures can be computed at once by using a list/tuple,
e.g., ``['coh', 'pli']`` to compute coherence and PLI.
'coh' : Coherence given by::
| E[Sxy] |
C = ---------------------
sqrt(E[Sxx] * E[Syy])
'cohy' : Coherency given by::
E[Sxy]
C = ---------------------
sqrt(E[Sxx] * E[Syy])
'imcoh' : Imaginary coherence [1]_ given by::
Im(E[Sxy])
C = ----------------------
sqrt(E[Sxx] * E[Syy])
'plv' : Phase-Locking Value (PLV) [2]_ given by::
PLV = |E[Sxy/|Sxy|]|
    'ciplv' : corrected imaginary PLV (ciPLV) [3]_ given by::
|E[Im(Sxy/|Sxy|)]|
ciPLV = ------------------------------------
sqrt(1 - |E[real(Sxy/|Sxy|)]| ** 2)
'ppc' : Pairwise Phase Consistency (PPC), an unbiased estimator
of squared PLV [4]_.
'pli' : Phase Lag Index (PLI) [5]_ given by::
PLI = |E[sign(Im(Sxy))]|
'pli2_unbiased' : Unbiased estimator of squared PLI [6]_.
'wpli' : Weighted Phase Lag Index (WPLI) [6]_ given by::
|E[Im(Sxy)]|
WPLI = ------------------
E[|Im(Sxy)|]
'wpli2_debiased' : Debiased estimator of squared WPLI [6]_.
References
----------
.. [1] Nolte et al. "Identifying true brain interaction from EEG data using
the imaginary part of coherency" Clinical neurophysiology, vol. 115,
no. 10, pp. 2292-2307, Oct. 2004.
.. [2] Lachaux et al. "Measuring phase synchrony in brain signals" Human
brain mapping, vol. 8, no. 4, pp. 194-208, Jan. 1999.
.. [3] Bruña et al. "Phase locking value revisited: teaching new tricks to
an old dog" Journal of Neural Engineering, vol. 15, no. 5, pp.
056011 , Jul. 2018.
.. [4] Vinck et al. "The pairwise phase consistency: a bias-free measure of
rhythmic neuronal synchronization" NeuroImage, vol. 51, no. 1,
pp. 112-122, May 2010.
.. [5] Stam et al. "Phase lag index: assessment of functional connectivity
from multi channel EEG and MEG with diminished bias from common
sources" Human brain mapping, vol. 28, no. 11, pp. 1178-1193,
Nov. 2007.
.. [6] Vinck et al. "An improved index of phase-synchronization for
electro-physiological data in the presence of volume-conduction,
noise and sample-size bias" NeuroImage, vol. 55, no. 4,
pp. 1548-1565, Apr. 2011.
"""
if n_jobs != 1:
parallel, my_epoch_spectral_connectivity, _ = parallel_func(
_epoch_spectral_connectivity, n_jobs, verbose=verbose
)
# format fmin and fmax and check inputs
if fmin is None:
fmin = -np.inf # set it to -inf, so we can adjust it later
fmin = np.array((fmin,), dtype=float).ravel()
fmax = np.array((fmax,), dtype=float).ravel()
if len(fmin) != len(fmax):
raise ValueError("fmin and fmax must have the same length")
if np.any(fmin > fmax):
raise ValueError("fmax must be larger than fmin")
n_bands = len(fmin)
# assign names to connectivity methods
if not isinstance(method, (list, tuple)):
method = [method] # make it a list so we can iterate over it
# handle connectivity estimators
(con_method_types, n_methods, accumulate_psd, n_comp_args) = _check_estimators(
method=method, mode=mode
)
if isinstance(data, BaseEpochs):
times_in = data.times # input times for Epochs input type
sfreq = data.info["sfreq"]
# loop over data; it could be a generator that returns
# (n_signals x n_times) arrays or SourceEstimates
epoch_idx = 0
logger.info("Connectivity computation...")
for epoch_block in _get_n_epochs(data, n_jobs):
if epoch_idx == 0:
            # initialize everything: times and frequencies
(
n_cons,
times,
n_times,
times_in,
n_times_in,
tmin_idx,
tmax_idx,
n_freqs,
freq_mask,
freqs,
freqs_bands,
freq_idx_bands,
n_signals,
indices_use,
) = _prepare_connectivity(
epoch_block=epoch_block,
tmin=tmin,
tmax=tmax,
fmin=fmin,
fmax=fmax,
sfreq=sfreq,
indices=indices,
mode=mode,
fskip=fskip,
n_bands=n_bands,
cwt_freqs=cwt_freqs,
faverage=faverage,
)
# get the window function, wavelets, etc for different modes
(spectral_params, mt_adaptive, n_times_spectrum, n_tapers) = (
_assemble_spectral_params(
mode=mode,
n_times=n_times,
mt_adaptive=mt_adaptive,
mt_bandwidth=mt_bandwidth,
sfreq=sfreq,
mt_low_bias=mt_low_bias,
cwt_n_cycles=cwt_n_cycles,
cwt_freqs=cwt_freqs,
freqs=freqs,
freq_mask=freq_mask,
)
)
# unique signals for which we actually need to compute PSD etc.
sig_idx = np.unique(np.r_[indices_use[0], indices_use[1]])
# map indices to unique indices
idx_map = [np.searchsorted(sig_idx, ind) for ind in indices_use]
# allocate space to accumulate PSD
if accumulate_psd:
if n_times_spectrum == 0:
psd_shape = (len(sig_idx), n_freqs)
else:
psd_shape = (len(sig_idx), n_freqs, n_times_spectrum)
psd = np.zeros(psd_shape)
else:
psd = None
# create instances of the connectivity estimators
con_methods = [
mtype(n_cons, n_freqs, n_times_spectrum) for mtype in con_method_types
]
sep = ", "
metrics_str = sep.join([meth.name for meth in con_methods])
logger.info(" the following metrics will be computed: %s" % metrics_str)
# check dimensions and time scale
for this_epoch in epoch_block:
_get_and_verify_data_sizes(this_epoch, n_signals, n_times_in, times_in)
call_params = dict(
sig_idx=sig_idx,
tmin_idx=tmin_idx,
tmax_idx=tmax_idx,
sfreq=sfreq,
mode=mode,
freq_mask=freq_mask,
idx_map=idx_map,
block_size=block_size,
psd=psd,
accumulate_psd=accumulate_psd,
mt_adaptive=mt_adaptive,
con_method_types=con_method_types,
con_methods=con_methods if n_jobs == 1 else None,
n_signals=n_signals,
n_times=n_times,
accumulate_inplace=True if n_jobs == 1 else False,
)
call_params.update(**spectral_params)
if n_jobs == 1:
# no parallel processing
for this_epoch in epoch_block:
logger.info(" computing connectivity for epoch %d" % (epoch_idx + 1))
# con methods and psd are updated inplace
_epoch_spectral_connectivity(data=this_epoch, **call_params)
epoch_idx += 1
else:
# process epochs in parallel
logger.info(
" computing connectivity for epochs %d..%d"
% (epoch_idx + 1, epoch_idx + len(epoch_block))
)
out = parallel(
my_epoch_spectral_connectivity(data=this_epoch, **call_params)
for this_epoch in epoch_block
)
# do the accumulation
for this_out in out:
for method, parallel_method in zip(con_methods, this_out[0]):
method.combine(parallel_method)
if accumulate_psd:
psd += this_out[1]
epoch_idx += len(epoch_block)
# normalize
n_epochs = epoch_idx
if accumulate_psd:
psd /= n_epochs
# compute final connectivity scores
con = list()
for method, n_args in zip(con_methods, n_comp_args):
# future estimators will need to be handled here
if n_args == 3:
# compute all scores at once
method.compute_con(slice(0, n_cons), n_epochs)
elif n_args == 5:
# compute scores block-wise to save memory
for i in range(0, n_cons, block_size):
con_idx = slice(i, i + block_size)
psd_xx = psd[idx_map[0][con_idx]]
psd_yy = psd[idx_map[1][con_idx]]
method.compute_con(con_idx, n_epochs, psd_xx, psd_yy)
else:
raise RuntimeError("This should never happen.")
# get the connectivity scores
this_con = method.con_scores
if this_con.shape[0] != n_cons:
raise ValueError(
"First dimension of connectivity scores must be "
"the same as the number of connections"
)
if faverage:
if this_con.shape[1] != n_freqs:
raise ValueError(
"2nd dimension of connectivity scores must "
"be the same as the number of frequencies"
)
con_shape = (n_cons, n_bands) + this_con.shape[2:]
this_con_bands = np.empty(con_shape, dtype=this_con.dtype)
for band_idx in range(n_bands):
this_con_bands[:, band_idx] = np.mean(
this_con[:, freq_idx_bands[band_idx]], axis=1
)
this_con = this_con_bands
con.append(this_con)
if indices is None:
# return all-to-all connectivity matrices
logger.info(
" assembling connectivity matrix "
"(filling the upper triangular region of the matrix)"
)
con_flat = con
con = list()
for this_con_flat in con_flat:
this_con = np.zeros(
(n_signals, n_signals) + this_con_flat.shape[1:],
dtype=this_con_flat.dtype,
)
this_con[indices_use] = this_con_flat
con.append(this_con)
logger.info("[Connectivity computation done]")
if n_methods == 1:
# for a single method return connectivity directly
con = con[0]
if faverage:
# for each band we return the frequencies that were averaged
freqs = freqs_bands
return con, freqs, times, n_epochs, n_tapers
|
https://github.com/mne-tools/mne-python/issues/8824
|
Connectivity computation...
Traceback (most recent call last):
File "/home/ap/PycharmProjects/eeg_processing/conn_example.py", line 30, in <module>
faverage=True, tmin=tmin, tmax=tmax, mt_adaptive=False, n_jobs=1)
File "<decorator-gen-387>", line 21, in spectral_connectivity
File "/home/ap/miniconda3/envs/MNE/lib/python3.7/site-packages/mne/connectivity/spectral.py", line 789, in spectral_connectivity
cwt_freqs=cwt_freqs, faverage=faverage)
File "/home/ap/miniconda3/envs/MNE/lib/python3.7/site-packages/mne/connectivity/spectral.py", line 948, in _prepare_connectivity
mask = _time_mask(times_in, tmin, tmax, sfreq=sfreq)
File "/home/ap/miniconda3/envs/MNE/lib/python3.7/site-packages/mne/utils/numerics.py", line 499, in _time_mask
% (orig_tmin, orig_tmax, extra, times[0], times[-1]))
ValueError: No samples remain when using tmin=-0.5 and tmax=-0.1 (original time bounds are [0.0, 2.4974401644798485])
|
ValueError
|
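For array input, spectral_connectivity assumes the first sample sits at t=0, which is exactly what the reported ValueError is about: a negative tmin/tmax window selects no samples. A hypothetical reproduction (the shapes and values here are illustrative, not taken from the issue):

import numpy as np

rng = np.random.RandomState(0)
data = rng.randn(10, 4, 600)  # (n_epochs, n_signals, n_times)
sfreq = 250.0

# Valid windows lie in [0, n_times / sfreq) for plain arrays:
# spectral_connectivity(data, method='coh', sfreq=sfreq,
#                       tmin=0.1, tmax=1.5)    # selects samples
# spectral_connectivity(data, method='coh', sfreq=sfreq,
#                       tmin=-0.5, tmax=-0.1)  # ValueError: no samples remain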
def _prepare_connectivity(
epoch_block,
times_in,
tmin,
tmax,
fmin,
fmax,
sfreq,
indices,
mode,
fskip,
n_bands,
cwt_freqs,
faverage,
):
"""Check and precompute dimensions of results data."""
rfftfreq = _import_fft("rfftfreq")
first_epoch = epoch_block[0]
# get the data size and time scale
n_signals, n_times_in, times_in = _get_and_verify_data_sizes(
first_epoch, sfreq, times=times_in
)
n_times_in = len(times_in)
if tmin is not None and tmin < times_in[0]:
warn(
"start time tmin=%0.2f s outside of the time scope of the data "
"[%0.2f s, %0.2f s]" % (tmin, times_in[0], times_in[-1])
)
if tmax is not None and tmax > times_in[-1]:
warn(
"stop time tmax=%0.2f s outside of the time scope of the data "
"[%0.2f s, %0.2f s]" % (tmax, times_in[0], times_in[-1])
)
mask = _time_mask(times_in, tmin, tmax, sfreq=sfreq)
tmin_idx, tmax_idx = np.where(mask)[0][[0, -1]]
tmax_idx += 1
tmin_true = times_in[tmin_idx]
tmax_true = times_in[tmax_idx - 1] # time of last point used
times = times_in[tmin_idx:tmax_idx]
n_times = len(times)
if indices is None:
logger.info("only using indices for lower-triangular matrix")
# only compute r for lower-triangular region
indices_use = np.tril_indices(n_signals, -1)
else:
indices_use = check_indices(indices)
# number of connectivities to compute
n_cons = len(indices_use[0])
logger.info(" computing connectivity for %d connections" % n_cons)
logger.info(
" using t=%0.3fs..%0.3fs for estimation (%d points)"
% (tmin_true, tmax_true, n_times)
)
# get frequencies of interest for the different modes
if mode in ("multitaper", "fourier"):
# fmin fmax etc is only supported for these modes
# decide which frequencies to keep
freqs_all = rfftfreq(n_times, 1.0 / sfreq)
elif mode == "cwt_morlet":
# cwt_morlet mode
if cwt_freqs is None:
raise ValueError("define frequencies of interest using cwt_freqs")
else:
cwt_freqs = cwt_freqs.astype(np.float64)
if any(cwt_freqs > (sfreq / 2.0)):
raise ValueError(
"entries in cwt_freqs cannot be larger than Nyquist (sfreq / 2)"
)
freqs_all = cwt_freqs
else:
raise ValueError("mode has an invalid value")
# check that fmin corresponds to at least 5 cycles
dur = float(n_times) / sfreq
five_cycle_freq = 5.0 / dur
if len(fmin) == 1 and fmin[0] == -np.inf:
# we use the 5 cycle freq. as default
fmin = np.array([five_cycle_freq])
else:
if np.any(fmin < five_cycle_freq):
warn(
"fmin=%0.3f Hz corresponds to %0.3f < 5 cycles "
"based on the epoch length %0.3f sec, need at least %0.3f "
"sec epochs or fmin=%0.3f. Spectrum estimate will be "
"unreliable."
% (
np.min(fmin),
dur * np.min(fmin),
dur,
5.0 / np.min(fmin),
five_cycle_freq,
)
)
# create a frequency mask for all bands
freq_mask = np.zeros(len(freqs_all), dtype=bool)
for f_lower, f_upper in zip(fmin, fmax):
freq_mask |= (freqs_all >= f_lower) & (freqs_all <= f_upper)
# possibly skip frequency points
for pos in range(fskip):
freq_mask[pos + 1 :: fskip + 1] = False
# the frequency points where we compute connectivity
freqs = freqs_all[freq_mask]
n_freqs = len(freqs)
# get the freq. indices and points for each band
freq_idx_bands = [
np.where((freqs >= fl) & (freqs <= fu))[0] for fl, fu in zip(fmin, fmax)
]
freqs_bands = [freqs[freq_idx] for freq_idx in freq_idx_bands]
# make sure we don't have empty bands
for i, n_f_band in enumerate([len(f) for f in freqs_bands]):
if n_f_band == 0:
raise ValueError(
"There are no frequency points between "
"%0.1fHz and %0.1fHz. Change the band "
"specification (fmin, fmax) or the "
"frequency resolution." % (fmin[i], fmax[i])
)
if n_bands == 1:
logger.info(
" frequencies: %0.1fHz..%0.1fHz (%d points)"
% (freqs_bands[0][0], freqs_bands[0][-1], n_freqs)
)
else:
logger.info(" computing connectivity for the bands:")
for i, bfreqs in enumerate(freqs_bands):
logger.info(
" band %d: %0.1fHz..%0.1fHz "
"(%d points)" % (i + 1, bfreqs[0], bfreqs[-1], len(bfreqs))
)
if faverage:
logger.info(" connectivity scores will be averaged for each band")
return (
n_cons,
times,
n_times,
times_in,
n_times_in,
tmin_idx,
tmax_idx,
n_freqs,
freq_mask,
freqs,
freqs_bands,
freq_idx_bands,
n_signals,
indices_use,
)
|
def _prepare_connectivity(
epoch_block,
tmin,
tmax,
fmin,
fmax,
sfreq,
indices,
mode,
fskip,
n_bands,
cwt_freqs,
faverage,
):
"""Check and precompute dimensions of results data."""
rfftfreq = _import_fft("rfftfreq")
first_epoch = epoch_block[0]
# get the data size and time scale
n_signals, n_times_in, times_in = _get_and_verify_data_sizes(first_epoch)
if times_in is None:
# we are not using Epochs or SourceEstimate(s) as input
times_in = np.linspace(0.0, n_times_in / sfreq, n_times_in, endpoint=False)
n_times_in = len(times_in)
mask = _time_mask(times_in, tmin, tmax, sfreq=sfreq)
tmin_idx, tmax_idx = np.where(mask)[0][[0, -1]]
tmax_idx += 1
tmin_true = times_in[tmin_idx]
tmax_true = times_in[tmax_idx - 1] # time of last point used
times = times_in[tmin_idx:tmax_idx]
n_times = len(times)
if indices is None:
logger.info("only using indices for lower-triangular matrix")
# only compute r for lower-triangular region
indices_use = np.tril_indices(n_signals, -1)
else:
indices_use = check_indices(indices)
# number of connectivities to compute
n_cons = len(indices_use[0])
logger.info(" computing connectivity for %d connections" % n_cons)
logger.info(
" using t=%0.3fs..%0.3fs for estimation (%d points)"
% (tmin_true, tmax_true, n_times)
)
# get frequencies of interest for the different modes
if mode in ("multitaper", "fourier"):
# fmin fmax etc is only supported for these modes
# decide which frequencies to keep
freqs_all = rfftfreq(n_times, 1.0 / sfreq)
elif mode == "cwt_morlet":
# cwt_morlet mode
if cwt_freqs is None:
raise ValueError("define frequencies of interest using cwt_freqs")
else:
cwt_freqs = cwt_freqs.astype(np.float64)
if any(cwt_freqs > (sfreq / 2.0)):
raise ValueError(
"entries in cwt_freqs cannot be larger than Nyquist (sfreq / 2)"
)
freqs_all = cwt_freqs
else:
raise ValueError("mode has an invalid value")
# check that fmin corresponds to at least 5 cycles
dur = float(n_times) / sfreq
five_cycle_freq = 5.0 / dur
if len(fmin) == 1 and fmin[0] == -np.inf:
# we use the 5 cycle freq. as default
fmin = np.array([five_cycle_freq])
else:
if np.any(fmin < five_cycle_freq):
warn(
"fmin=%0.3f Hz corresponds to %0.3f < 5 cycles "
"based on the epoch length %0.3f sec, need at least %0.3f "
"sec epochs or fmin=%0.3f. Spectrum estimate will be "
"unreliable."
% (
np.min(fmin),
dur * np.min(fmin),
dur,
5.0 / np.min(fmin),
five_cycle_freq,
)
)
# create a frequency mask for all bands
freq_mask = np.zeros(len(freqs_all), dtype=bool)
for f_lower, f_upper in zip(fmin, fmax):
freq_mask |= (freqs_all >= f_lower) & (freqs_all <= f_upper)
# possibly skip frequency points
for pos in range(fskip):
freq_mask[pos + 1 :: fskip + 1] = False
# the frequency points where we compute connectivity
freqs = freqs_all[freq_mask]
n_freqs = len(freqs)
# get the freq. indices and points for each band
freq_idx_bands = [
np.where((freqs >= fl) & (freqs <= fu))[0] for fl, fu in zip(fmin, fmax)
]
freqs_bands = [freqs[freq_idx] for freq_idx in freq_idx_bands]
# make sure we don't have empty bands
for i, n_f_band in enumerate([len(f) for f in freqs_bands]):
if n_f_band == 0:
raise ValueError(
"There are no frequency points between "
"%0.1fHz and %0.1fHz. Change the band "
"specification (fmin, fmax) or the "
"frequency resolution." % (fmin[i], fmax[i])
)
if n_bands == 1:
logger.info(
" frequencies: %0.1fHz..%0.1fHz (%d points)"
% (freqs_bands[0][0], freqs_bands[0][-1], n_freqs)
)
else:
logger.info(" computing connectivity for the bands:")
for i, bfreqs in enumerate(freqs_bands):
logger.info(
" band %d: %0.1fHz..%0.1fHz "
"(%d points)" % (i + 1, bfreqs[0], bfreqs[-1], len(bfreqs))
)
if faverage:
logger.info(" connectivity scores will be averaged for each band")
return (
n_cons,
times,
n_times,
times_in,
n_times_in,
tmin_idx,
tmax_idx,
n_freqs,
freq_mask,
freqs,
freqs_bands,
freq_idx_bands,
n_signals,
indices_use,
)
|
https://github.com/mne-tools/mne-python/issues/8824
|
Connectivity computation...
Traceback (most recent call last):
File "/home/ap/PycharmProjects/eeg_processing/conn_example.py", line 30, in <module>
faverage=True, tmin=tmin, tmax=tmax, mt_adaptive=False, n_jobs=1)
File "<decorator-gen-387>", line 21, in spectral_connectivity
File "/home/ap/miniconda3/envs/MNE/lib/python3.7/site-packages/mne/connectivity/spectral.py", line 789, in spectral_connectivity
cwt_freqs=cwt_freqs, faverage=faverage)
File "/home/ap/miniconda3/envs/MNE/lib/python3.7/site-packages/mne/connectivity/spectral.py", line 948, in _prepare_connectivity
mask = _time_mask(times_in, tmin, tmax, sfreq=sfreq)
File "/home/ap/miniconda3/envs/MNE/lib/python3.7/site-packages/mne/utils/numerics.py", line 499, in _time_mask
% (orig_tmin, orig_tmax, extra, times[0], times[-1]))
ValueError: No samples remain when using tmin=-0.5 and tmax=-0.1 (original time bounds are [0.0, 2.4974401644798485])
|
ValueError
|
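A simplified stand-in for the _time_mask step where the traceback above originates (the real helper also snaps tmin/tmax to sample boundaries using sfreq); it shows why tmin=-0.5, tmax=-0.1 on a [0, 2.5) s time axis leaves an empty mask:

import numpy as np

times = np.arange(0, 2.5, 1.0 / 250.0)  # 0.0 .. 2.496 s
tmin, tmax = -0.5, -0.1
mask = (times >= tmin) & (times <= tmax)
if not mask.any():
    raise ValueError(
        "No samples remain when using tmin=%s and tmax=%s "
        "(original time bounds are [%s, %s])"
        % (tmin, tmax, times[0], times[-1])
    )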