id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
51
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
225,000
DataBiosphere/toil
src/toil/lib/misc.py
mean
def mean(xs):
    """
    Compute the arithmetic mean of a sequence of values.

    >>> mean([2,4,4,4,5,5,7,9])
    5.0
    >>> mean([9,10,11,7,13])
    10.0
    >>> mean([1,1,10,19,19])
    10.0
    >>> mean([10,10,10,10,10])
    10.0
    >>> mean([1,"b"])
    Traceback (most recent call last):
    ...
    ValueError: Input can't have non-numeric elements
    >>> mean([])
    Traceback (most recent call last):
    ...
    ValueError: Input can't be empty
    """
    try:
        # sum() raises TypeError on non-numeric elements; dividing by a
        # zero length raises ZeroDivisionError. Both are translated into
        # ValueError so callers see one consistent exception type.
        total = sum(xs)
        count = len(xs)
        return total / float(count)
    except TypeError:
        raise ValueError("Input can't have non-numeric elements")
    except ZeroDivisionError:
        raise ValueError("Input can't be empty")
python
def mean(xs): try: return sum(xs) / float(len(xs)) except TypeError: raise ValueError("Input can't have non-numeric elements") except ZeroDivisionError: raise ValueError("Input can't be empty")
[ "def", "mean", "(", "xs", ")", ":", "try", ":", "return", "sum", "(", "xs", ")", "/", "float", "(", "len", "(", "xs", ")", ")", "except", "TypeError", ":", "raise", "ValueError", "(", "\"Input can't have non-numeric elements\"", ")", "except", "ZeroDivisio...
Return the mean value of a sequence of values. >>> mean([2,4,4,4,5,5,7,9]) 5.0 >>> mean([9,10,11,7,13]) 10.0 >>> mean([1,1,10,19,19]) 10.0 >>> mean([10,10,10,10,10]) 10.0 >>> mean([1,"b"]) Traceback (most recent call last): ... ValueError: Input can't have non-numeric elements >>> mean([]) Traceback (most recent call last): ... ValueError: Input can't be empty
[ "Return", "the", "mean", "value", "of", "a", "sequence", "of", "values", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/lib/misc.py#L19-L45
225,001
DataBiosphere/toil
src/toil/lib/misc.py
std_dev
def std_dev(xs):
    """
    Return the population standard deviation of the given iterable of numbers.

    From http://rosettacode.org/wiki/Standard_deviation#Python

    An empty list, or a list with non-numeric elements will raise a TypeError.

    >>> std_dev([2,4,4,4,5,5,7,9])
    2.0
    >>> std_dev([9,10,11,7,13])
    2.0
    >>> std_dev([1,1,10,19,19])
    8.049844718999243
    >>> std_dev({1,1,10,19,19}) == std_dev({19,10,1})
    True
    >>> std_dev([10,10,10,10,10])
    0.0
    >>> std_dev([1,"b"])
    Traceback (most recent call last):
    ...
    ValueError: Input can't have non-numeric elements
    >>> std_dev([])
    Traceback (most recent call last):
    ...
    ValueError: Input can't be empty
    """
    avg = mean(xs)  # mean() also validates the input for us
    variance = sum((value - avg) ** 2 for value in xs) / float(len(xs))
    return sqrt(variance)
python
def std_dev(xs): m = mean(xs) # this checks our pre-conditions, too return sqrt(sum((x - m) ** 2 for x in xs) / float(len(xs)))
[ "def", "std_dev", "(", "xs", ")", ":", "m", "=", "mean", "(", "xs", ")", "# this checks our pre-conditions, too", "return", "sqrt", "(", "sum", "(", "(", "x", "-", "m", ")", "**", "2", "for", "x", "in", "xs", ")", "/", "float", "(", "len", "(", "...
Returns the standard deviation of the given iterable of numbers. From http://rosettacode.org/wiki/Standard_deviation#Python An empty list, or a list with non-numeric elements will raise a TypeError. >>> std_dev([2,4,4,4,5,5,7,9]) 2.0 >>> std_dev([9,10,11,7,13]) 2.0 >>> std_dev([1,1,10,19,19]) 8.049844718999243 >>> std_dev({1,1,10,19,19}) == std_dev({19,10,1}) True >>> std_dev([10,10,10,10,10]) 0.0 >>> std_dev([1,"b"]) Traceback (most recent call last): ... ValueError: Input can't have non-numeric elements >>> std_dev([]) Traceback (most recent call last): ... ValueError: Input can't be empty
[ "Returns", "the", "standard", "deviation", "of", "the", "given", "iterable", "of", "numbers", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/lib/misc.py#L48-L82
225,002
DataBiosphere/toil
src/toil/lib/misc.py
partition_seq
def partition_seq(seq, size):
    """
    Splits a sequence into an iterable of subsequences. All subsequences are of the given size,
    except the last one, which may be smaller. If the input list is modified while the returned
    list is processed, the behavior of the program is undefined.

    :param seq: the list to split
    :param size: the desired size of the sublists, must be > 0
    :type size: int
    :return: an iterable of sublists

    >>> list(partition_seq("",1))
    []
    >>> list(partition_seq("abcde",2))
    ['ab', 'cd', 'e']
    >>> list(partition_seq("abcd",2))
    ['ab', 'cd']
    >>> list(partition_seq("abcde",1))
    ['a', 'b', 'c', 'd', 'e']
    >>> list(partition_seq("abcde",0))
    Traceback (most recent call last):
    ...
    ValueError: Size must be greater than 0
    >>> l=[1,2,3,4]
    >>> i = iter( partition_seq(l,2) )
    >>> l.pop(0)
    1
    >>> next(i)
    [2, 3]
    """
    if size < 1:
        raise ValueError('Size must be greater than 0')
    # Use range() rather than the Python-2-only xrange(): the original
    # raised NameError on Python 3. Behavior is otherwise identical.
    return (seq[pos:pos + size] for pos in range(0, len(seq), size))
python
def partition_seq(seq, size): if size < 1: raise ValueError('Size must be greater than 0') return (seq[pos:pos + size] for pos in xrange(0, len(seq), size))
[ "def", "partition_seq", "(", "seq", ",", "size", ")", ":", "if", "size", "<", "1", ":", "raise", "ValueError", "(", "'Size must be greater than 0'", ")", "return", "(", "seq", "[", "pos", ":", "pos", "+", "size", "]", "for", "pos", "in", "xrange", "(",...
Splits a sequence into an iterable of subsequences. All subsequences are of the given size, except the last one, which may be smaller. If the input list is modified while the returned list is processed, the behavior of the program is undefined. :param seq: the list to split :param size: the desired size of the sublists, must be > 0 :type size: int :return: an iterable of sublists >>> list(partition_seq("",1)) [] >>> list(partition_seq("abcde",2)) ['ab', 'cd', 'e'] >>> list(partition_seq("abcd",2)) ['ab', 'cd'] >>> list(partition_seq("abcde",1)) ['a', 'b', 'c', 'd', 'e'] >>> list(partition_seq("abcde",0)) Traceback (most recent call last): ... ValueError: Size must be greater than 0 >>> l=[1,2,3,4] >>> i = iter( partition_seq(l,2) ) >>> l.pop(0) 1 >>> next(i) [2, 3]
[ "Splits", "a", "sequence", "into", "an", "iterable", "of", "subsequences", ".", "All", "subsequences", "are", "of", "the", "given", "size", "except", "the", "last", "one", "which", "may", "be", "smaller", ".", "If", "the", "input", "list", "is", "modified"...
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/lib/misc.py#L85-L117
225,003
DataBiosphere/toil
src/toil/lib/fnmatch.py
filter
def filter(names, pat):
    """Return the subset of the list NAMES that match PAT"""
    import os, posixpath
    pat = os.path.normcase(pat)
    if pat not in _cache:
        # Compile the translated pattern, evicting the whole cache first
        # if it has grown past its limit (same policy as stdlib fnmatch).
        res = translate(pat)
        if len(_cache) >= _MAXCACHE:
            _cache.clear()
        _cache[pat] = re.compile(res)
    match = _cache[pat].match
    if os.path is posixpath:
        # normcase on posix is a no-op, so skip it inside the loop.
        return [name for name in names if match(name)]
    return [name for name in names if match(os.path.normcase(name))]
python
def filter( names, pat ): import os, posixpath result = [ ] pat = os.path.normcase( pat ) if not pat in _cache: res = translate( pat ) if len( _cache ) >= _MAXCACHE: _cache.clear( ) _cache[ pat ] = re.compile( res ) match = _cache[ pat ].match if os.path is posixpath: # normcase on posix is NOP. Optimize it away from the loop. for name in names: if match( name ): result.append( name ) else: for name in names: if match( os.path.normcase( name ) ): result.append( name ) return result
[ "def", "filter", "(", "names", ",", "pat", ")", ":", "import", "os", ",", "posixpath", "result", "=", "[", "]", "pat", "=", "os", ".", "path", ".", "normcase", "(", "pat", ")", "if", "not", "pat", "in", "_cache", ":", "res", "=", "translate", "("...
Return the subset of the list NAMES that match PAT
[ "Return", "the", "subset", "of", "the", "list", "NAMES", "that", "match", "PAT" ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/lib/fnmatch.py#L86-L107
225,004
DataBiosphere/toil
src/toil/provisioners/azure/azureProvisioner.py
AzureProvisioner._readClusterSettings
def _readClusterSettings(self):
    """
    Read the current instance's meta-data to get the cluster settings.

    Queries the Azure instance metadata service for the leader's location,
    resource group (used as the cluster name) and tags, records the
    leader's private IP, and registers the static workers in /etc/hosts.
    """
    # get the leader metadata from the Azure instance metadata service
    mdUrl = "http://169.254.169.254/metadata/instance?api-version=2017-08-01"
    header = {'Metadata': 'True'}
    request = urllib.request.Request(url=mdUrl, headers=header)
    response = urllib.request.urlopen(request)
    data = response.read()
    dataStr = data.decode("utf-8")
    metadata = json.loads(dataStr)

    # set values from the leader meta-data
    self._zone = metadata['compute']['location']
    self.clusterName = metadata['compute']['resourceGroupName']
    tagsStr = metadata['compute']['tags']
    # Tags arrive as "key1:value1;key2:value2". Split on the first colon
    # only, so tag values that themselves contain colons don't break the
    # dict() construction with a ValueError.
    tags = dict(item.split(":", 1) for item in tagsStr.split(";"))
    self._owner = tags.get('owner', 'no-owner')
    leader = self.getLeader()
    self._leaderPrivateIP = leader.privateIP
    self._setSSH()  # create id_rsa.pub file on the leader if it is not there
    self._masterPublicKeyFile = self.LEADER_HOME_DIR + '.ssh/id_rsa.pub'

    # Add static nodes to /etc/hosts since Azure sometimes fails to find
    # them with DNS. Must be a real loop: on Python 3 map() is lazy, so the
    # original map(lambda ...) expression never actually called _addToHosts.
    for worker in self.getProvisionedWorkers(None):
        self._addToHosts(worker)
python
def _readClusterSettings(self): # get the leader metadata mdUrl = "http://169.254.169.254/metadata/instance?api-version=2017-08-01" header = {'Metadata': 'True'} request = urllib.request.Request(url=mdUrl, headers=header) response = urllib.request.urlopen(request) data = response.read() dataStr = data.decode("utf-8") metadata = json.loads(dataStr) # set values from the leader meta-data self._zone = metadata['compute']['location'] self.clusterName = metadata['compute']['resourceGroupName'] tagsStr = metadata['compute']['tags'] tags = dict(item.split(":") for item in tagsStr.split(";")) self._owner = tags.get('owner', 'no-owner') leader = self.getLeader() self._leaderPrivateIP = leader.privateIP self._setSSH() # create id_rsa.pub file on the leader if it is not there self._masterPublicKeyFile = self.LEADER_HOME_DIR + '.ssh/id_rsa.pub' # Add static nodes to /etc/hosts since Azure sometimes fails to find them with DNS map(lambda x: self._addToHosts(x), self.getProvisionedWorkers(None))
[ "def", "_readClusterSettings", "(", "self", ")", ":", "# get the leader metadata", "mdUrl", "=", "\"http://169.254.169.254/metadata/instance?api-version=2017-08-01\"", "header", "=", "{", "'Metadata'", ":", "'True'", "}", "request", "=", "urllib", ".", "request", ".", "...
Read the current instance's meta-data to get the cluster settings.
[ "Read", "the", "current", "instance", "s", "meta", "-", "data", "to", "get", "the", "cluster", "settings", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/azure/azureProvisioner.py#L89-L114
225,005
DataBiosphere/toil
src/toil/provisioners/azure/azureProvisioner.py
AzureProvisioner.launchCluster
def launchCluster(self, leaderNodeType, leaderStorage, owner, **kwargs):
    """
    Launches an Azure cluster using Ansible.
    A resource group is created for the cluster. All the virtual machines are created within this
    resource group. Cloud-config is called during vm creation to create directories and launch the
    appliance.

    The azureStorageCredentials must be passed in kwargs. These credentials allow access to Azure
    jobStores.

    :param leaderNodeType: Azure VM size for the leader.
    :param leaderStorage: disk size for the leader (TODO: not implemented).
    :param owner: owner tag applied to the cluster's resources.
    :raises RuntimeError: if required kwargs are missing or the leader
        repeatedly fails to start.
    """
    self._owner = owner
    self._masterPublicKeyFile = kwargs['publicKeyFile']
    if not self._masterPublicKeyFile:
        raise RuntimeError("The Azure provisioner requires a public key file.")
    storageCredentials = kwargs['azureStorageCredentials']
    if not storageCredentials:
        raise RuntimeError("azureStorageCredentials must be given.")

    self._checkValidClusterName()
    self._checkIfClusterExists()

    # Create the cluster.
    clusterArgs = {
        'resgrp': self.clusterName,  # The resource group, which represents the cluster.
        'region': self._zone
    }
    self.callPlaybook(self.playbook['create-cluster'], clusterArgs, wait=True)

    ansibleArgs = {
        'vmsize': leaderNodeType,
        'resgrp': self.clusterName,  # The resource group, which represents the cluster.
        'region': self._zone,
        'role': "leader",
        'owner': self._owner,  # Just a tag.
        'diskSize': str(leaderStorage),  # TODO: not implemented
        'publickeyfile': self._masterPublicKeyFile  # The users public key to be added to authorized_keys
    }

    # Ansible reads the cloud-config script from a file. delete=False is
    # deliberate: Ansible opens the file by name after this block exits.
    with tempfile.NamedTemporaryFile(delete=False) as t:
        userData = self._getCloudConfigUserData('leader')
        t.write(userData)
        ansibleArgs['cloudconfig'] = t.name

    # Launch the leader VM, retrying since appliance start-up can fail.
    retries = 0
    while True:
        instanceName = 'l' + str(uuid.uuid4())
        ansibleArgs['vmname'] = instanceName
        # Azure limits the name to 24 characters, no dashes.
        ansibleArgs['storagename'] = instanceName.replace('-', '')[:24]
        self.callPlaybook(self.playbook['create'], ansibleArgs, wait=True)

        try:
            leaderNode = self.getLeader()
        except IndexError:
            # Message fixed: previously read "Failed to launcher leader".
            raise RuntimeError("Failed to launch leader")
        self._leaderPrivateIP = leaderNode.privateIP  # IP available as soon as the playbook finishes

        try:
            # Fix for DNS failure.
            self._addToHosts(leaderNode, leaderNode.publicIP)
            leaderNode.waitForNode('toil_leader')  # Make sure leader appliance is up.

            # Transfer credentials
            if storageCredentials is not None:
                fullPathCredentials = os.path.expanduser(storageCredentials)
                if os.path.isfile(fullPathCredentials):
                    leaderNode.injectFile(fullPathCredentials, self.LEADER_HOME_DIR, 'toil_leader')

            ansibleCredentials = '.azure/credentials'
            fullPathAnsibleCredentials = os.path.expanduser('~/' + ansibleCredentials)
            if os.path.isfile(fullPathAnsibleCredentials):
                leaderNode.sshAppliance('mkdir', '-p', self.LEADER_HOME_DIR + '.azure')
                leaderNode.injectFile(fullPathAnsibleCredentials,
                                      self.LEADER_HOME_DIR + ansibleCredentials,
                                      'toil_leader')
            break  # success!
        except RuntimeError as e:
            self._terminateNode(instanceName, False)  # remove failed leader
            retries += 1
            if retries == 3:
                logger.debug("Leader appliance failed to start. Giving up.")
                raise e
            logger.debug("Leader appliance failed to start, retrying. (Error %s)" % e)

    logger.debug('Launched leader')
python
def launchCluster(self, leaderNodeType, leaderStorage, owner, **kwargs): self._owner = owner self._masterPublicKeyFile = kwargs['publicKeyFile'] if not self._masterPublicKeyFile: raise RuntimeError("The Azure provisioner requires a public key file.") storageCredentials = kwargs['azureStorageCredentials'] if not storageCredentials: raise RuntimeError("azureStorageCredentials must be given.") self._checkValidClusterName() self._checkIfClusterExists() # Create the cluster. clusterArgs = { 'resgrp': self.clusterName, # The resource group, which represents the cluster. 'region': self._zone } self.callPlaybook(self.playbook['create-cluster'], clusterArgs, wait=True) ansibleArgs = { 'vmsize': leaderNodeType, 'resgrp': self.clusterName, # The resource group, which represents the cluster. 'region': self._zone, 'role': "leader", 'owner': self._owner, # Just a tag. 'diskSize': str(leaderStorage), # TODO: not implemented 'publickeyfile': self._masterPublicKeyFile # The users public key to be added to authorized_keys } # Ansible reads the cloud-config script from a file. with tempfile.NamedTemporaryFile(delete=False) as t: userData = self._getCloudConfigUserData('leader') t.write(userData) ansibleArgs['cloudconfig'] = t.name # Launch the leader VM. retries = 0 while True: instanceName = 'l' + str(uuid.uuid4()) ansibleArgs['vmname'] = instanceName # Azure limits the name to 24 characters, no dashes. ansibleArgs['storagename'] = instanceName.replace('-', '')[:24] self.callPlaybook(self.playbook['create'], ansibleArgs, wait=True) try: leaderNode = self.getLeader() except IndexError: raise RuntimeError("Failed to launcher leader") self._leaderPrivateIP = leaderNode.privateIP # IP available as soon as the playbook finishes try: # Fix for DNS failure. self._addToHosts(leaderNode, leaderNode.publicIP) leaderNode.waitForNode('toil_leader') # Make sure leader appliance is up. 
# Transfer credentials if storageCredentials is not None: fullPathCredentials = os.path.expanduser(storageCredentials) if os.path.isfile(fullPathCredentials): leaderNode.injectFile(fullPathCredentials, self.LEADER_HOME_DIR, 'toil_leader') ansibleCredentials = '.azure/credentials' fullPathAnsibleCredentials = os.path.expanduser('~/' + ansibleCredentials) if os.path.isfile(fullPathAnsibleCredentials): leaderNode.sshAppliance('mkdir', '-p', self.LEADER_HOME_DIR + '.azure') leaderNode.injectFile(fullPathAnsibleCredentials, self.LEADER_HOME_DIR + ansibleCredentials, 'toil_leader') break #success! except RuntimeError as e: self._terminateNode(instanceName, False) # remove failed leader retries += 1 if retries == 3: logger.debug("Leader appliance failed to start. Giving up.") raise e logger.debug("Leader appliance failed to start, retrying. (Error %s)" % e) logger.debug('Launched leader')
[ "def", "launchCluster", "(", "self", ",", "leaderNodeType", ",", "leaderStorage", ",", "owner", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_owner", "=", "owner", "self", ".", "_masterPublicKeyFile", "=", "kwargs", "[", "'publicKeyFile'", "]", "if", "...
Launches an Azure cluster using Ansible. A resource group is created for the cluster. All the virtual machines are created within this resource group. Cloud-config is called during vm creation to create directories and launch the appliance. The azureStorageCredentials must be passed in kwargs. These credentials allow access to Azure jobStores.
[ "Launches", "an", "Azure", "cluster", "using", "Ansible", ".", "A", "resource", "group", "is", "created", "for", "the", "cluster", ".", "All", "the", "virtual", "machines", "are", "created", "within", "this", "resource", "group", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/azure/azureProvisioner.py#L117-L204
225,006
DataBiosphere/toil
src/toil/provisioners/azure/azureProvisioner.py
AzureProvisioner._checkIfClusterExists
def _checkIfClusterExists(self):
    """
    Try deleting the resource group. This will fail if it exists and raise an exception.

    :raises RuntimeError: re-raised from the playbook run when the resource
        group (and hence a cluster of this name) already exists.
    """
    ansibleArgs = {
        'resgrp': self.clusterName,
        'region': self._zone
    }
    try:
        self.callPlaybook(self.playbook['check-cluster'], ansibleArgs, wait=True)
    except RuntimeError:
        # Log-message typo fixed: "exits" -> "exists".
        logger.info("The cluster could not be created. Try deleting the cluster if it already exists.")
        raise
python
def _checkIfClusterExists(self): ansibleArgs = { 'resgrp': self.clusterName, 'region': self._zone } try: self.callPlaybook(self.playbook['check-cluster'], ansibleArgs, wait=True) except RuntimeError: logger.info("The cluster could not be created. Try deleting the cluster if it already exits.") raise
[ "def", "_checkIfClusterExists", "(", "self", ")", ":", "ansibleArgs", "=", "{", "'resgrp'", ":", "self", ".", "clusterName", ",", "'region'", ":", "self", ".", "_zone", "}", "try", ":", "self", ".", "callPlaybook", "(", "self", ".", "playbook", "[", "'ch...
Try deleting the resource group. This will fail if it exists and raise an exception.
[ "Try", "deleting", "the", "resource", "group", ".", "This", "will", "fail", "if", "it", "exists", "and", "raise", "an", "exception", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/azure/azureProvisioner.py#L206-L218
225,007
DataBiosphere/toil
src/toil/lib/encryption/_nacl.py
encrypt
def encrypt(message, keyPath):
    """
    Encrypts a message given a path to a local file containing a key.

    :param message: The message to be encrypted.
    :param keyPath: A path to a file containing a 256-bit key (and nothing else).
    :type message: bytes
    :type keyPath: str
    :rtype: bytes

    A constant overhead is added to every encrypted message (for the nonce and MAC).

    >>> import tempfile
    >>> k = tempfile.mktemp()
    >>> with open(k, 'wb') as f:
    ...     _ = f.write(nacl.utils.random(SecretBox.KEY_SIZE))
    >>> message = 'test'.encode('utf-8')
    >>> len(encrypt(message, k)) == overhead + len(message)
    True
    >>> import os
    >>> os.remove(k)
    """
    with open(keyPath, 'rb') as f:
        key = f.read()
    if len(key) != SecretBox.KEY_SIZE:
        raise ValueError("Key is %d bytes, but must be exactly %d bytes" % (len(key),
                                                                            SecretBox.KEY_SIZE))
    box = SecretBox(key)
    # Draw the nonce from secure random bits. At 192 bits the chance of a
    # random nonce collision is astronomically small -- far smaller than the
    # chance of a subtle bookkeeping bug causing a nonce reuse. (This
    # approach is recommended in the libsodium documentation.)
    nonce = nacl.utils.random(SecretBox.NONCE_SIZE)
    assert len(nonce) == SecretBox.NONCE_SIZE
    return bytes(box.encrypt(message, nonce))
python
def encrypt(message, keyPath): with open(keyPath, 'rb') as f: key = f.read() if len(key) != SecretBox.KEY_SIZE: raise ValueError("Key is %d bytes, but must be exactly %d bytes" % (len(key), SecretBox.KEY_SIZE)) sb = SecretBox(key) # We generate the nonce using secure random bits. For long enough # nonce size, the chance of a random nonce collision becomes # *much* smaller than the chance of a subtle coding error causing # a nonce reuse. Currently the nonce size is 192 bits--the chance # of a collision is astronomically low. (This approach is # recommended in the libsodium documentation.) nonce = nacl.utils.random(SecretBox.NONCE_SIZE) assert len(nonce) == SecretBox.NONCE_SIZE return bytes(sb.encrypt(message, nonce))
[ "def", "encrypt", "(", "message", ",", "keyPath", ")", ":", "with", "open", "(", "keyPath", ",", "'rb'", ")", "as", "f", ":", "key", "=", "f", ".", "read", "(", ")", "if", "len", "(", "key", ")", "!=", "SecretBox", ".", "KEY_SIZE", ":", "raise", ...
Encrypts a message given a path to a local file containing a key. :param message: The message to be encrypted. :param keyPath: A path to a file containing a 256-bit key (and nothing else). :type message: bytes :type keyPath: str :rtype: bytes A constant overhead is added to every encrypted message (for the nonce and MAC). >>> import tempfile >>> k = tempfile.mktemp() >>> with open(k, 'wb') as f: ... _ = f.write(nacl.utils.random(SecretBox.KEY_SIZE)) >>> message = 'test'.encode('utf-8') >>> len(encrypt(message, k)) == overhead + len(message) True >>> import os >>> os.remove(k)
[ "Encrypts", "a", "message", "given", "a", "path", "to", "a", "local", "file", "containing", "a", "key", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/lib/encryption/_nacl.py#L23-L58
225,008
DataBiosphere/toil
src/toil/lib/memoize.py
memoize
def memoize(f):
    """
    A decorator that memoizes a function result based on its parameters. For example, this can be
    used in place of lazy initialization. If the decorating function is invoked by multiple
    threads, the decorated function may be called more than once with the same arguments.
    """
    # TODO: Recommend that f's arguments be immutable
    cache = {}

    @wraps(f)
    def wrapper(*args):
        # Positional arguments form the cache key; compute on first miss.
        if args not in cache:
            cache[args] = f(*args)
        return cache[args]

    return wrapper
python
def memoize(f): # TODO: Recommend that f's arguments be immutable memory = {} @wraps(f) def new_f(*args): try: return memory[args] except KeyError: r = f(*args) memory[args] = r return r return new_f
[ "def", "memoize", "(", "f", ")", ":", "# TODO: Recommend that f's arguments be immutable", "memory", "=", "{", "}", "@", "wraps", "(", "f", ")", "def", "new_f", "(", "*", "args", ")", ":", "try", ":", "return", "memory", "[", "args", "]", "except", "KeyE...
A decorator that memoizes a function result based on its parameters. For example, this can be used in place of lazy initialization. If the decorating function is invoked by multiple threads, the decorated function may be called more than once with the same arguments.
[ "A", "decorator", "that", "memoizes", "a", "function", "result", "based", "on", "its", "parameters", ".", "For", "example", "this", "can", "be", "used", "in", "place", "of", "lazy", "initialization", ".", "If", "the", "decorating", "function", "is", "invoked...
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/lib/memoize.py#L23-L41
225,009
DataBiosphere/toil
src/toil/lib/memoize.py
sync_memoize
def sync_memoize(f):
    """
    Like memoize, but guarantees that decorated function is only called once, even when multiple
    threads are calling the decorating function with multiple parameters.
    """
    # TODO: Think about an f that is recursive
    cache = {}
    guard = Lock()

    @wraps(f)
    def wrapper(*args):
        # Fast path: no lock needed when the result is already cached.
        if args in cache:
            return cache[args]
        # Slow path: double-checked locking so f runs at most once per key.
        with guard:
            if args not in cache:
                cache[args] = f(*args)
            return cache[args]

    return wrapper
python
def sync_memoize(f): # TODO: Think about an f that is recursive memory = {} lock = Lock() @wraps(f) def new_f(*args): try: return memory[args] except KeyError: # on cache misses, retry with lock held with lock: try: return memory[args] except KeyError: r = f(*args) memory[args] = r return r return new_f
[ "def", "sync_memoize", "(", "f", ")", ":", "# TODO: Think about an f that is recursive", "memory", "=", "{", "}", "lock", "=", "Lock", "(", ")", "@", "wraps", "(", "f", ")", "def", "new_f", "(", "*", "args", ")", ":", "try", ":", "return", "memory", "[...
Like memoize, but guarantees that decorated function is only called once, even when multiple threads are calling the decorating function with multiple parameters.
[ "Like", "memoize", "but", "guarantees", "that", "decorated", "function", "is", "only", "called", "once", "even", "when", "multiple", "threads", "are", "calling", "the", "decorating", "function", "with", "multiple", "parameters", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/lib/memoize.py#L44-L66
225,010
DataBiosphere/toil
src/toil/lib/memoize.py
less_strict_bool
def less_strict_bool(x):
    """Idempotent and None-safe version of strict_bool."""
    if x is None:
        return False
    # Already a bool: pass it through unchanged (idempotence).
    if isinstance(x, bool):
        return x
    return strict_bool(x)
python
def less_strict_bool(x): if x is None: return False elif x is True or x is False: return x else: return strict_bool(x)
[ "def", "less_strict_bool", "(", "x", ")", ":", "if", "x", "is", "None", ":", "return", "False", "elif", "x", "is", "True", "or", "x", "is", "False", ":", "return", "x", "else", ":", "return", "strict_bool", "(", "x", ")" ]
Idempotent and None-safe version of strict_bool.
[ "Idempotent", "and", "None", "-", "safe", "version", "of", "strict_bool", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/lib/memoize.py#L190-L197
225,011
DataBiosphere/toil
attic/toil-sort-example.py
setup
def setup(job, input_file_id, n, down_checkpoints):
    """Sets up the sort.
    Returns the FileID of the sorted file
    """
    job.fileStore.logToMaster("Starting the merge sort")
    # Kick off the recursive split/sort as a child job and promise its result.
    child = job.addChildJobFn(down, input_file_id, n,
                              down_checkpoints=down_checkpoints,
                              memory='600M')
    return child.rv()
python
def setup(job, input_file_id, n, down_checkpoints): # Write the input file to the file store job.fileStore.logToMaster("Starting the merge sort") return job.addChildJobFn(down, input_file_id, n, down_checkpoints=down_checkpoints, memory='600M').rv()
[ "def", "setup", "(", "job", ",", "input_file_id", ",", "n", ",", "down_checkpoints", ")", ":", "# Write the input file to the file store", "job", ".", "fileStore", ".", "logToMaster", "(", "\"Starting the merge sort\"", ")", "return", "job", ".", "addChildJobFn", "(...
Sets up the sort. Returns the FileID of the sorted file
[ "Sets", "up", "the", "sort", ".", "Returns", "the", "FileID", "of", "the", "sorted", "file" ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/attic/toil-sort-example.py#L12-L21
225,012
DataBiosphere/toil
attic/toil-sort-example.py
down
def down(job, input_file_id, n, down_checkpoints):
    """Input is a file and a range into that file to sort and an output location in which
    to write the sorted file.
    If the range is larger than a threshold N the range is divided recursively and
    a follow on job is then created which merges back the results. Otherwise,
    the file is sorted and placed in the output.
    """
    # Pull the file out of the job store so we can inspect and split it.
    local_copy = job.fileStore.readGlobalFile(input_file_id, cache=False)
    file_size = os.path.getsize(local_copy)
    if file_size <= n:
        # Base case: small enough to sort directly and publish the result.
        job.fileStore.logToMaster("Sorting file: %s of size: %s"
                                  % (input_file_id, file_size), level=logging.CRITICAL)
        sorted_copy = job.fileStore.getLocalTempFile()
        sort(local_copy, sorted_copy)
        return job.fileStore.writeGlobalFile(sorted_copy)
    # Recursive case: split at the midpoint, sort each half in a child job,
    # then merge the two sorted halves in a follow-on job.
    job.fileStore.logToMaster("Splitting file: %s of size: %s"
                              % (input_file_id, file_size), level=logging.CRITICAL)
    mid_point = get_midpoint(local_copy, 0, file_size)
    first_half = job.fileStore.getLocalTempFile()
    with open(first_half, 'w') as out:
        copy_subrange_of_file(local_copy, 0, mid_point + 1, out)
    second_half = job.fileStore.getLocalTempFile()
    with open(second_half, 'w') as out:
        copy_subrange_of_file(local_copy, mid_point + 1, file_size, out)
    left = job.addChildJobFn(down, job.fileStore.writeGlobalFile(first_half), n,
                             down_checkpoints=down_checkpoints, memory='600M').rv()
    right = job.addChildJobFn(down, job.fileStore.writeGlobalFile(second_half), n,
                              down_checkpoints=down_checkpoints, memory='600M').rv()
    return job.addFollowOnJobFn(up, left, right).rv()
python
def down(job, input_file_id, n, down_checkpoints): # Read the file input_file = job.fileStore.readGlobalFile(input_file_id, cache=False) length = os.path.getsize(input_file) if length > n: # We will subdivide the file job.fileStore.logToMaster("Splitting file: %s of size: %s" % (input_file_id, length), level=logging.CRITICAL) # Split the file into two copies mid_point = get_midpoint(input_file, 0, length) t1 = job.fileStore.getLocalTempFile() with open(t1, 'w') as fH: copy_subrange_of_file(input_file, 0, mid_point + 1, fH) t2 = job.fileStore.getLocalTempFile() with open(t2, 'w') as fH: copy_subrange_of_file(input_file, mid_point + 1, length, fH) # Call the down function recursively return job.addFollowOnJobFn(up, job.addChildJobFn(down, job.fileStore.writeGlobalFile(t1), n, down_checkpoints=down_checkpoints, memory='600M').rv(), job.addChildJobFn(down, job.fileStore.writeGlobalFile(t2), n, down_checkpoints=down_checkpoints, memory='600M').rv()).rv() else: # We can sort this bit of the file job.fileStore.logToMaster("Sorting file: %s of size: %s" % (input_file_id, length), level=logging.CRITICAL) # Sort the copy and write back to the fileStore output_file = job.fileStore.getLocalTempFile() sort(input_file, output_file) return job.fileStore.writeGlobalFile(output_file)
[ "def", "down", "(", "job", ",", "input_file_id", ",", "n", ",", "down_checkpoints", ")", ":", "# Read the file", "input_file", "=", "job", ".", "fileStore", ".", "readGlobalFile", "(", "input_file_id", ",", "cache", "=", "False", ")", "length", "=", "os", ...
Input is a file and a range into that file to sort and an output location in which to write the sorted file. If the range is larger than a threshold N the range is divided recursively and a follow on job is then created which merges back the results. Otherwise, the file is sorted and placed in the output.
[ "Input", "is", "a", "file", "and", "a", "range", "into", "that", "file", "to", "sort", "and", "an", "output", "location", "in", "which", "to", "write", "the", "sorted", "file", ".", "If", "the", "range", "is", "larger", "than", "a", "threshold", "N", ...
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/attic/toil-sort-example.py#L24-L60
225,013
DataBiosphere/toil
attic/toil-sort-example.py
up
def up(job, input_file_id_1, input_file_id_2): """Merges the two files and places them in the output. """ with job.fileStore.writeGlobalFileStream() as (fileHandle, output_id): with job.fileStore.readGlobalFileStream(input_file_id_1) as inputFileHandle1: with job.fileStore.readGlobalFileStream(input_file_id_2) as inputFileHandle2: job.fileStore.logToMaster("Merging %s and %s to %s" % (input_file_id_1, input_file_id_2, output_id)) merge(inputFileHandle1, inputFileHandle2, fileHandle) # Cleanup up the input files - these deletes will occur after the completion is successful. job.fileStore.deleteGlobalFile(input_file_id_1) job.fileStore.deleteGlobalFile(input_file_id_2) return output_id
python
def up(job, input_file_id_1, input_file_id_2): with job.fileStore.writeGlobalFileStream() as (fileHandle, output_id): with job.fileStore.readGlobalFileStream(input_file_id_1) as inputFileHandle1: with job.fileStore.readGlobalFileStream(input_file_id_2) as inputFileHandle2: job.fileStore.logToMaster("Merging %s and %s to %s" % (input_file_id_1, input_file_id_2, output_id)) merge(inputFileHandle1, inputFileHandle2, fileHandle) # Cleanup up the input files - these deletes will occur after the completion is successful. job.fileStore.deleteGlobalFile(input_file_id_1) job.fileStore.deleteGlobalFile(input_file_id_2) return output_id
[ "def", "up", "(", "job", ",", "input_file_id_1", ",", "input_file_id_2", ")", ":", "with", "job", ".", "fileStore", ".", "writeGlobalFileStream", "(", ")", "as", "(", "fileHandle", ",", "output_id", ")", ":", "with", "job", ".", "fileStore", ".", "readGlob...
Merges the two files and places them in the output.
[ "Merges", "the", "two", "files", "and", "places", "them", "in", "the", "output", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/attic/toil-sort-example.py#L63-L76
225,014
DataBiosphere/toil
attic/toil-sort-example.py
sort
def sort(in_file, out_file): """Sorts the given file. """ filehandle = open(in_file, 'r') lines = filehandle.readlines() filehandle.close() lines.sort() filehandle = open(out_file, 'w') for line in lines: filehandle.write(line) filehandle.close()
python
def sort(in_file, out_file): filehandle = open(in_file, 'r') lines = filehandle.readlines() filehandle.close() lines.sort() filehandle = open(out_file, 'w') for line in lines: filehandle.write(line) filehandle.close()
[ "def", "sort", "(", "in_file", ",", "out_file", ")", ":", "filehandle", "=", "open", "(", "in_file", ",", "'r'", ")", "lines", "=", "filehandle", ".", "readlines", "(", ")", "filehandle", ".", "close", "(", ")", "lines", ".", "sort", "(", ")", "fileh...
Sorts the given file.
[ "Sorts", "the", "given", "file", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/attic/toil-sort-example.py#L80-L90
225,015
DataBiosphere/toil
attic/toil-sort-example.py
merge
def merge(filehandle_1, filehandle_2, output_filehandle): """Merges together two files maintaining sorted order. """ line2 = filehandle_2.readline() for line1 in filehandle_1.readlines(): while line2 != '' and line2 <= line1: output_filehandle.write(line2) line2 = filehandle_2.readline() output_filehandle.write(line1) while line2 != '': output_filehandle.write(line2) line2 = filehandle_2.readline()
python
def merge(filehandle_1, filehandle_2, output_filehandle): line2 = filehandle_2.readline() for line1 in filehandle_1.readlines(): while line2 != '' and line2 <= line1: output_filehandle.write(line2) line2 = filehandle_2.readline() output_filehandle.write(line1) while line2 != '': output_filehandle.write(line2) line2 = filehandle_2.readline()
[ "def", "merge", "(", "filehandle_1", ",", "filehandle_2", ",", "output_filehandle", ")", ":", "line2", "=", "filehandle_2", ".", "readline", "(", ")", "for", "line1", "in", "filehandle_1", ".", "readlines", "(", ")", ":", "while", "line2", "!=", "''", "and...
Merges together two files maintaining sorted order.
[ "Merges", "together", "two", "files", "maintaining", "sorted", "order", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/attic/toil-sort-example.py#L93-L104
225,016
DataBiosphere/toil
attic/toil-sort-example.py
get_midpoint
def get_midpoint(file, file_start, file_end): """Finds the point in the file to split. Returns an int i such that fileStart <= i < fileEnd """ filehandle = open(file, 'r') mid_point = (file_start + file_end) / 2 assert mid_point >= file_start filehandle.seek(mid_point) line = filehandle.readline() assert len(line) >= 1 if len(line) + mid_point < file_end: return mid_point + len(line) - 1 filehandle.seek(file_start) line = filehandle.readline() assert len(line) >= 1 assert len(line) + file_start <= file_end return len(line) + file_start - 1
python
def get_midpoint(file, file_start, file_end): filehandle = open(file, 'r') mid_point = (file_start + file_end) / 2 assert mid_point >= file_start filehandle.seek(mid_point) line = filehandle.readline() assert len(line) >= 1 if len(line) + mid_point < file_end: return mid_point + len(line) - 1 filehandle.seek(file_start) line = filehandle.readline() assert len(line) >= 1 assert len(line) + file_start <= file_end return len(line) + file_start - 1
[ "def", "get_midpoint", "(", "file", ",", "file_start", ",", "file_end", ")", ":", "filehandle", "=", "open", "(", "file", ",", "'r'", ")", "mid_point", "=", "(", "file_start", "+", "file_end", ")", "/", "2", "assert", "mid_point", ">=", "file_start", "fi...
Finds the point in the file to split. Returns an int i such that fileStart <= i < fileEnd
[ "Finds", "the", "point", "in", "the", "file", "to", "split", ".", "Returns", "an", "int", "i", "such", "that", "fileStart", "<", "=", "i", "<", "fileEnd" ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/attic/toil-sort-example.py#L118-L134
225,017
DataBiosphere/toil
src/toil/jobStores/fileJobStore.py
FileJobStore.robust_rmtree
def robust_rmtree(self, path, max_retries=3): """Robustly tries to delete paths. Retries several times (with increasing delays) if an OSError occurs. If the final attempt fails, the Exception is propagated to the caller. Borrowing patterns from: https://github.com/hashdist/hashdist """ delay = 1 for _ in range(max_retries): try: shutil.rmtree(path) break except OSError: logger.debug('Unable to remove path: {}. Retrying in {} seconds.'.format(path, delay)) time.sleep(delay) delay *= 2 if os.path.exists(path): # Final attempt, pass any Exceptions up to caller. shutil.rmtree(path)
python
def robust_rmtree(self, path, max_retries=3): delay = 1 for _ in range(max_retries): try: shutil.rmtree(path) break except OSError: logger.debug('Unable to remove path: {}. Retrying in {} seconds.'.format(path, delay)) time.sleep(delay) delay *= 2 if os.path.exists(path): # Final attempt, pass any Exceptions up to caller. shutil.rmtree(path)
[ "def", "robust_rmtree", "(", "self", ",", "path", ",", "max_retries", "=", "3", ")", ":", "delay", "=", "1", "for", "_", "in", "range", "(", "max_retries", ")", ":", "try", ":", "shutil", ".", "rmtree", "(", "path", ")", "break", "except", "OSError",...
Robustly tries to delete paths. Retries several times (with increasing delays) if an OSError occurs. If the final attempt fails, the Exception is propagated to the caller. Borrowing patterns from: https://github.com/hashdist/hashdist
[ "Robustly", "tries", "to", "delete", "paths", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/jobStores/fileJobStore.py#L94-L117
225,018
DataBiosphere/toil
src/toil/jobStores/fileJobStore.py
FileJobStore._getUniqueName
def _getUniqueName(self, fileName, jobStoreID=None, sourceFunctionName="x"): """ Create unique file name within a jobStore directory or tmp directory. :param fileName: A file name, which can be a full path as only the basename will be used. :param jobStoreID: If given, the path returned will be in the jobStore directory. Otherwise, the tmp directory will be used. :param sourceFunctionName: This name is the name of the function that generated this file. Defaults to x if that name was not a normal name. Used for tracking files. :return: The full path with a unique file name. """ fd, absPath = self._getTempFile(jobStoreID) os.close(fd) os.unlink(absPath) # remove the .tmp extension and add the file name (noExt,ext) = os.path.splitext(absPath) uniquePath = noExt + '-' + sourceFunctionName + '-' + os.path.basename(fileName) if os.path.exists(absPath): return absPath # give up, just return temp name to avoid conflicts return uniquePath
python
def _getUniqueName(self, fileName, jobStoreID=None, sourceFunctionName="x"): fd, absPath = self._getTempFile(jobStoreID) os.close(fd) os.unlink(absPath) # remove the .tmp extension and add the file name (noExt,ext) = os.path.splitext(absPath) uniquePath = noExt + '-' + sourceFunctionName + '-' + os.path.basename(fileName) if os.path.exists(absPath): return absPath # give up, just return temp name to avoid conflicts return uniquePath
[ "def", "_getUniqueName", "(", "self", ",", "fileName", ",", "jobStoreID", "=", "None", ",", "sourceFunctionName", "=", "\"x\"", ")", ":", "fd", ",", "absPath", "=", "self", ".", "_getTempFile", "(", "jobStoreID", ")", "os", ".", "close", "(", "fd", ")", ...
Create unique file name within a jobStore directory or tmp directory. :param fileName: A file name, which can be a full path as only the basename will be used. :param jobStoreID: If given, the path returned will be in the jobStore directory. Otherwise, the tmp directory will be used. :param sourceFunctionName: This name is the name of the function that generated this file. Defaults to x if that name was not a normal name. Used for tracking files. :return: The full path with a unique file name.
[ "Create", "unique", "file", "name", "within", "a", "jobStore", "directory", "or", "tmp", "directory", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/jobStores/fileJobStore.py#L605-L626
225,019
DataBiosphere/toil
src/toil/jobStores/azureJobStore.py
_fetchAzureAccountKey
def _fetchAzureAccountKey(accountName): """ Find the account key for a given Azure storage account. The account key is taken from the AZURE_ACCOUNT_KEY_<account> environment variable if it exists, then from plain AZURE_ACCOUNT_KEY, and then from looking in the file ~/.toilAzureCredentials. That file has format: [AzureStorageCredentials] accountName1=ACCOUNTKEY1== accountName2=ACCOUNTKEY2== """ try: return os.environ['AZURE_ACCOUNT_KEY_' + accountName] except KeyError: try: return os.environ['AZURE_ACCOUNT_KEY'] except KeyError: configParser = RawConfigParser() configParser.read(os.path.expanduser(credential_file_path)) try: return configParser.get('AzureStorageCredentials', accountName) except NoOptionError: raise RuntimeError("No account key found for '%s', please provide it in '%s'" % (accountName, credential_file_path))
python
def _fetchAzureAccountKey(accountName): try: return os.environ['AZURE_ACCOUNT_KEY_' + accountName] except KeyError: try: return os.environ['AZURE_ACCOUNT_KEY'] except KeyError: configParser = RawConfigParser() configParser.read(os.path.expanduser(credential_file_path)) try: return configParser.get('AzureStorageCredentials', accountName) except NoOptionError: raise RuntimeError("No account key found for '%s', please provide it in '%s'" % (accountName, credential_file_path))
[ "def", "_fetchAzureAccountKey", "(", "accountName", ")", ":", "try", ":", "return", "os", ".", "environ", "[", "'AZURE_ACCOUNT_KEY_'", "+", "accountName", "]", "except", "KeyError", ":", "try", ":", "return", "os", ".", "environ", "[", "'AZURE_ACCOUNT_KEY'", "...
Find the account key for a given Azure storage account. The account key is taken from the AZURE_ACCOUNT_KEY_<account> environment variable if it exists, then from plain AZURE_ACCOUNT_KEY, and then from looking in the file ~/.toilAzureCredentials. That file has format: [AzureStorageCredentials] accountName1=ACCOUNTKEY1== accountName2=ACCOUNTKEY2==
[ "Find", "the", "account", "key", "for", "a", "given", "Azure", "storage", "account", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/jobStores/azureJobStore.py#L74-L98
225,020
DataBiosphere/toil
src/toil/lib/docker.py
dockerPredicate
def dockerPredicate(e): """ Used to ensure Docker exceptions are retried if appropriate :param e: Exception :return: True if e retriable, else False """ if not isinstance(e, subprocess.CalledProcessError): return False if e.returncode == 125: return True
python
def dockerPredicate(e): if not isinstance(e, subprocess.CalledProcessError): return False if e.returncode == 125: return True
[ "def", "dockerPredicate", "(", "e", ")", ":", "if", "not", "isinstance", "(", "e", ",", "subprocess", ".", "CalledProcessError", ")", ":", "return", "False", "if", "e", ".", "returncode", "==", "125", ":", "return", "True" ]
Used to ensure Docker exceptions are retried if appropriate :param e: Exception :return: True if e retriable, else False
[ "Used", "to", "ensure", "Docker", "exceptions", "are", "retried", "if", "appropriate" ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/lib/docker.py#L31-L41
225,021
DataBiosphere/toil
src/toil/lib/docker.py
getContainerName
def getContainerName(job): """Create a random string including the job name, and return it.""" return '--'.join([str(job), base64.b64encode(os.urandom(9), b'-_').decode('utf-8')])\ .replace("'", '').replace('"', '').replace('_', '')
python
def getContainerName(job): return '--'.join([str(job), base64.b64encode(os.urandom(9), b'-_').decode('utf-8')])\ .replace("'", '').replace('"', '').replace('_', '')
[ "def", "getContainerName", "(", "job", ")", ":", "return", "'--'", ".", "join", "(", "[", "str", "(", "job", ")", ",", "base64", ".", "b64encode", "(", "os", ".", "urandom", "(", "9", ")", ",", "b'-_'", ")", ".", "decode", "(", "'utf-8'", ")", "]...
Create a random string including the job name, and return it.
[ "Create", "a", "random", "string", "including", "the", "job", "name", "and", "return", "it", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/lib/docker.py#L500-L504
225,022
DataBiosphere/toil
src/toil/provisioners/ansibleDriver.py
AnsibleDriver.callPlaybook
def callPlaybook(self, playbook, ansibleArgs, wait=True, tags=["all"]): """ Run a playbook. :param playbook: An Ansible playbook to run. :param ansibleArgs: Arguments to pass to the playbook. :param wait: Wait for the play to finish if true. :param tags: Control tags for the play. """ playbook = os.path.join(self.playbooks, playbook) # Path to playbook being executed verbosity = "-vvvvv" if logger.isEnabledFor(logging.DEBUG) else "-v" command = ["ansible-playbook", verbosity, "--tags", ",".join(tags), "--extra-vars"] command.append(" ".join(["=".join(i) for i in ansibleArgs.items()])) # Arguments being passed to playbook command.append(playbook) logger.debug("Executing Ansible call `%s`", " ".join(command)) p = subprocess.Popen(command) if wait: p.communicate() if p.returncode != 0: # FIXME: parse error codes raise RuntimeError("Ansible reported an error when executing playbook %s" % playbook)
python
def callPlaybook(self, playbook, ansibleArgs, wait=True, tags=["all"]): playbook = os.path.join(self.playbooks, playbook) # Path to playbook being executed verbosity = "-vvvvv" if logger.isEnabledFor(logging.DEBUG) else "-v" command = ["ansible-playbook", verbosity, "--tags", ",".join(tags), "--extra-vars"] command.append(" ".join(["=".join(i) for i in ansibleArgs.items()])) # Arguments being passed to playbook command.append(playbook) logger.debug("Executing Ansible call `%s`", " ".join(command)) p = subprocess.Popen(command) if wait: p.communicate() if p.returncode != 0: # FIXME: parse error codes raise RuntimeError("Ansible reported an error when executing playbook %s" % playbook)
[ "def", "callPlaybook", "(", "self", ",", "playbook", ",", "ansibleArgs", ",", "wait", "=", "True", ",", "tags", "=", "[", "\"all\"", "]", ")", ":", "playbook", "=", "os", ".", "path", ".", "join", "(", "self", ".", "playbooks", ",", "playbook", ")", ...
Run a playbook. :param playbook: An Ansible playbook to run. :param ansibleArgs: Arguments to pass to the playbook. :param wait: Wait for the play to finish if true. :param tags: Control tags for the play.
[ "Run", "a", "playbook", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/ansibleDriver.py#L31-L52
225,023
DataBiosphere/toil
src/toil/leader.py
Leader.run
def run(self): """ This runs the leader process to issue and manage jobs. :raises: toil.leader.FailedJobsException if at the end of function their remain \ failed jobs :return: The return value of the root job's run function. :rtype: Any """ # Start the stats/logging aggregation thread self.statsAndLogging.start() if self.config.metrics: self.toilMetrics = ToilMetrics(provisioner=self.provisioner) try: # Start service manager thread self.serviceManager.start() try: # Create cluster scaling processes if not None if self.clusterScaler is not None: self.clusterScaler.start() try: # Run the main loop self.innerLoop() finally: if self.clusterScaler is not None: logger.debug('Waiting for workers to shutdown.') startTime = time.time() self.clusterScaler.shutdown() logger.debug('Worker shutdown complete in %s seconds.', time.time() - startTime) finally: # Ensure service manager thread is properly shutdown self.serviceManager.shutdown() finally: # Ensure the stats and logging thread is properly shutdown self.statsAndLogging.shutdown() if self.toilMetrics: self.toilMetrics.shutdown() # Filter the failed jobs self.toilState.totalFailedJobs = [j for j in self.toilState.totalFailedJobs if self.jobStore.exists(j.jobStoreID)] try: self.create_status_sentinel_file(self.toilState.totalFailedJobs) except IOError as e: logger.debug('Error from importFile with hardlink=True: {}'.format(e)) logger.info("Finished toil run %s" % ("successfully." if not self.toilState.totalFailedJobs \ else ("with %s failed jobs." % len(self.toilState.totalFailedJobs)))) if len(self.toilState.totalFailedJobs): logger.info("Failed jobs at end of the run: %s", ' '.join(str(job) for job in self.toilState.totalFailedJobs)) # Cleanup if len(self.toilState.totalFailedJobs) > 0: raise FailedJobsException(self.config.jobStore, self.toilState.totalFailedJobs, self.jobStore) return self.jobStore.getRootJobReturnValue()
python
def run(self): # Start the stats/logging aggregation thread self.statsAndLogging.start() if self.config.metrics: self.toilMetrics = ToilMetrics(provisioner=self.provisioner) try: # Start service manager thread self.serviceManager.start() try: # Create cluster scaling processes if not None if self.clusterScaler is not None: self.clusterScaler.start() try: # Run the main loop self.innerLoop() finally: if self.clusterScaler is not None: logger.debug('Waiting for workers to shutdown.') startTime = time.time() self.clusterScaler.shutdown() logger.debug('Worker shutdown complete in %s seconds.', time.time() - startTime) finally: # Ensure service manager thread is properly shutdown self.serviceManager.shutdown() finally: # Ensure the stats and logging thread is properly shutdown self.statsAndLogging.shutdown() if self.toilMetrics: self.toilMetrics.shutdown() # Filter the failed jobs self.toilState.totalFailedJobs = [j for j in self.toilState.totalFailedJobs if self.jobStore.exists(j.jobStoreID)] try: self.create_status_sentinel_file(self.toilState.totalFailedJobs) except IOError as e: logger.debug('Error from importFile with hardlink=True: {}'.format(e)) logger.info("Finished toil run %s" % ("successfully." if not self.toilState.totalFailedJobs \ else ("with %s failed jobs." % len(self.toilState.totalFailedJobs)))) if len(self.toilState.totalFailedJobs): logger.info("Failed jobs at end of the run: %s", ' '.join(str(job) for job in self.toilState.totalFailedJobs)) # Cleanup if len(self.toilState.totalFailedJobs) > 0: raise FailedJobsException(self.config.jobStore, self.toilState.totalFailedJobs, self.jobStore) return self.jobStore.getRootJobReturnValue()
[ "def", "run", "(", "self", ")", ":", "# Start the stats/logging aggregation thread", "self", ".", "statsAndLogging", ".", "start", "(", ")", "if", "self", ".", "config", ".", "metrics", ":", "self", ".", "toilMetrics", "=", "ToilMetrics", "(", "provisioner", "...
This runs the leader process to issue and manage jobs. :raises: toil.leader.FailedJobsException if at the end of function their remain \ failed jobs :return: The return value of the root job's run function. :rtype: Any
[ "This", "runs", "the", "leader", "process", "to", "issue", "and", "manage", "jobs", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/leader.py#L184-L247
225,024
DataBiosphere/toil
src/toil/leader.py
Leader.create_status_sentinel_file
def create_status_sentinel_file(self, fail): """Create a file in the jobstore indicating failure or success.""" logName = 'failed.log' if fail else 'succeeded.log' localLog = os.path.join(os.getcwd(), logName) open(localLog, 'w').close() self.jobStore.importFile('file://' + localLog, logName, hardlink=True) if os.path.exists(localLog): # Bandaid for Jenkins tests failing stochastically and unexplainably. os.remove(localLog)
python
def create_status_sentinel_file(self, fail): logName = 'failed.log' if fail else 'succeeded.log' localLog = os.path.join(os.getcwd(), logName) open(localLog, 'w').close() self.jobStore.importFile('file://' + localLog, logName, hardlink=True) if os.path.exists(localLog): # Bandaid for Jenkins tests failing stochastically and unexplainably. os.remove(localLog)
[ "def", "create_status_sentinel_file", "(", "self", ",", "fail", ")", ":", "logName", "=", "'failed.log'", "if", "fail", "else", "'succeeded.log'", "localLog", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "logName", ")", "...
Create a file in the jobstore indicating failure or success.
[ "Create", "a", "file", "in", "the", "jobstore", "indicating", "failure", "or", "success", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/leader.py#L249-L257
225,025
DataBiosphere/toil
src/toil/leader.py
Leader._checkSuccessorReadyToRunMultiplePredecessors
def _checkSuccessorReadyToRunMultiplePredecessors(self, jobGraph, jobNode, successorJobStoreID): """Handle the special cases of checking if a successor job is ready to run when there are multiple predecessors""" # See implementation note at the top of this file for discussion of multiple predecessors logger.debug("Successor job: %s of job: %s has multiple " "predecessors", jobNode, jobGraph) # Get the successor job graph, which is caches if successorJobStoreID not in self.toilState.jobsToBeScheduledWithMultiplePredecessors: self.toilState.jobsToBeScheduledWithMultiplePredecessors[successorJobStoreID] = self.jobStore.load(successorJobStoreID) successorJobGraph = self.toilState.jobsToBeScheduledWithMultiplePredecessors[successorJobStoreID] # Add the jobGraph as a finished predecessor to the successor successorJobGraph.predecessorsFinished.add(jobGraph.jobStoreID) # If the successor is in the set of successors of failed jobs if successorJobStoreID in self.toilState.failedSuccessors: if not self._handledFailedSuccessor(jobNode, jobGraph, successorJobStoreID): return False # If the successor job's predecessors have all not all completed then # ignore the jobGraph as is not yet ready to run assert len(successorJobGraph.predecessorsFinished) <= successorJobGraph.predecessorNumber if len(successorJobGraph.predecessorsFinished) < successorJobGraph.predecessorNumber: return False else: # Remove the successor job from the cache self.toilState.jobsToBeScheduledWithMultiplePredecessors.pop(successorJobStoreID) return True
python
def _checkSuccessorReadyToRunMultiplePredecessors(self, jobGraph, jobNode, successorJobStoreID): # See implementation note at the top of this file for discussion of multiple predecessors logger.debug("Successor job: %s of job: %s has multiple " "predecessors", jobNode, jobGraph) # Get the successor job graph, which is caches if successorJobStoreID not in self.toilState.jobsToBeScheduledWithMultiplePredecessors: self.toilState.jobsToBeScheduledWithMultiplePredecessors[successorJobStoreID] = self.jobStore.load(successorJobStoreID) successorJobGraph = self.toilState.jobsToBeScheduledWithMultiplePredecessors[successorJobStoreID] # Add the jobGraph as a finished predecessor to the successor successorJobGraph.predecessorsFinished.add(jobGraph.jobStoreID) # If the successor is in the set of successors of failed jobs if successorJobStoreID in self.toilState.failedSuccessors: if not self._handledFailedSuccessor(jobNode, jobGraph, successorJobStoreID): return False # If the successor job's predecessors have all not all completed then # ignore the jobGraph as is not yet ready to run assert len(successorJobGraph.predecessorsFinished) <= successorJobGraph.predecessorNumber if len(successorJobGraph.predecessorsFinished) < successorJobGraph.predecessorNumber: return False else: # Remove the successor job from the cache self.toilState.jobsToBeScheduledWithMultiplePredecessors.pop(successorJobStoreID) return True
[ "def", "_checkSuccessorReadyToRunMultiplePredecessors", "(", "self", ",", "jobGraph", ",", "jobNode", ",", "successorJobStoreID", ")", ":", "# See implementation note at the top of this file for discussion of multiple predecessors", "logger", ".", "debug", "(", "\"Successor job: %s...
Handle the special cases of checking if a successor job is ready to run when there are multiple predecessors
[ "Handle", "the", "special", "cases", "of", "checking", "if", "a", "successor", "job", "is", "ready", "to", "run", "when", "there", "are", "multiple", "predecessors" ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/leader.py#L287-L315
225,026
DataBiosphere/toil
src/toil/leader.py
Leader._makeJobSuccessorReadyToRun
def _makeJobSuccessorReadyToRun(self, jobGraph, jobNode): """make a successor job ready to run, returning False if they should not yet be run""" successorJobStoreID = jobNode.jobStoreID #Build map from successor to predecessors. if successorJobStoreID not in self.toilState.successorJobStoreIDToPredecessorJobs: self.toilState.successorJobStoreIDToPredecessorJobs[successorJobStoreID] = [] self.toilState.successorJobStoreIDToPredecessorJobs[successorJobStoreID].append(jobGraph) if jobNode.predecessorNumber > 1: return self._checkSuccessorReadyToRunMultiplePredecessors(jobGraph, jobNode, successorJobStoreID) else: return True
python
def _makeJobSuccessorReadyToRun(self, jobGraph, jobNode): successorJobStoreID = jobNode.jobStoreID #Build map from successor to predecessors. if successorJobStoreID not in self.toilState.successorJobStoreIDToPredecessorJobs: self.toilState.successorJobStoreIDToPredecessorJobs[successorJobStoreID] = [] self.toilState.successorJobStoreIDToPredecessorJobs[successorJobStoreID].append(jobGraph) if jobNode.predecessorNumber > 1: return self._checkSuccessorReadyToRunMultiplePredecessors(jobGraph, jobNode, successorJobStoreID) else: return True
[ "def", "_makeJobSuccessorReadyToRun", "(", "self", ",", "jobGraph", ",", "jobNode", ")", ":", "successorJobStoreID", "=", "jobNode", ".", "jobStoreID", "#Build map from successor to predecessors.", "if", "successorJobStoreID", "not", "in", "self", ".", "toilState", ".",...
make a successor job ready to run, returning False if they should not yet be run
[ "make", "a", "successor", "job", "ready", "to", "run", "returning", "False", "if", "they", "should", "not", "yet", "be", "run" ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/leader.py#L317-L329
225,027
DataBiosphere/toil
src/toil/leader.py
Leader._processFailedSuccessors
def _processFailedSuccessors(self, jobGraph): """Some of the jobs successors failed then either fail the job or restart it if it has retries left and is a checkpoint job""" if jobGraph.jobStoreID in self.toilState.servicesIssued: # The job has services running, signal for them to be killed # once they are killed then the jobGraph will be re-added to # the updatedJobs set and then scheduled to be removed logger.debug("Telling job: %s to terminate its services due to successor failure", jobGraph.jobStoreID) self.serviceManager.killServices(self.toilState.servicesIssued[jobGraph.jobStoreID], error=True) elif jobGraph.jobStoreID in self.toilState.successorCounts: # The job has non-service jobs running wait for them to finish # the job will be re-added to the updated jobs when these jobs # are done logger.debug("Job %s with ID: %s with failed successors still has successor jobs running", jobGraph, jobGraph.jobStoreID) elif jobGraph.checkpoint is not None and jobGraph.remainingRetryCount > 1: # If the job is a checkpoint and has remaining retries then reissue it. # The logic behind using > 1 rather than > 0 here: Since this job has # been tried once (without decreasing its retry count as the job # itself was successful), and its subtree failed, it shouldn't be retried # unless it has more than 1 try. logger.warn('Job: %s is being restarted as a checkpoint after the total ' 'failure of jobs in its subtree.', jobGraph.jobStoreID) self.issueJob(JobNode.fromJobGraph(jobGraph)) else: # Mark it totally failed logger.debug("Job %s is being processed as completely failed", jobGraph.jobStoreID) self.processTotallyFailedJob(jobGraph)
python
def _processFailedSuccessors(self, jobGraph): if jobGraph.jobStoreID in self.toilState.servicesIssued: # The job has services running, signal for them to be killed # once they are killed then the jobGraph will be re-added to # the updatedJobs set and then scheduled to be removed logger.debug("Telling job: %s to terminate its services due to successor failure", jobGraph.jobStoreID) self.serviceManager.killServices(self.toilState.servicesIssued[jobGraph.jobStoreID], error=True) elif jobGraph.jobStoreID in self.toilState.successorCounts: # The job has non-service jobs running wait for them to finish # the job will be re-added to the updated jobs when these jobs # are done logger.debug("Job %s with ID: %s with failed successors still has successor jobs running", jobGraph, jobGraph.jobStoreID) elif jobGraph.checkpoint is not None and jobGraph.remainingRetryCount > 1: # If the job is a checkpoint and has remaining retries then reissue it. # The logic behind using > 1 rather than > 0 here: Since this job has # been tried once (without decreasing its retry count as the job # itself was successful), and its subtree failed, it shouldn't be retried # unless it has more than 1 try. logger.warn('Job: %s is being restarted as a checkpoint after the total ' 'failure of jobs in its subtree.', jobGraph.jobStoreID) self.issueJob(JobNode.fromJobGraph(jobGraph)) else: # Mark it totally failed logger.debug("Job %s is being processed as completely failed", jobGraph.jobStoreID) self.processTotallyFailedJob(jobGraph)
[ "def", "_processFailedSuccessors", "(", "self", ",", "jobGraph", ")", ":", "if", "jobGraph", ".", "jobStoreID", "in", "self", ".", "toilState", ".", "servicesIssued", ":", "# The job has services running, signal for them to be killed", "# once they are killed then the jobGrap...
Some of the jobs successors failed then either fail the job or restart it if it has retries left and is a checkpoint job
[ "Some", "of", "the", "jobs", "successors", "failed", "then", "either", "fail", "the", "job", "or", "restart", "it", "if", "it", "has", "retries", "left", "and", "is", "a", "checkpoint", "job" ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/leader.py#L347-L377
225,028
DataBiosphere/toil
src/toil/leader.py
Leader._startServiceJobs
def _startServiceJobs(self): """Start any service jobs available from the service manager""" self.issueQueingServiceJobs() while True: serviceJob = self.serviceManager.getServiceJobsToStart(0) # Stop trying to get jobs when function returns None if serviceJob is None: break logger.debug('Launching service job: %s', serviceJob) self.issueServiceJob(serviceJob)
python
def _startServiceJobs(self): self.issueQueingServiceJobs() while True: serviceJob = self.serviceManager.getServiceJobsToStart(0) # Stop trying to get jobs when function returns None if serviceJob is None: break logger.debug('Launching service job: %s', serviceJob) self.issueServiceJob(serviceJob)
[ "def", "_startServiceJobs", "(", "self", ")", ":", "self", ".", "issueQueingServiceJobs", "(", ")", "while", "True", ":", "serviceJob", "=", "self", ".", "serviceManager", ".", "getServiceJobsToStart", "(", "0", ")", "# Stop trying to get jobs when function returns No...
Start any service jobs available from the service manager
[ "Start", "any", "service", "jobs", "available", "from", "the", "service", "manager" ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/leader.py#L459-L468
225,029
DataBiosphere/toil
src/toil/leader.py
Leader._processJobsWithRunningServices
def _processJobsWithRunningServices(self): """Get jobs whose services have started""" while True: jobGraph = self.serviceManager.getJobGraphWhoseServicesAreRunning(0) if jobGraph is None: # Stop trying to get jobs when function returns None break logger.debug('Job: %s has established its services.', jobGraph.jobStoreID) jobGraph.services = [] self.toilState.updatedJobs.add((jobGraph, 0))
python
def _processJobsWithRunningServices(self): while True: jobGraph = self.serviceManager.getJobGraphWhoseServicesAreRunning(0) if jobGraph is None: # Stop trying to get jobs when function returns None break logger.debug('Job: %s has established its services.', jobGraph.jobStoreID) jobGraph.services = [] self.toilState.updatedJobs.add((jobGraph, 0))
[ "def", "_processJobsWithRunningServices", "(", "self", ")", ":", "while", "True", ":", "jobGraph", "=", "self", ".", "serviceManager", ".", "getJobGraphWhoseServicesAreRunning", "(", "0", ")", "if", "jobGraph", "is", "None", ":", "# Stop trying to get jobs when functi...
Get jobs whose services have started
[ "Get", "jobs", "whose", "services", "have", "started" ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/leader.py#L470-L478
225,030
DataBiosphere/toil
src/toil/leader.py
Leader._gatherUpdatedJobs
def _gatherUpdatedJobs(self, updatedJobTuple): """Gather any new, updated jobGraph from the batch system""" jobID, result, wallTime = updatedJobTuple # easy, track different state try: updatedJob = self.jobBatchSystemIDToIssuedJob[jobID] except KeyError: logger.warn("A result seems to already have been processed " "for job %s", jobID) else: if result == 0: cur_logger = (logger.debug if str(updatedJob.jobName).startswith(CWL_INTERNAL_JOBS) else logger.info) cur_logger('Job ended successfully: %s', updatedJob) if self.toilMetrics: self.toilMetrics.logCompletedJob(updatedJob) else: logger.warn('Job failed with exit value %i: %s', result, updatedJob) self.processFinishedJob(jobID, result, wallTime=wallTime)
python
def _gatherUpdatedJobs(self, updatedJobTuple): jobID, result, wallTime = updatedJobTuple # easy, track different state try: updatedJob = self.jobBatchSystemIDToIssuedJob[jobID] except KeyError: logger.warn("A result seems to already have been processed " "for job %s", jobID) else: if result == 0: cur_logger = (logger.debug if str(updatedJob.jobName).startswith(CWL_INTERNAL_JOBS) else logger.info) cur_logger('Job ended successfully: %s', updatedJob) if self.toilMetrics: self.toilMetrics.logCompletedJob(updatedJob) else: logger.warn('Job failed with exit value %i: %s', result, updatedJob) self.processFinishedJob(jobID, result, wallTime=wallTime)
[ "def", "_gatherUpdatedJobs", "(", "self", ",", "updatedJobTuple", ")", ":", "jobID", ",", "result", ",", "wallTime", "=", "updatedJobTuple", "# easy, track different state", "try", ":", "updatedJob", "=", "self", ".", "jobBatchSystemIDToIssuedJob", "[", "jobID", "]"...
Gather any new, updated jobGraph from the batch system
[ "Gather", "any", "new", "updated", "jobGraph", "from", "the", "batch", "system" ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/leader.py#L480-L499
225,031
DataBiosphere/toil
src/toil/leader.py
Leader._processLostJobs
def _processLostJobs(self): """Process jobs that have gone awry""" # In the case that there is nothing happening (no updated jobs to # gather for rescueJobsFrequency seconds) check if there are any jobs # that have run too long (see self.reissueOverLongJobs) or which have # gone missing from the batch system (see self.reissueMissingJobs) if ((time.time() - self.timeSinceJobsLastRescued) >= self.config.rescueJobsFrequency): # We only rescue jobs every N seconds, and when we have apparently # exhausted the current jobGraph supply self.reissueOverLongJobs() logger.info("Reissued any over long jobs") hasNoMissingJobs = self.reissueMissingJobs() if hasNoMissingJobs: self.timeSinceJobsLastRescued = time.time() else: # This means we'll try again in a minute, providing things are quiet self.timeSinceJobsLastRescued += 60 logger.debug("Rescued any (long) missing jobs")
python
def _processLostJobs(self): # In the case that there is nothing happening (no updated jobs to # gather for rescueJobsFrequency seconds) check if there are any jobs # that have run too long (see self.reissueOverLongJobs) or which have # gone missing from the batch system (see self.reissueMissingJobs) if ((time.time() - self.timeSinceJobsLastRescued) >= self.config.rescueJobsFrequency): # We only rescue jobs every N seconds, and when we have apparently # exhausted the current jobGraph supply self.reissueOverLongJobs() logger.info("Reissued any over long jobs") hasNoMissingJobs = self.reissueMissingJobs() if hasNoMissingJobs: self.timeSinceJobsLastRescued = time.time() else: # This means we'll try again in a minute, providing things are quiet self.timeSinceJobsLastRescued += 60 logger.debug("Rescued any (long) missing jobs")
[ "def", "_processLostJobs", "(", "self", ")", ":", "# In the case that there is nothing happening (no updated jobs to", "# gather for rescueJobsFrequency seconds) check if there are any jobs", "# that have run too long (see self.reissueOverLongJobs) or which have", "# gone missing from the batch sy...
Process jobs that have gone awry
[ "Process", "jobs", "that", "have", "gone", "awry" ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/leader.py#L501-L519
225,032
DataBiosphere/toil
src/toil/leader.py
Leader.innerLoop
def innerLoop(self): """ The main loop for processing jobs by the leader. """ self.timeSinceJobsLastRescued = time.time() while self.toilState.updatedJobs or \ self.getNumberOfJobsIssued() or \ self.serviceManager.jobsIssuedToServiceManager: if self.toilState.updatedJobs: self._processReadyJobs() # deal with service-related jobs self._startServiceJobs() self._processJobsWithRunningServices() # check in with the batch system updatedJobTuple = self.batchSystem.getUpdatedBatchJob(maxWait=2) if updatedJobTuple is not None: self._gatherUpdatedJobs(updatedJobTuple) else: self._processLostJobs() # Check on the associated threads and exit if a failure is detected self.statsAndLogging.check() self.serviceManager.check() # the cluster scaler object will only be instantiated if autoscaling is enabled if self.clusterScaler is not None: self.clusterScaler.check() if len(self.toilState.updatedJobs) == 0 and self.deadlockThrottler.throttle(wait=False): # Nothing happened this round and it's been long # enough since we last checked. Check for deadlocks. self.checkForDeadlocks() logger.debug("Finished the main loop: no jobs left to run.") # Consistency check the toil state assert self.toilState.updatedJobs == set() assert self.toilState.successorCounts == {} assert self.toilState.successorJobStoreIDToPredecessorJobs == {} assert self.toilState.serviceJobStoreIDToPredecessorJob == {} assert self.toilState.servicesIssued == {}
python
def innerLoop(self): self.timeSinceJobsLastRescued = time.time() while self.toilState.updatedJobs or \ self.getNumberOfJobsIssued() or \ self.serviceManager.jobsIssuedToServiceManager: if self.toilState.updatedJobs: self._processReadyJobs() # deal with service-related jobs self._startServiceJobs() self._processJobsWithRunningServices() # check in with the batch system updatedJobTuple = self.batchSystem.getUpdatedBatchJob(maxWait=2) if updatedJobTuple is not None: self._gatherUpdatedJobs(updatedJobTuple) else: self._processLostJobs() # Check on the associated threads and exit if a failure is detected self.statsAndLogging.check() self.serviceManager.check() # the cluster scaler object will only be instantiated if autoscaling is enabled if self.clusterScaler is not None: self.clusterScaler.check() if len(self.toilState.updatedJobs) == 0 and self.deadlockThrottler.throttle(wait=False): # Nothing happened this round and it's been long # enough since we last checked. Check for deadlocks. self.checkForDeadlocks() logger.debug("Finished the main loop: no jobs left to run.") # Consistency check the toil state assert self.toilState.updatedJobs == set() assert self.toilState.successorCounts == {} assert self.toilState.successorJobStoreIDToPredecessorJobs == {} assert self.toilState.serviceJobStoreIDToPredecessorJob == {} assert self.toilState.servicesIssued == {}
[ "def", "innerLoop", "(", "self", ")", ":", "self", ".", "timeSinceJobsLastRescued", "=", "time", ".", "time", "(", ")", "while", "self", ".", "toilState", ".", "updatedJobs", "or", "self", ".", "getNumberOfJobsIssued", "(", ")", "or", "self", ".", "service...
The main loop for processing jobs by the leader.
[ "The", "main", "loop", "for", "processing", "jobs", "by", "the", "leader", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/leader.py#L522-L565
225,033
DataBiosphere/toil
src/toil/leader.py
Leader.checkForDeadlocks
def checkForDeadlocks(self): """ Checks if the system is deadlocked running service jobs. """ totalRunningJobs = len(self.batchSystem.getRunningBatchJobIDs()) totalServicesIssued = self.serviceJobsIssued + self.preemptableServiceJobsIssued # If there are no updated jobs and at least some jobs running if totalServicesIssued >= totalRunningJobs and totalRunningJobs > 0: serviceJobs = [x for x in list(self.jobBatchSystemIDToIssuedJob.keys()) if isinstance(self.jobBatchSystemIDToIssuedJob[x], ServiceJobNode)] runningServiceJobs = set([x for x in serviceJobs if self.serviceManager.isRunning(self.jobBatchSystemIDToIssuedJob[x])]) assert len(runningServiceJobs) <= totalRunningJobs # If all the running jobs are active services then we have a potential deadlock if len(runningServiceJobs) == totalRunningJobs: # We wait self.config.deadlockWait seconds before declaring the system deadlocked if self.potentialDeadlockedJobs != runningServiceJobs: self.potentialDeadlockedJobs = runningServiceJobs self.potentialDeadlockTime = time.time() elif time.time() - self.potentialDeadlockTime >= self.config.deadlockWait: raise DeadlockException("The system is service deadlocked - all %d running jobs are active services" % totalRunningJobs) else: # We have observed non-service jobs running, so reset the potential deadlock self.potentialDeadlockedJobs = set() self.potentialDeadlockTime = 0 else: # We have observed non-service jobs running, so reset the potential deadlock self.potentialDeadlockedJobs = set() self.potentialDeadlockTime = 0
python
def checkForDeadlocks(self): totalRunningJobs = len(self.batchSystem.getRunningBatchJobIDs()) totalServicesIssued = self.serviceJobsIssued + self.preemptableServiceJobsIssued # If there are no updated jobs and at least some jobs running if totalServicesIssued >= totalRunningJobs and totalRunningJobs > 0: serviceJobs = [x for x in list(self.jobBatchSystemIDToIssuedJob.keys()) if isinstance(self.jobBatchSystemIDToIssuedJob[x], ServiceJobNode)] runningServiceJobs = set([x for x in serviceJobs if self.serviceManager.isRunning(self.jobBatchSystemIDToIssuedJob[x])]) assert len(runningServiceJobs) <= totalRunningJobs # If all the running jobs are active services then we have a potential deadlock if len(runningServiceJobs) == totalRunningJobs: # We wait self.config.deadlockWait seconds before declaring the system deadlocked if self.potentialDeadlockedJobs != runningServiceJobs: self.potentialDeadlockedJobs = runningServiceJobs self.potentialDeadlockTime = time.time() elif time.time() - self.potentialDeadlockTime >= self.config.deadlockWait: raise DeadlockException("The system is service deadlocked - all %d running jobs are active services" % totalRunningJobs) else: # We have observed non-service jobs running, so reset the potential deadlock self.potentialDeadlockedJobs = set() self.potentialDeadlockTime = 0 else: # We have observed non-service jobs running, so reset the potential deadlock self.potentialDeadlockedJobs = set() self.potentialDeadlockTime = 0
[ "def", "checkForDeadlocks", "(", "self", ")", ":", "totalRunningJobs", "=", "len", "(", "self", ".", "batchSystem", ".", "getRunningBatchJobIDs", "(", ")", ")", "totalServicesIssued", "=", "self", ".", "serviceJobsIssued", "+", "self", ".", "preemptableServiceJobs...
Checks if the system is deadlocked running service jobs.
[ "Checks", "if", "the", "system", "is", "deadlocked", "running", "service", "jobs", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/leader.py#L569-L596
225,034
DataBiosphere/toil
src/toil/leader.py
Leader.issueJob
def issueJob(self, jobNode): """Add a job to the queue of jobs.""" jobNode.command = ' '.join((resolveEntryPoint('_toil_worker'), jobNode.jobName, self.jobStoreLocator, jobNode.jobStoreID)) # jobBatchSystemID is an int that is an incremented counter for each job jobBatchSystemID = self.batchSystem.issueBatchJob(jobNode) self.jobBatchSystemIDToIssuedJob[jobBatchSystemID] = jobNode if jobNode.preemptable: # len(jobBatchSystemIDToIssuedJob) should always be greater than or equal to preemptableJobsIssued, # so increment this value after the job is added to the issuedJob dict self.preemptableJobsIssued += 1 cur_logger = logger.debug if jobNode.jobName.startswith(CWL_INTERNAL_JOBS) else logger.info cur_logger("Issued job %s with job batch system ID: " "%s and cores: %s, disk: %s, and memory: %s", jobNode, str(jobBatchSystemID), int(jobNode.cores), bytes2human(jobNode.disk), bytes2human(jobNode.memory)) if self.toilMetrics: self.toilMetrics.logIssuedJob(jobNode) self.toilMetrics.logQueueSize(self.getNumberOfJobsIssued())
python
def issueJob(self, jobNode): jobNode.command = ' '.join((resolveEntryPoint('_toil_worker'), jobNode.jobName, self.jobStoreLocator, jobNode.jobStoreID)) # jobBatchSystemID is an int that is an incremented counter for each job jobBatchSystemID = self.batchSystem.issueBatchJob(jobNode) self.jobBatchSystemIDToIssuedJob[jobBatchSystemID] = jobNode if jobNode.preemptable: # len(jobBatchSystemIDToIssuedJob) should always be greater than or equal to preemptableJobsIssued, # so increment this value after the job is added to the issuedJob dict self.preemptableJobsIssued += 1 cur_logger = logger.debug if jobNode.jobName.startswith(CWL_INTERNAL_JOBS) else logger.info cur_logger("Issued job %s with job batch system ID: " "%s and cores: %s, disk: %s, and memory: %s", jobNode, str(jobBatchSystemID), int(jobNode.cores), bytes2human(jobNode.disk), bytes2human(jobNode.memory)) if self.toilMetrics: self.toilMetrics.logIssuedJob(jobNode) self.toilMetrics.logQueueSize(self.getNumberOfJobsIssued())
[ "def", "issueJob", "(", "self", ",", "jobNode", ")", ":", "jobNode", ".", "command", "=", "' '", ".", "join", "(", "(", "resolveEntryPoint", "(", "'_toil_worker'", ")", ",", "jobNode", ".", "jobName", ",", "self", ".", "jobStoreLocator", ",", "jobNode", ...
Add a job to the queue of jobs.
[ "Add", "a", "job", "to", "the", "queue", "of", "jobs", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/leader.py#L598-L618
225,035
DataBiosphere/toil
src/toil/leader.py
Leader.issueServiceJob
def issueServiceJob(self, jobNode): """ Issue a service job, putting it on a queue if the maximum number of service jobs to be scheduled has been reached. """ if jobNode.preemptable: self.preemptableServiceJobsToBeIssued.append(jobNode) else: self.serviceJobsToBeIssued.append(jobNode) self.issueQueingServiceJobs()
python
def issueServiceJob(self, jobNode): if jobNode.preemptable: self.preemptableServiceJobsToBeIssued.append(jobNode) else: self.serviceJobsToBeIssued.append(jobNode) self.issueQueingServiceJobs()
[ "def", "issueServiceJob", "(", "self", ",", "jobNode", ")", ":", "if", "jobNode", ".", "preemptable", ":", "self", ".", "preemptableServiceJobsToBeIssued", ".", "append", "(", "jobNode", ")", "else", ":", "self", ".", "serviceJobsToBeIssued", ".", "append", "(...
Issue a service job, putting it on a queue if the maximum number of service jobs to be scheduled has been reached.
[ "Issue", "a", "service", "job", "putting", "it", "on", "a", "queue", "if", "the", "maximum", "number", "of", "service", "jobs", "to", "be", "scheduled", "has", "been", "reached", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/leader.py#L625-L634
225,036
DataBiosphere/toil
src/toil/leader.py
Leader.issueQueingServiceJobs
def issueQueingServiceJobs(self): """Issues any queuing service jobs up to the limit of the maximum allowed.""" while len(self.serviceJobsToBeIssued) > 0 and self.serviceJobsIssued < self.config.maxServiceJobs: self.issueJob(self.serviceJobsToBeIssued.pop()) self.serviceJobsIssued += 1 while len(self.preemptableServiceJobsToBeIssued) > 0 and self.preemptableServiceJobsIssued < self.config.maxPreemptableServiceJobs: self.issueJob(self.preemptableServiceJobsToBeIssued.pop()) self.preemptableServiceJobsIssued += 1
python
def issueQueingServiceJobs(self): while len(self.serviceJobsToBeIssued) > 0 and self.serviceJobsIssued < self.config.maxServiceJobs: self.issueJob(self.serviceJobsToBeIssued.pop()) self.serviceJobsIssued += 1 while len(self.preemptableServiceJobsToBeIssued) > 0 and self.preemptableServiceJobsIssued < self.config.maxPreemptableServiceJobs: self.issueJob(self.preemptableServiceJobsToBeIssued.pop()) self.preemptableServiceJobsIssued += 1
[ "def", "issueQueingServiceJobs", "(", "self", ")", ":", "while", "len", "(", "self", ".", "serviceJobsToBeIssued", ")", ">", "0", "and", "self", ".", "serviceJobsIssued", "<", "self", ".", "config", ".", "maxServiceJobs", ":", "self", ".", "issueJob", "(", ...
Issues any queuing service jobs up to the limit of the maximum allowed.
[ "Issues", "any", "queuing", "service", "jobs", "up", "to", "the", "limit", "of", "the", "maximum", "allowed", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/leader.py#L636-L643
225,037
DataBiosphere/toil
src/toil/leader.py
Leader.removeJob
def removeJob(self, jobBatchSystemID): """Removes a job from the system.""" assert jobBatchSystemID in self.jobBatchSystemIDToIssuedJob jobNode = self.jobBatchSystemIDToIssuedJob[jobBatchSystemID] if jobNode.preemptable: # len(jobBatchSystemIDToIssuedJob) should always be greater than or equal to preemptableJobsIssued, # so decrement this value before removing the job from the issuedJob map assert self.preemptableJobsIssued > 0 self.preemptableJobsIssued -= 1 del self.jobBatchSystemIDToIssuedJob[jobBatchSystemID] # If service job if jobNode.jobStoreID in self.toilState.serviceJobStoreIDToPredecessorJob: # Decrement the number of services if jobNode.preemptable: self.preemptableServiceJobsIssued -= 1 else: self.serviceJobsIssued -= 1 return jobNode
python
def removeJob(self, jobBatchSystemID): assert jobBatchSystemID in self.jobBatchSystemIDToIssuedJob jobNode = self.jobBatchSystemIDToIssuedJob[jobBatchSystemID] if jobNode.preemptable: # len(jobBatchSystemIDToIssuedJob) should always be greater than or equal to preemptableJobsIssued, # so decrement this value before removing the job from the issuedJob map assert self.preemptableJobsIssued > 0 self.preemptableJobsIssued -= 1 del self.jobBatchSystemIDToIssuedJob[jobBatchSystemID] # If service job if jobNode.jobStoreID in self.toilState.serviceJobStoreIDToPredecessorJob: # Decrement the number of services if jobNode.preemptable: self.preemptableServiceJobsIssued -= 1 else: self.serviceJobsIssued -= 1 return jobNode
[ "def", "removeJob", "(", "self", ",", "jobBatchSystemID", ")", ":", "assert", "jobBatchSystemID", "in", "self", ".", "jobBatchSystemIDToIssuedJob", "jobNode", "=", "self", ".", "jobBatchSystemIDToIssuedJob", "[", "jobBatchSystemID", "]", "if", "jobNode", ".", "preem...
Removes a job from the system.
[ "Removes", "a", "job", "from", "the", "system", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/leader.py#L663-L681
225,038
DataBiosphere/toil
src/toil/leader.py
Leader.killJobs
def killJobs(self, jobsToKill): """ Kills the given set of jobs and then sends them for processing """ if len(jobsToKill) > 0: self.batchSystem.killBatchJobs(jobsToKill) for jobBatchSystemID in jobsToKill: self.processFinishedJob(jobBatchSystemID, 1)
python
def killJobs(self, jobsToKill): if len(jobsToKill) > 0: self.batchSystem.killBatchJobs(jobsToKill) for jobBatchSystemID in jobsToKill: self.processFinishedJob(jobBatchSystemID, 1)
[ "def", "killJobs", "(", "self", ",", "jobsToKill", ")", ":", "if", "len", "(", "jobsToKill", ")", ">", "0", ":", "self", ".", "batchSystem", ".", "killBatchJobs", "(", "jobsToKill", ")", "for", "jobBatchSystemID", "in", "jobsToKill", ":", "self", ".", "p...
Kills the given set of jobs and then sends them for processing
[ "Kills", "the", "given", "set", "of", "jobs", "and", "then", "sends", "them", "for", "processing" ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/leader.py#L689-L696
225,039
DataBiosphere/toil
src/toil/leader.py
Leader.reissueOverLongJobs
def reissueOverLongJobs(self): """ Check each issued job - if it is running for longer than desirable issue a kill instruction. Wait for the job to die then we pass the job to processFinishedJob. """ maxJobDuration = self.config.maxJobDuration jobsToKill = [] if maxJobDuration < 10000000: # We won't bother doing anything if rescue time > 16 weeks. runningJobs = self.batchSystem.getRunningBatchJobIDs() for jobBatchSystemID in list(runningJobs.keys()): if runningJobs[jobBatchSystemID] > maxJobDuration: logger.warn("The job: %s has been running for: %s seconds, more than the " "max job duration: %s, we'll kill it", str(self.jobBatchSystemIDToIssuedJob[jobBatchSystemID].jobStoreID), str(runningJobs[jobBatchSystemID]), str(maxJobDuration)) jobsToKill.append(jobBatchSystemID) self.killJobs(jobsToKill)
python
def reissueOverLongJobs(self): maxJobDuration = self.config.maxJobDuration jobsToKill = [] if maxJobDuration < 10000000: # We won't bother doing anything if rescue time > 16 weeks. runningJobs = self.batchSystem.getRunningBatchJobIDs() for jobBatchSystemID in list(runningJobs.keys()): if runningJobs[jobBatchSystemID] > maxJobDuration: logger.warn("The job: %s has been running for: %s seconds, more than the " "max job duration: %s, we'll kill it", str(self.jobBatchSystemIDToIssuedJob[jobBatchSystemID].jobStoreID), str(runningJobs[jobBatchSystemID]), str(maxJobDuration)) jobsToKill.append(jobBatchSystemID) self.killJobs(jobsToKill)
[ "def", "reissueOverLongJobs", "(", "self", ")", ":", "maxJobDuration", "=", "self", ".", "config", ".", "maxJobDuration", "jobsToKill", "=", "[", "]", "if", "maxJobDuration", "<", "10000000", ":", "# We won't bother doing anything if rescue time > 16 weeks.", "runningJo...
Check each issued job - if it is running for longer than desirable issue a kill instruction. Wait for the job to die then we pass the job to processFinishedJob.
[ "Check", "each", "issued", "job", "-", "if", "it", "is", "running", "for", "longer", "than", "desirable", "issue", "a", "kill", "instruction", ".", "Wait", "for", "the", "job", "to", "die", "then", "we", "pass", "the", "job", "to", "processFinishedJob", ...
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/leader.py#L700-L718
225,040
DataBiosphere/toil
src/toil/leader.py
Leader.processFinishedJob
def processFinishedJob(self, batchSystemID, resultStatus, wallTime=None): """ Function reads a processed jobGraph file and updates its state. """ jobNode = self.removeJob(batchSystemID) jobStoreID = jobNode.jobStoreID if wallTime is not None and self.clusterScaler is not None: self.clusterScaler.addCompletedJob(jobNode, wallTime) if self.jobStore.exists(jobStoreID): logger.debug("Job %s continues to exist (i.e. has more to do)", jobNode) try: jobGraph = self.jobStore.load(jobStoreID) except NoSuchJobException: # Avoid importing AWSJobStore as the corresponding extra might be missing if self.jobStore.__class__.__name__ == 'AWSJobStore': # We have a ghost job - the job has been deleted but a stale read from # SDB gave us a false positive when we checked for its existence. # Process the job from here as any other job removed from the job store. # This is a temporary work around until https://github.com/BD2KGenomics/toil/issues/1091 # is completed logger.warn('Got a stale read from SDB for job %s', jobNode) self.processRemovedJob(jobNode, resultStatus) return else: raise if jobGraph.logJobStoreFileID is not None: with jobGraph.getLogFileHandle(self.jobStore) as logFileStream: # more memory efficient than read().striplines() while leaving off the # trailing \n left when using readlines() # http://stackoverflow.com/a/15233739 StatsAndLogging.logWithFormatting(jobStoreID, logFileStream, method=logger.warn, message='The job seems to have left a log file, indicating failure: %s' % jobGraph) if self.config.writeLogs or self.config.writeLogsGzip: with jobGraph.getLogFileHandle(self.jobStore) as logFileStream: StatsAndLogging.writeLogFiles(jobGraph.chainedJobs, logFileStream, self.config) if resultStatus != 0: # If the batch system returned a non-zero exit code then the worker # is assumed not to have captured the failure of the job, so we # reduce the retry count here. 
if jobGraph.logJobStoreFileID is None: logger.warn("No log file is present, despite job failing: %s", jobNode) jobGraph.setupJobAfterFailure(self.config) self.jobStore.update(jobGraph) elif jobStoreID in self.toilState.hasFailedSuccessors: # If the job has completed okay, we can remove it from the list of jobs with failed successors self.toilState.hasFailedSuccessors.remove(jobStoreID) self.toilState.updatedJobs.add((jobGraph, resultStatus)) #Now we know the #jobGraph is done we can add it to the list of updated jobGraph files logger.debug("Added job: %s to active jobs", jobGraph) else: #The jobGraph is done self.processRemovedJob(jobNode, resultStatus)
python
def processFinishedJob(self, batchSystemID, resultStatus, wallTime=None): jobNode = self.removeJob(batchSystemID) jobStoreID = jobNode.jobStoreID if wallTime is not None and self.clusterScaler is not None: self.clusterScaler.addCompletedJob(jobNode, wallTime) if self.jobStore.exists(jobStoreID): logger.debug("Job %s continues to exist (i.e. has more to do)", jobNode) try: jobGraph = self.jobStore.load(jobStoreID) except NoSuchJobException: # Avoid importing AWSJobStore as the corresponding extra might be missing if self.jobStore.__class__.__name__ == 'AWSJobStore': # We have a ghost job - the job has been deleted but a stale read from # SDB gave us a false positive when we checked for its existence. # Process the job from here as any other job removed from the job store. # This is a temporary work around until https://github.com/BD2KGenomics/toil/issues/1091 # is completed logger.warn('Got a stale read from SDB for job %s', jobNode) self.processRemovedJob(jobNode, resultStatus) return else: raise if jobGraph.logJobStoreFileID is not None: with jobGraph.getLogFileHandle(self.jobStore) as logFileStream: # more memory efficient than read().striplines() while leaving off the # trailing \n left when using readlines() # http://stackoverflow.com/a/15233739 StatsAndLogging.logWithFormatting(jobStoreID, logFileStream, method=logger.warn, message='The job seems to have left a log file, indicating failure: %s' % jobGraph) if self.config.writeLogs or self.config.writeLogsGzip: with jobGraph.getLogFileHandle(self.jobStore) as logFileStream: StatsAndLogging.writeLogFiles(jobGraph.chainedJobs, logFileStream, self.config) if resultStatus != 0: # If the batch system returned a non-zero exit code then the worker # is assumed not to have captured the failure of the job, so we # reduce the retry count here. 
if jobGraph.logJobStoreFileID is None: logger.warn("No log file is present, despite job failing: %s", jobNode) jobGraph.setupJobAfterFailure(self.config) self.jobStore.update(jobGraph) elif jobStoreID in self.toilState.hasFailedSuccessors: # If the job has completed okay, we can remove it from the list of jobs with failed successors self.toilState.hasFailedSuccessors.remove(jobStoreID) self.toilState.updatedJobs.add((jobGraph, resultStatus)) #Now we know the #jobGraph is done we can add it to the list of updated jobGraph files logger.debug("Added job: %s to active jobs", jobGraph) else: #The jobGraph is done self.processRemovedJob(jobNode, resultStatus)
[ "def", "processFinishedJob", "(", "self", ",", "batchSystemID", ",", "resultStatus", ",", "wallTime", "=", "None", ")", ":", "jobNode", "=", "self", ".", "removeJob", "(", "batchSystemID", ")", "jobStoreID", "=", "jobNode", ".", "jobStoreID", "if", "wallTime",...
Function reads a processed jobGraph file and updates its state.
[ "Function", "reads", "a", "processed", "jobGraph", "file", "and", "updates", "its", "state", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/leader.py#L761-L812
225,041
DataBiosphere/toil
src/toil/leader.py
Leader.getSuccessors
def getSuccessors(jobGraph, alreadySeenSuccessors, jobStore): """ Gets successors of the given job by walking the job graph recursively. Any successor in alreadySeenSuccessors is ignored and not traversed. Returns the set of found successors. This set is added to alreadySeenSuccessors. """ successors = set() def successorRecursion(jobGraph): # For lists of successors for successorList in jobGraph.stack: # For each successor in list of successors for successorJobNode in successorList: # If successor not already visited if successorJobNode.jobStoreID not in alreadySeenSuccessors: # Add to set of successors successors.add(successorJobNode.jobStoreID) alreadySeenSuccessors.add(successorJobNode.jobStoreID) # Recurse if job exists # (job may not exist if already completed) if jobStore.exists(successorJobNode.jobStoreID): successorRecursion(jobStore.load(successorJobNode.jobStoreID)) successorRecursion(jobGraph) # Recurse from jobGraph return successors
python
def getSuccessors(jobGraph, alreadySeenSuccessors, jobStore): successors = set() def successorRecursion(jobGraph): # For lists of successors for successorList in jobGraph.stack: # For each successor in list of successors for successorJobNode in successorList: # If successor not already visited if successorJobNode.jobStoreID not in alreadySeenSuccessors: # Add to set of successors successors.add(successorJobNode.jobStoreID) alreadySeenSuccessors.add(successorJobNode.jobStoreID) # Recurse if job exists # (job may not exist if already completed) if jobStore.exists(successorJobNode.jobStoreID): successorRecursion(jobStore.load(successorJobNode.jobStoreID)) successorRecursion(jobGraph) # Recurse from jobGraph return successors
[ "def", "getSuccessors", "(", "jobGraph", ",", "alreadySeenSuccessors", ",", "jobStore", ")", ":", "successors", "=", "set", "(", ")", "def", "successorRecursion", "(", "jobGraph", ")", ":", "# For lists of successors", "for", "successorList", "in", "jobGraph", "."...
Gets successors of the given job by walking the job graph recursively. Any successor in alreadySeenSuccessors is ignored and not traversed. Returns the set of found successors. This set is added to alreadySeenSuccessors.
[ "Gets", "successors", "of", "the", "given", "job", "by", "walking", "the", "job", "graph", "recursively", ".", "Any", "successor", "in", "alreadySeenSuccessors", "is", "ignored", "and", "not", "traversed", ".", "Returns", "the", "set", "of", "found", "successo...
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/leader.py#L815-L843
225,042
DataBiosphere/toil
src/toil/leader.py
Leader.processTotallyFailedJob
def processTotallyFailedJob(self, jobGraph):
    """
    Process a job that has exhausted its retries (totally failed).

    Records the failure and propagates it: for a service job the owning
    (predecessor) job's other services are told to terminate; for an ordinary
    job the failure is propagated through its successor graph and to its
    direct predecessors.

    :param jobGraph: the totally failed job's graph object.
    """
    # Mark job as a totally failed job
    self.toilState.totalFailedJobs.add(JobNode.fromJobGraph(jobGraph))
    if self.toilMetrics:
        self.toilMetrics.logFailedJob(jobGraph)
    if jobGraph.jobStoreID in self.toilState.serviceJobStoreIDToPredecessorJob: # Is
        # a service job
        logger.debug("Service job is being processed as a totally failed job: %s", jobGraph)
        predecesssorJobGraph = self.toilState.serviceJobStoreIDToPredecessorJob[jobGraph.jobStoreID]
        # This removes the service job as a service of the predecessor
        # and potentially makes the predecessor active
        self._updatePredecessorStatus(jobGraph.jobStoreID)
        # Remove the start flag, if it still exists. This indicates
        # to the service manager that the job has "started", this prevents
        # the service manager from deadlocking while waiting
        self.jobStore.deleteFile(jobGraph.startJobStoreID)
        # Signal to any other services in the group that they should
        # terminate. We do this to prevent other services in the set
        # of services from deadlocking waiting for this service to start properly
        if predecesssorJobGraph.jobStoreID in self.toilState.servicesIssued:
            self.serviceManager.killServices(self.toilState.servicesIssued[predecesssorJobGraph.jobStoreID], error=True)
            logger.debug("Job: %s is instructing all the services of its parent job to quit", jobGraph)
            self.toilState.hasFailedSuccessors.add(predecesssorJobGraph.jobStoreID) # This ensures that the
            # job will not attempt to run any of it's successors on the stack
    else:
        # Is a non-service job
        assert jobGraph.jobStoreID not in self.toilState.servicesIssued
        # Traverse failed job's successor graph and get the jobStoreID of new successors.
        # Any successor already in toilState.failedSuccessors will not be traversed
        # All successors traversed will be added to toilState.failedSuccessors and returned
        # as a set (unseenSuccessors).
        unseenSuccessors = self.getSuccessors(jobGraph, self.toilState.failedSuccessors,
                                              self.jobStore)
        logger.debug("Found new failed successors: %s of job: %s", " ".join(
                     unseenSuccessors), jobGraph)
        # For each newly found successor
        for successorJobStoreID in unseenSuccessors:
            # If the successor is a successor of other jobs that have already tried to schedule it
            if successorJobStoreID in self.toilState.successorJobStoreIDToPredecessorJobs:
                # For each such predecessor job
                # (we remove the successor from toilState.successorJobStoreIDToPredecessorJobs to avoid doing
                # this multiple times for each failed predecessor)
                for predecessorJob in self.toilState.successorJobStoreIDToPredecessorJobs.pop(successorJobStoreID):
                    # Reduce the predecessor job's successor count.
                    self.toilState.successorCounts[predecessorJob.jobStoreID] -= 1
                    # Indicate that it has failed jobs.
                    self.toilState.hasFailedSuccessors.add(predecessorJob.jobStoreID)
                    logger.debug("Marking job: %s as having failed successors (found by "
                                 "reading successors failed job)", predecessorJob)
                    # If the predecessor has no remaining successors, add to list of active jobs
                    assert self.toilState.successorCounts[predecessorJob.jobStoreID] >= 0
                    if self.toilState.successorCounts[predecessorJob.jobStoreID] == 0:
                        self.toilState.updatedJobs.add((predecessorJob, 0))
                        # Remove the predecessor job from the set of jobs with successors.
                        self.toilState.successorCounts.pop(predecessorJob.jobStoreID)
        # If the job has predecessor(s)
        if jobGraph.jobStoreID in self.toilState.successorJobStoreIDToPredecessorJobs:
            # For each predecessor of the job
            for predecessorJobGraph in self.toilState.successorJobStoreIDToPredecessorJobs[jobGraph.jobStoreID]:
                # Mark the predecessor as failed
                self.toilState.hasFailedSuccessors.add(predecessorJobGraph.jobStoreID)
                logger.debug("Totally failed job: %s is marking direct predecessor: %s "
                             "as having failed jobs", jobGraph, predecessorJobGraph)
            self._updatePredecessorStatus(jobGraph.jobStoreID)
python
def processTotallyFailedJob(self, jobGraph): # Mark job as a totally failed job self.toilState.totalFailedJobs.add(JobNode.fromJobGraph(jobGraph)) if self.toilMetrics: self.toilMetrics.logFailedJob(jobGraph) if jobGraph.jobStoreID in self.toilState.serviceJobStoreIDToPredecessorJob: # Is # a service job logger.debug("Service job is being processed as a totally failed job: %s", jobGraph) predecesssorJobGraph = self.toilState.serviceJobStoreIDToPredecessorJob[jobGraph.jobStoreID] # This removes the service job as a service of the predecessor # and potentially makes the predecessor active self._updatePredecessorStatus(jobGraph.jobStoreID) # Remove the start flag, if it still exists. This indicates # to the service manager that the job has "started", this prevents # the service manager from deadlocking while waiting self.jobStore.deleteFile(jobGraph.startJobStoreID) # Signal to any other services in the group that they should # terminate. We do this to prevent other services in the set # of services from deadlocking waiting for this service to start properly if predecesssorJobGraph.jobStoreID in self.toilState.servicesIssued: self.serviceManager.killServices(self.toilState.servicesIssued[predecesssorJobGraph.jobStoreID], error=True) logger.debug("Job: %s is instructing all the services of its parent job to quit", jobGraph) self.toilState.hasFailedSuccessors.add(predecesssorJobGraph.jobStoreID) # This ensures that the # job will not attempt to run any of it's successors on the stack else: # Is a non-service job assert jobGraph.jobStoreID not in self.toilState.servicesIssued # Traverse failed job's successor graph and get the jobStoreID of new successors. # Any successor already in toilState.failedSuccessors will not be traversed # All successors traversed will be added to toilState.failedSuccessors and returned # as a set (unseenSuccessors). 
unseenSuccessors = self.getSuccessors(jobGraph, self.toilState.failedSuccessors, self.jobStore) logger.debug("Found new failed successors: %s of job: %s", " ".join( unseenSuccessors), jobGraph) # For each newly found successor for successorJobStoreID in unseenSuccessors: # If the successor is a successor of other jobs that have already tried to schedule it if successorJobStoreID in self.toilState.successorJobStoreIDToPredecessorJobs: # For each such predecessor job # (we remove the successor from toilState.successorJobStoreIDToPredecessorJobs to avoid doing # this multiple times for each failed predecessor) for predecessorJob in self.toilState.successorJobStoreIDToPredecessorJobs.pop(successorJobStoreID): # Reduce the predecessor job's successor count. self.toilState.successorCounts[predecessorJob.jobStoreID] -= 1 # Indicate that it has failed jobs. self.toilState.hasFailedSuccessors.add(predecessorJob.jobStoreID) logger.debug("Marking job: %s as having failed successors (found by " "reading successors failed job)", predecessorJob) # If the predecessor has no remaining successors, add to list of active jobs assert self.toilState.successorCounts[predecessorJob.jobStoreID] >= 0 if self.toilState.successorCounts[predecessorJob.jobStoreID] == 0: self.toilState.updatedJobs.add((predecessorJob, 0)) # Remove the predecessor job from the set of jobs with successors. self.toilState.successorCounts.pop(predecessorJob.jobStoreID) # If the job has predecessor(s) if jobGraph.jobStoreID in self.toilState.successorJobStoreIDToPredecessorJobs: # For each predecessor of the job for predecessorJobGraph in self.toilState.successorJobStoreIDToPredecessorJobs[jobGraph.jobStoreID]: # Mark the predecessor as failed self.toilState.hasFailedSuccessors.add(predecessorJobGraph.jobStoreID) logger.debug("Totally failed job: %s is marking direct predecessor: %s " "as having failed jobs", jobGraph, predecessorJobGraph) self._updatePredecessorStatus(jobGraph.jobStoreID)
[ "def", "processTotallyFailedJob", "(", "self", ",", "jobGraph", ")", ":", "# Mark job as a totally failed job", "self", ".", "toilState", ".", "totalFailedJobs", ".", "add", "(", "JobNode", ".", "fromJobGraph", "(", "jobGraph", ")", ")", "if", "self", ".", "toil...
Processes a totally failed job.
[ "Processes", "a", "totally", "failed", "job", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/leader.py#L845-L929
225,043
DataBiosphere/toil
src/toil/leader.py
Leader._updatePredecessorStatus
def _updatePredecessorStatus(self, jobStoreID):
    """
    Update the status of predecessors once the successor job with the given
    jobStoreID has finished.

    Handles three cases: the finished job was a service (its owner may become
    active again), the root job (nothing to clean up), or an ordinary job
    (its predecessors' successor counts are decremented).
    """
    if jobStoreID in self.toilState.serviceJobStoreIDToPredecessorJob:
        # Is a service job
        predecessorJob = self.toilState.serviceJobStoreIDToPredecessorJob.pop(jobStoreID)
        self.toilState.servicesIssued[predecessorJob.jobStoreID].pop(jobStoreID)
        if len(self.toilState.servicesIssued[predecessorJob.jobStoreID]) == 0: # Predecessor job has
            # all its services terminated
            self.toilState.servicesIssued.pop(predecessorJob.jobStoreID) # The job has no running services
            self.toilState.updatedJobs.add((predecessorJob, 0)) # Now we know
            # the job is done we can add it to the list of updated job files
    elif jobStoreID not in self.toilState.successorJobStoreIDToPredecessorJobs:
        #We have reach the root job
        # Sanity check: at the root, all bookkeeping should already be empty.
        assert len(self.toilState.updatedJobs) == 0
        assert len(self.toilState.successorJobStoreIDToPredecessorJobs) == 0
        assert len(self.toilState.successorCounts) == 0
        logger.debug("Reached root job %s so no predecessors to clean up" % jobStoreID)
    else:
        # Is a non-root, non-service job
        logger.debug("Cleaning the predecessors of %s" % jobStoreID)
        # For each predecessor
        for predecessorJob in self.toilState.successorJobStoreIDToPredecessorJobs.pop(jobStoreID):
            # Reduce the predecessor's number of successors by one to indicate the
            # completion of the jobStoreID job
            self.toilState.successorCounts[predecessorJob.jobStoreID] -= 1
            # If the predecessor job is done and all the successors are complete
            if self.toilState.successorCounts[predecessorJob.jobStoreID] == 0:
                # Remove it from the set of jobs with active successors
                self.toilState.successorCounts.pop(predecessorJob.jobStoreID)
                if predecessorJob.jobStoreID not in self.toilState.hasFailedSuccessors:
                    # Pop stack at this point, as we can get rid of its successors
                    predecessorJob.stack.pop()
                    # Now we know the job is done we can add it to the list of updated job files
                    assert predecessorJob not in self.toilState.updatedJobs
                    self.toilState.updatedJobs.add((predecessorJob, 0))
python
def _updatePredecessorStatus(self, jobStoreID): if jobStoreID in self.toilState.serviceJobStoreIDToPredecessorJob: # Is a service job predecessorJob = self.toilState.serviceJobStoreIDToPredecessorJob.pop(jobStoreID) self.toilState.servicesIssued[predecessorJob.jobStoreID].pop(jobStoreID) if len(self.toilState.servicesIssued[predecessorJob.jobStoreID]) == 0: # Predecessor job has # all its services terminated self.toilState.servicesIssued.pop(predecessorJob.jobStoreID) # The job has no running services self.toilState.updatedJobs.add((predecessorJob, 0)) # Now we know # the job is done we can add it to the list of updated job files elif jobStoreID not in self.toilState.successorJobStoreIDToPredecessorJobs: #We have reach the root job assert len(self.toilState.updatedJobs) == 0 assert len(self.toilState.successorJobStoreIDToPredecessorJobs) == 0 assert len(self.toilState.successorCounts) == 0 logger.debug("Reached root job %s so no predecessors to clean up" % jobStoreID) else: # Is a non-root, non-service job logger.debug("Cleaning the predecessors of %s" % jobStoreID) # For each predecessor for predecessorJob in self.toilState.successorJobStoreIDToPredecessorJobs.pop(jobStoreID): # Reduce the predecessor's number of successors by one to indicate the # completion of the jobStoreID job self.toilState.successorCounts[predecessorJob.jobStoreID] -= 1 # If the predecessor job is done and all the successors are complete if self.toilState.successorCounts[predecessorJob.jobStoreID] == 0: # Remove it from the set of jobs with active successors self.toilState.successorCounts.pop(predecessorJob.jobStoreID) if predecessorJob.jobStoreID not in self.toilState.hasFailedSuccessors: # Pop stack at this point, as we can get rid of its successors predecessorJob.stack.pop() # Now we know the job is done we can add it to the list of updated job files assert predecessorJob not in self.toilState.updatedJobs self.toilState.updatedJobs.add((predecessorJob, 0))
[ "def", "_updatePredecessorStatus", "(", "self", ",", "jobStoreID", ")", ":", "if", "jobStoreID", "in", "self", ".", "toilState", ".", "serviceJobStoreIDToPredecessorJob", ":", "# Is a service job", "predecessorJob", "=", "self", ".", "toilState", ".", "serviceJobStore...
Update status of predecessors for finished successor job.
[ "Update", "status", "of", "predecessors", "for", "finished", "successor", "job", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/leader.py#L931-L975
225,044
DataBiosphere/toil
src/toil/cwl/cwltoil.py
simplify_list
def simplify_list(maybe_list): """Turn a length one list loaded by cwltool into a scalar. Anything else is passed as-is, by reference.""" if isinstance(maybe_list, MutableSequence): is_list = aslist(maybe_list) if len(is_list) == 1: return is_list[0] return maybe_list
python
def simplify_list(maybe_list): if isinstance(maybe_list, MutableSequence): is_list = aslist(maybe_list) if len(is_list) == 1: return is_list[0] return maybe_list
[ "def", "simplify_list", "(", "maybe_list", ")", ":", "if", "isinstance", "(", "maybe_list", ",", "MutableSequence", ")", ":", "is_list", "=", "aslist", "(", "maybe_list", ")", "if", "len", "(", "is_list", ")", "==", "1", ":", "return", "is_list", "[", "0...
Turn a length one list loaded by cwltool into a scalar. Anything else is passed as-is, by reference.
[ "Turn", "a", "length", "one", "list", "loaded", "by", "cwltool", "into", "a", "scalar", ".", "Anything", "else", "is", "passed", "as", "-", "is", "by", "reference", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/cwl/cwltoil.py#L203-L210
225,045
DataBiosphere/toil
src/toil/cwl/cwltoil.py
toil_get_file
def toil_get_file(file_store, index, existing, file_store_id): """Get path to input file from Toil jobstore.""" if not file_store_id.startswith("toilfs:"): return file_store.jobStore.getPublicUrl(file_store.jobStore.importFile(file_store_id)) src_path = file_store.readGlobalFile(file_store_id[7:]) index[src_path] = file_store_id existing[file_store_id] = src_path return schema_salad.ref_resolver.file_uri(src_path)
python
def toil_get_file(file_store, index, existing, file_store_id): if not file_store_id.startswith("toilfs:"): return file_store.jobStore.getPublicUrl(file_store.jobStore.importFile(file_store_id)) src_path = file_store.readGlobalFile(file_store_id[7:]) index[src_path] = file_store_id existing[file_store_id] = src_path return schema_salad.ref_resolver.file_uri(src_path)
[ "def", "toil_get_file", "(", "file_store", ",", "index", ",", "existing", ",", "file_store_id", ")", ":", "if", "not", "file_store_id", ".", "startswith", "(", "\"toilfs:\"", ")", ":", "return", "file_store", ".", "jobStore", ".", "getPublicUrl", "(", "file_st...
Get path to input file from Toil jobstore.
[ "Get", "path", "to", "input", "file", "from", "Toil", "jobstore", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/cwl/cwltoil.py#L304-L312
225,046
DataBiosphere/toil
src/toil/cwl/cwltoil.py
write_file
def write_file(writeFunc, index, existing, x): """Write a file into the Toil jobstore. 'existing' is a set of files retrieved as inputs from toil_get_file. This ensures they are mapped back as the same name if passed through. """ # Toil fileStore reference if x.startswith("toilfs:"): return x # File literal outputs with no path, we don't write these and will fail # with unsupportedRequirement when retrieving later with getFile elif x.startswith("_:"): return x else: x = existing.get(x, x) if x not in index: if not urlparse.urlparse(x).scheme: rp = os.path.realpath(x) else: rp = x try: index[x] = "toilfs:" + writeFunc(rp) existing[index[x]] = x except Exception as e: cwllogger.error("Got exception '%s' while copying '%s'", e, x) raise return index[x]
python
def write_file(writeFunc, index, existing, x): # Toil fileStore reference if x.startswith("toilfs:"): return x # File literal outputs with no path, we don't write these and will fail # with unsupportedRequirement when retrieving later with getFile elif x.startswith("_:"): return x else: x = existing.get(x, x) if x not in index: if not urlparse.urlparse(x).scheme: rp = os.path.realpath(x) else: rp = x try: index[x] = "toilfs:" + writeFunc(rp) existing[index[x]] = x except Exception as e: cwllogger.error("Got exception '%s' while copying '%s'", e, x) raise return index[x]
[ "def", "write_file", "(", "writeFunc", ",", "index", ",", "existing", ",", "x", ")", ":", "# Toil fileStore reference", "if", "x", ".", "startswith", "(", "\"toilfs:\"", ")", ":", "return", "x", "# File literal outputs with no path, we don't write these and will fail", ...
Write a file into the Toil jobstore. 'existing' is a set of files retrieved as inputs from toil_get_file. This ensures they are mapped back as the same name if passed through.
[ "Write", "a", "file", "into", "the", "Toil", "jobstore", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/cwl/cwltoil.py#L315-L342
225,047
DataBiosphere/toil
src/toil/cwl/cwltoil.py
uploadFile
def uploadFile(uploadfunc, fileindex, existing, uf, skip_broken=False): """Update a file object so that the location is a reference to the toil file store, writing it to the file store if necessary. """ if uf["location"].startswith("toilfs:") or uf["location"].startswith("_:"): return if uf["location"] in fileindex: uf["location"] = fileindex[uf["location"]] return if not uf["location"] and uf["path"]: uf["location"] = schema_salad.ref_resolver.file_uri(uf["path"]) if uf["location"].startswith("file://") and not os.path.isfile(uf["location"][7:]): if skip_broken: return else: raise cwltool.errors.WorkflowException( "File is missing: %s" % uf["location"]) uf["location"] = write_file( uploadfunc, fileindex, existing, uf["location"])
python
def uploadFile(uploadfunc, fileindex, existing, uf, skip_broken=False): if uf["location"].startswith("toilfs:") or uf["location"].startswith("_:"): return if uf["location"] in fileindex: uf["location"] = fileindex[uf["location"]] return if not uf["location"] and uf["path"]: uf["location"] = schema_salad.ref_resolver.file_uri(uf["path"]) if uf["location"].startswith("file://") and not os.path.isfile(uf["location"][7:]): if skip_broken: return else: raise cwltool.errors.WorkflowException( "File is missing: %s" % uf["location"]) uf["location"] = write_file( uploadfunc, fileindex, existing, uf["location"])
[ "def", "uploadFile", "(", "uploadfunc", ",", "fileindex", ",", "existing", ",", "uf", ",", "skip_broken", "=", "False", ")", ":", "if", "uf", "[", "\"location\"", "]", ".", "startswith", "(", "\"toilfs:\"", ")", "or", "uf", "[", "\"location\"", "]", ".",...
Update a file object so that the location is a reference to the toil file store, writing it to the file store if necessary.
[ "Update", "a", "file", "object", "so", "that", "the", "location", "is", "a", "reference", "to", "the", "toil", "file", "store", "writing", "it", "to", "the", "file", "store", "if", "necessary", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/cwl/cwltoil.py#L345-L365
225,048
DataBiosphere/toil
src/toil/cwl/cwltoil.py
toilStageFiles
def toilStageFiles(file_store, cwljob, outdir, index, existing, export, destBucket=None): """Copy input files out of the global file store and update location and path.""" def _collectDirEntries(obj): # type: (Union[Dict[Text, Any], List[Dict[Text, Any]]]) -> Iterator[Dict[Text, Any]] if isinstance(obj, dict): if obj.get("class") in ("File", "Directory"): yield obj else: for sub_obj in obj.values(): for dir_entry in _collectDirEntries(sub_obj): yield dir_entry elif isinstance(obj, list): for sub_obj in obj: for dir_entry in _collectDirEntries(sub_obj): yield dir_entry jobfiles = list(_collectDirEntries(cwljob)) pm = ToilPathMapper( jobfiles, "", outdir, separateDirs=False, stage_listing=True) for f, p in pm.items(): if not p.staged: continue # Deal with bucket exports if destBucket: # Directories don't need to be created if we're exporting to # a bucket if p.type == "File": # Remove the staging directory from the filepath and # form the destination URL unstageTargetPath = p.target[len(outdir):] destUrl = '/'.join(s.strip('/') for s in [destBucket, unstageTargetPath]) file_store.exportFile(p.resolved[7:], destUrl) continue if not os.path.exists(os.path.dirname(p.target)): os.makedirs(os.path.dirname(p.target), 0o0755) if p.type == "File": file_store.exportFile(p.resolved[7:], "file://" + p.target) elif p.type == "Directory" and not os.path.exists(p.target): os.makedirs(p.target, 0o0755) elif p.type == "CreateFile": with open(p.target, "wb") as n: n.write(p.resolved.encode("utf-8")) def _check_adjust(f): f["location"] = schema_salad.ref_resolver.file_uri( pm.mapper(f["location"])[1]) if "contents" in f: del f["contents"] return f visit_class(cwljob, ("File", "Directory"), _check_adjust)
python
def toilStageFiles(file_store, cwljob, outdir, index, existing, export, destBucket=None): def _collectDirEntries(obj): # type: (Union[Dict[Text, Any], List[Dict[Text, Any]]]) -> Iterator[Dict[Text, Any]] if isinstance(obj, dict): if obj.get("class") in ("File", "Directory"): yield obj else: for sub_obj in obj.values(): for dir_entry in _collectDirEntries(sub_obj): yield dir_entry elif isinstance(obj, list): for sub_obj in obj: for dir_entry in _collectDirEntries(sub_obj): yield dir_entry jobfiles = list(_collectDirEntries(cwljob)) pm = ToilPathMapper( jobfiles, "", outdir, separateDirs=False, stage_listing=True) for f, p in pm.items(): if not p.staged: continue # Deal with bucket exports if destBucket: # Directories don't need to be created if we're exporting to # a bucket if p.type == "File": # Remove the staging directory from the filepath and # form the destination URL unstageTargetPath = p.target[len(outdir):] destUrl = '/'.join(s.strip('/') for s in [destBucket, unstageTargetPath]) file_store.exportFile(p.resolved[7:], destUrl) continue if not os.path.exists(os.path.dirname(p.target)): os.makedirs(os.path.dirname(p.target), 0o0755) if p.type == "File": file_store.exportFile(p.resolved[7:], "file://" + p.target) elif p.type == "Directory" and not os.path.exists(p.target): os.makedirs(p.target, 0o0755) elif p.type == "CreateFile": with open(p.target, "wb") as n: n.write(p.resolved.encode("utf-8")) def _check_adjust(f): f["location"] = schema_salad.ref_resolver.file_uri( pm.mapper(f["location"])[1]) if "contents" in f: del f["contents"] return f visit_class(cwljob, ("File", "Directory"), _check_adjust)
[ "def", "toilStageFiles", "(", "file_store", ",", "cwljob", ",", "outdir", ",", "index", ",", "existing", ",", "export", ",", "destBucket", "=", "None", ")", ":", "def", "_collectDirEntries", "(", "obj", ")", ":", "# type: (Union[Dict[Text, Any], List[Dict[Text, An...
Copy input files out of the global file store and update location and path.
[ "Copy", "input", "files", "out", "of", "the", "global", "file", "store", "and", "update", "location", "and", "path", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/cwl/cwltoil.py#L388-L446
225,049
DataBiosphere/toil
src/toil/cwl/cwltoil.py
_makeNestedTempDir
def _makeNestedTempDir(top, seed, levels=2): """ Gets a temporary directory in the hierarchy of directories under a given top directory. This exists to avoid placing too many temporary directories under a single top in a flat structure, which can slow down metadata updates such as deletes on the local file system. The seed parameter allows for deterministic placement of the created directory. The seed is hashed into hex digest and the directory structure is created from the initial letters of the digest. :param top : string, top directory for the hierarchy :param seed : string, the hierarchy will be generated from this seed string :rtype : string, path to temporary directory - will be created when necessary. """ # Valid chars for the creation of temporary directories validDirs = hashlib.md5(six.b(str(seed))).hexdigest() tempDir = top for i in range(max(min(levels, len(validDirs)), 1)): tempDir = os.path.join(tempDir, validDirs[i]) if not os.path.exists(tempDir): try: os.makedirs(tempDir) except os.error: if not os.path.exists(tempDir): # In the case that a collision occurs and # it is created while we wait then we ignore raise return tempDir
python
def _makeNestedTempDir(top, seed, levels=2): # Valid chars for the creation of temporary directories validDirs = hashlib.md5(six.b(str(seed))).hexdigest() tempDir = top for i in range(max(min(levels, len(validDirs)), 1)): tempDir = os.path.join(tempDir, validDirs[i]) if not os.path.exists(tempDir): try: os.makedirs(tempDir) except os.error: if not os.path.exists(tempDir): # In the case that a collision occurs and # it is created while we wait then we ignore raise return tempDir
[ "def", "_makeNestedTempDir", "(", "top", ",", "seed", ",", "levels", "=", "2", ")", ":", "# Valid chars for the creation of temporary directories", "validDirs", "=", "hashlib", ".", "md5", "(", "six", ".", "b", "(", "str", "(", "seed", ")", ")", ")", ".", ...
Gets a temporary directory in the hierarchy of directories under a given top directory. This exists to avoid placing too many temporary directories under a single top in a flat structure, which can slow down metadata updates such as deletes on the local file system. The seed parameter allows for deterministic placement of the created directory. The seed is hashed into hex digest and the directory structure is created from the initial letters of the digest. :param top : string, top directory for the hierarchy :param seed : string, the hierarchy will be generated from this seed string :rtype : string, path to temporary directory - will be created when necessary.
[ "Gets", "a", "temporary", "directory", "in", "the", "hierarchy", "of", "directories", "under", "a", "given", "top", "directory", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/cwl/cwltoil.py#L472-L503
225,050
DataBiosphere/toil
src/toil/cwl/cwltoil.py
remove_pickle_problems
def remove_pickle_problems(obj): """doc_loader does not pickle correctly, causing Toil errors, remove from objects. """ if hasattr(obj, "doc_loader"): obj.doc_loader = None if hasattr(obj, "embedded_tool"): obj.embedded_tool = remove_pickle_problems(obj.embedded_tool) if hasattr(obj, "steps"): obj.steps = [remove_pickle_problems(s) for s in obj.steps] return obj
python
def remove_pickle_problems(obj): if hasattr(obj, "doc_loader"): obj.doc_loader = None if hasattr(obj, "embedded_tool"): obj.embedded_tool = remove_pickle_problems(obj.embedded_tool) if hasattr(obj, "steps"): obj.steps = [remove_pickle_problems(s) for s in obj.steps] return obj
[ "def", "remove_pickle_problems", "(", "obj", ")", ":", "if", "hasattr", "(", "obj", ",", "\"doc_loader\"", ")", ":", "obj", ".", "doc_loader", "=", "None", "if", "hasattr", "(", "obj", ",", "\"embedded_tool\"", ")", ":", "obj", ".", "embedded_tool", "=", ...
doc_loader does not pickle correctly, causing Toil errors, remove from objects.
[ "doc_loader", "does", "not", "pickle", "correctly", "causing", "Toil", "errors", "remove", "from", "objects", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/cwl/cwltoil.py#L813-L823
225,051
DataBiosphere/toil
src/toil/cwl/cwltoil.py
cleanTempDirs
def cleanTempDirs(job): """Remove temporarly created directories.""" if job is CWLJob and job._succeeded: # Only CWLJobs have this attribute. for tempDir in job.openTempDirs: if os.path.exists(tempDir): shutil.rmtree(tempDir) job.openTempDirs = []
python
def cleanTempDirs(job): if job is CWLJob and job._succeeded: # Only CWLJobs have this attribute. for tempDir in job.openTempDirs: if os.path.exists(tempDir): shutil.rmtree(tempDir) job.openTempDirs = []
[ "def", "cleanTempDirs", "(", "job", ")", ":", "if", "job", "is", "CWLJob", "and", "job", ".", "_succeeded", ":", "# Only CWLJobs have this attribute.", "for", "tempDir", "in", "job", ".", "openTempDirs", ":", "if", "os", ".", "path", ".", "exists", "(", "t...
Remove temporarly created directories.
[ "Remove", "temporarly", "created", "directories", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/cwl/cwltoil.py#L1031-L1037
225,052
DataBiosphere/toil
src/toil/cwl/cwltoil.py
StepValueFrom.do_eval
def do_eval(self, inputs, ctx): """Evalute ourselves.""" return cwltool.expression.do_eval( self.expr, inputs, self.req, None, None, {}, context=ctx)
python
def do_eval(self, inputs, ctx): return cwltool.expression.do_eval( self.expr, inputs, self.req, None, None, {}, context=ctx)
[ "def", "do_eval", "(", "self", ",", "inputs", ",", "ctx", ")", ":", "return", "cwltool", ".", "expression", ".", "do_eval", "(", "self", ".", "expr", ",", "inputs", ",", "self", ".", "req", ",", "None", ",", "None", ",", "{", "}", ",", "context", ...
Evalute ourselves.
[ "Evalute", "ourselves", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/cwl/cwltoil.py#L140-L143
225,053
DataBiosphere/toil
src/toil/cwl/cwltoil.py
DefaultWithSource.resolve
def resolve(self): """Determine the final input value.""" if self.source: result = self.source[1][self.source[0]] if result: return result return self.default
python
def resolve(self): if self.source: result = self.source[1][self.source[0]] if result: return result return self.default
[ "def", "resolve", "(", "self", ")", ":", "if", "self", ".", "source", ":", "result", "=", "self", ".", "source", "[", "1", "]", "[", "self", ".", "source", "[", "0", "]", "]", "if", "result", ":", "return", "result", "return", "self", ".", "defau...
Determine the final input value.
[ "Determine", "the", "final", "input", "value", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/cwl/cwltoil.py#L152-L158
225,054
DataBiosphere/toil
src/toil/batchSystems/lsfHelper.py
find
def find(basedir, string): """ walk basedir and return all files matching string """ matches = [] for root, dirnames, filenames in os.walk(basedir): for filename in fnmatch.filter(filenames, string): matches.append(os.path.join(root, filename)) return matches
python
def find(basedir, string): matches = [] for root, dirnames, filenames in os.walk(basedir): for filename in fnmatch.filter(filenames, string): matches.append(os.path.join(root, filename)) return matches
[ "def", "find", "(", "basedir", ",", "string", ")", ":", "matches", "=", "[", "]", "for", "root", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "basedir", ")", ":", "for", "filename", "in", "fnmatch", ".", "filter", "(", "filenames"...
walk basedir and return all files matching string
[ "walk", "basedir", "and", "return", "all", "files", "matching", "string" ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/batchSystems/lsfHelper.py#L39-L47
225,055
DataBiosphere/toil
src/toil/batchSystems/lsfHelper.py
find_first_match
def find_first_match(basedir, string): """ return the first file that matches string starting from basedir """ matches = find(basedir, string) return matches[0] if matches else matches
python
def find_first_match(basedir, string): matches = find(basedir, string) return matches[0] if matches else matches
[ "def", "find_first_match", "(", "basedir", ",", "string", ")", ":", "matches", "=", "find", "(", "basedir", ",", "string", ")", "return", "matches", "[", "0", "]", "if", "matches", "else", "matches" ]
return the first file that matches string starting from basedir
[ "return", "the", "first", "file", "that", "matches", "string", "starting", "from", "basedir" ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/batchSystems/lsfHelper.py#L49-L54
225,056
DataBiosphere/toil
src/toil/batchSystems/lsfHelper.py
tokenize_conf_stream
def tokenize_conf_stream(conf_handle): """ convert the key=val pairs in a LSF config stream to tuples of tokens """ for line in conf_handle: if line.startswith("#"): continue tokens = line.split("=") if len(tokens) != 2: continue yield (tokens[0].strip(), tokens[1].strip())
python
def tokenize_conf_stream(conf_handle): for line in conf_handle: if line.startswith("#"): continue tokens = line.split("=") if len(tokens) != 2: continue yield (tokens[0].strip(), tokens[1].strip())
[ "def", "tokenize_conf_stream", "(", "conf_handle", ")", ":", "for", "line", "in", "conf_handle", ":", "if", "line", ".", "startswith", "(", "\"#\"", ")", ":", "continue", "tokens", "=", "line", ".", "split", "(", "\"=\"", ")", "if", "len", "(", "tokens",...
convert the key=val pairs in a LSF config stream to tuples of tokens
[ "convert", "the", "key", "=", "val", "pairs", "in", "a", "LSF", "config", "stream", "to", "tuples", "of", "tokens" ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/batchSystems/lsfHelper.py#L85-L95
225,057
DataBiosphere/toil
src/toil/batchSystems/lsfHelper.py
apply_bparams
def apply_bparams(fn): """ apply fn to each line of bparams, returning the result """ cmd = ["bparams", "-a"] try: output = subprocess.check_output(cmd).decode('utf-8') except: return None return fn(output.split("\n"))
python
def apply_bparams(fn): cmd = ["bparams", "-a"] try: output = subprocess.check_output(cmd).decode('utf-8') except: return None return fn(output.split("\n"))
[ "def", "apply_bparams", "(", "fn", ")", ":", "cmd", "=", "[", "\"bparams\"", ",", "\"-a\"", "]", "try", ":", "output", "=", "subprocess", ".", "check_output", "(", "cmd", ")", ".", "decode", "(", "'utf-8'", ")", "except", ":", "return", "None", "return...
apply fn to each line of bparams, returning the result
[ "apply", "fn", "to", "each", "line", "of", "bparams", "returning", "the", "result" ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/batchSystems/lsfHelper.py#L97-L106
225,058
DataBiosphere/toil
src/toil/batchSystems/lsfHelper.py
apply_lsadmin
def apply_lsadmin(fn): """ apply fn to each line of lsadmin, returning the result """ cmd = ["lsadmin", "showconf", "lim"] try: output = subprocess.check_output(cmd).decode('utf-8') except: return None return fn(output.split("\n"))
python
def apply_lsadmin(fn): cmd = ["lsadmin", "showconf", "lim"] try: output = subprocess.check_output(cmd).decode('utf-8') except: return None return fn(output.split("\n"))
[ "def", "apply_lsadmin", "(", "fn", ")", ":", "cmd", "=", "[", "\"lsadmin\"", ",", "\"showconf\"", ",", "\"lim\"", "]", "try", ":", "output", "=", "subprocess", ".", "check_output", "(", "cmd", ")", ".", "decode", "(", "'utf-8'", ")", "except", ":", "re...
apply fn to each line of lsadmin, returning the result
[ "apply", "fn", "to", "each", "line", "of", "lsadmin", "returning", "the", "result" ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/batchSystems/lsfHelper.py#L108-L117
225,059
DataBiosphere/toil
src/toil/batchSystems/lsfHelper.py
get_lsf_units
def get_lsf_units(resource=False): """ check if we can find LSF_UNITS_FOR_LIMITS in lsadmin and lsf.conf files, preferring the value in bparams, then lsadmin, then the lsf.conf file """ lsf_units = apply_bparams(get_lsf_units_from_stream) if lsf_units: return lsf_units lsf_units = apply_lsadmin(get_lsf_units_from_stream) if lsf_units: return lsf_units lsf_units = apply_conf_file(get_lsf_units_from_stream, LSF_CONF_FILENAME) if lsf_units: return lsf_units # -R usage units are in MB, not KB by default if resource: return DEFAULT_RESOURCE_UNITS else: return DEFAULT_LSF_UNITS
python
def get_lsf_units(resource=False): lsf_units = apply_bparams(get_lsf_units_from_stream) if lsf_units: return lsf_units lsf_units = apply_lsadmin(get_lsf_units_from_stream) if lsf_units: return lsf_units lsf_units = apply_conf_file(get_lsf_units_from_stream, LSF_CONF_FILENAME) if lsf_units: return lsf_units # -R usage units are in MB, not KB by default if resource: return DEFAULT_RESOURCE_UNITS else: return DEFAULT_LSF_UNITS
[ "def", "get_lsf_units", "(", "resource", "=", "False", ")", ":", "lsf_units", "=", "apply_bparams", "(", "get_lsf_units_from_stream", ")", "if", "lsf_units", ":", "return", "lsf_units", "lsf_units", "=", "apply_lsadmin", "(", "get_lsf_units_from_stream", ")", "if", ...
check if we can find LSF_UNITS_FOR_LIMITS in lsadmin and lsf.conf files, preferring the value in bparams, then lsadmin, then the lsf.conf file
[ "check", "if", "we", "can", "find", "LSF_UNITS_FOR_LIMITS", "in", "lsadmin", "and", "lsf", ".", "conf", "files", "preferring", "the", "value", "in", "bparams", "then", "lsadmin", "then", "the", "lsf", ".", "conf", "file" ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/batchSystems/lsfHelper.py#L120-L141
225,060
DataBiosphere/toil
src/toil/batchSystems/lsfHelper.py
parse_memory
def parse_memory(mem, resource): """ Parse memory parameter """ lsf_unit = get_lsf_units(resource=resource) return convert_mb(float(mem) * 1024, lsf_unit)
python
def parse_memory(mem, resource): lsf_unit = get_lsf_units(resource=resource) return convert_mb(float(mem) * 1024, lsf_unit)
[ "def", "parse_memory", "(", "mem", ",", "resource", ")", ":", "lsf_unit", "=", "get_lsf_units", "(", "resource", "=", "resource", ")", "return", "convert_mb", "(", "float", "(", "mem", ")", "*", "1024", ",", "lsf_unit", ")" ]
Parse memory parameter
[ "Parse", "memory", "parameter" ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/batchSystems/lsfHelper.py#L155-L160
225,061
DataBiosphere/toil
src/toil/batchSystems/lsfHelper.py
per_core_reservation
def per_core_reservation(): """ returns True if the cluster is configured for reservations to be per core, False if it is per job """ per_core = apply_bparams(per_core_reserve_from_stream) if per_core: if per_core.upper() == "Y": return True else: return False per_core = apply_lsadmin(per_core_reserve_from_stream) if per_core: if per_core.upper() == "Y": return True else: return False per_core = apply_conf_file(per_core_reserve_from_stream, LSB_PARAMS_FILENAME) if per_core and per_core.upper() == "Y": return True else: return False return False
python
def per_core_reservation(): per_core = apply_bparams(per_core_reserve_from_stream) if per_core: if per_core.upper() == "Y": return True else: return False per_core = apply_lsadmin(per_core_reserve_from_stream) if per_core: if per_core.upper() == "Y": return True else: return False per_core = apply_conf_file(per_core_reserve_from_stream, LSB_PARAMS_FILENAME) if per_core and per_core.upper() == "Y": return True else: return False return False
[ "def", "per_core_reservation", "(", ")", ":", "per_core", "=", "apply_bparams", "(", "per_core_reserve_from_stream", ")", "if", "per_core", ":", "if", "per_core", ".", "upper", "(", ")", "==", "\"Y\"", ":", "return", "True", "else", ":", "return", "False", "...
returns True if the cluster is configured for reservations to be per core, False if it is per job
[ "returns", "True", "if", "the", "cluster", "is", "configured", "for", "reservations", "to", "be", "per", "core", "False", "if", "it", "is", "per", "job" ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/batchSystems/lsfHelper.py#L163-L187
225,062
DataBiosphere/toil
src/toil/common.py
addOptions
def addOptions(parser, config=Config()): """ Adds toil options to a parser object, either optparse or argparse. """ # Wrapper function that allows toil to be used with both the optparse and # argparse option parsing modules addLoggingOptions(parser) # This adds the logging stuff. if isinstance(parser, ArgumentParser): def addGroup(headingString, bodyString): return parser.add_argument_group(headingString, bodyString).add_argument parser.register("type", "bool", lambda v: v.lower() == "true") # Custom type for arg=True/False. _addOptions(addGroup, config) else: raise RuntimeError("Unanticipated class passed to addOptions(), %s. Expecting " "argparse.ArgumentParser" % parser.__class__)
python
def addOptions(parser, config=Config()): # Wrapper function that allows toil to be used with both the optparse and # argparse option parsing modules addLoggingOptions(parser) # This adds the logging stuff. if isinstance(parser, ArgumentParser): def addGroup(headingString, bodyString): return parser.add_argument_group(headingString, bodyString).add_argument parser.register("type", "bool", lambda v: v.lower() == "true") # Custom type for arg=True/False. _addOptions(addGroup, config) else: raise RuntimeError("Unanticipated class passed to addOptions(), %s. Expecting " "argparse.ArgumentParser" % parser.__class__)
[ "def", "addOptions", "(", "parser", ",", "config", "=", "Config", "(", ")", ")", ":", "# Wrapper function that allows toil to be used with both the optparse and", "# argparse option parsing modules", "addLoggingOptions", "(", "parser", ")", "# This adds the logging stuff.", "if...
Adds toil options to a parser object, either optparse or argparse.
[ "Adds", "toil", "options", "to", "a", "parser", "object", "either", "optparse", "or", "argparse", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/common.py#L586-L601
225,063
DataBiosphere/toil
src/toil/common.py
parseSetEnv
def parseSetEnv(l): """ Parses a list of strings of the form "NAME=VALUE" or just "NAME" into a dictionary. Strings of the latter from will result in dictionary entries whose value is None. :type l: list[str] :rtype: dict[str,str] >>> parseSetEnv([]) {} >>> parseSetEnv(['a']) {'a': None} >>> parseSetEnv(['a=']) {'a': ''} >>> parseSetEnv(['a=b']) {'a': 'b'} >>> parseSetEnv(['a=a', 'a=b']) {'a': 'b'} >>> parseSetEnv(['a=b', 'c=d']) {'a': 'b', 'c': 'd'} >>> parseSetEnv(['a=b=c']) {'a': 'b=c'} >>> parseSetEnv(['']) Traceback (most recent call last): ... ValueError: Empty name >>> parseSetEnv(['=1']) Traceback (most recent call last): ... ValueError: Empty name """ d = dict() for i in l: try: k, v = i.split('=', 1) except ValueError: k, v = i, None if not k: raise ValueError('Empty name') d[k] = v return d
python
def parseSetEnv(l): d = dict() for i in l: try: k, v = i.split('=', 1) except ValueError: k, v = i, None if not k: raise ValueError('Empty name') d[k] = v return d
[ "def", "parseSetEnv", "(", "l", ")", ":", "d", "=", "dict", "(", ")", "for", "i", "in", "l", ":", "try", ":", "k", ",", "v", "=", "i", ".", "split", "(", "'='", ",", "1", ")", "except", "ValueError", ":", "k", ",", "v", "=", "i", ",", "No...
Parses a list of strings of the form "NAME=VALUE" or just "NAME" into a dictionary. Strings of the latter from will result in dictionary entries whose value is None. :type l: list[str] :rtype: dict[str,str] >>> parseSetEnv([]) {} >>> parseSetEnv(['a']) {'a': None} >>> parseSetEnv(['a=']) {'a': ''} >>> parseSetEnv(['a=b']) {'a': 'b'} >>> parseSetEnv(['a=a', 'a=b']) {'a': 'b'} >>> parseSetEnv(['a=b', 'c=d']) {'a': 'b', 'c': 'd'} >>> parseSetEnv(['a=b=c']) {'a': 'b=c'} >>> parseSetEnv(['']) Traceback (most recent call last): ... ValueError: Empty name >>> parseSetEnv(['=1']) Traceback (most recent call last): ... ValueError: Empty name
[ "Parses", "a", "list", "of", "strings", "of", "the", "form", "NAME", "=", "VALUE", "or", "just", "NAME", "into", "a", "dictionary", ".", "Strings", "of", "the", "latter", "from", "will", "result", "in", "dictionary", "entries", "whose", "value", "is", "N...
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/common.py#L1244-L1284
225,064
DataBiosphere/toil
src/toil/common.py
getDirSizeRecursively
def getDirSizeRecursively(dirPath): """ This method will return the cumulative number of bytes occupied by the files on disk in the directory and its subdirectories. This method will raise a 'subprocess.CalledProcessError' if it is unable to access a folder or file because of insufficient permissions. Therefore this method should only be called on the jobStore, and will alert the user if some portion is inaccessible. Everything in the jobStore should have appropriate permissions as there is no way to read the filesize without permissions. The environment variable 'BLOCKSIZE'='512' is set instead of the much cleaner --block-size=1 because Apple can't handle it. :param str dirPath: A valid path to a directory or file. :return: Total size, in bytes, of the file or directory at dirPath. """ # du is often faster than using os.lstat(), sometimes significantly so. # The call: 'du -s /some/path' should give the number of 512-byte blocks # allocated with the environment variable: BLOCKSIZE='512' set, and we # multiply this by 512 to return the filesize in bytes. return int(subprocess.check_output(['du', '-s', dirPath], env=dict(os.environ, BLOCKSIZE='512')).decode('utf-8').split()[0]) * 512
python
def getDirSizeRecursively(dirPath): # du is often faster than using os.lstat(), sometimes significantly so. # The call: 'du -s /some/path' should give the number of 512-byte blocks # allocated with the environment variable: BLOCKSIZE='512' set, and we # multiply this by 512 to return the filesize in bytes. return int(subprocess.check_output(['du', '-s', dirPath], env=dict(os.environ, BLOCKSIZE='512')).decode('utf-8').split()[0]) * 512
[ "def", "getDirSizeRecursively", "(", "dirPath", ")", ":", "# du is often faster than using os.lstat(), sometimes significantly so.", "# The call: 'du -s /some/path' should give the number of 512-byte blocks", "# allocated with the environment variable: BLOCKSIZE='512' set, and we", "# multiply thi...
This method will return the cumulative number of bytes occupied by the files on disk in the directory and its subdirectories. This method will raise a 'subprocess.CalledProcessError' if it is unable to access a folder or file because of insufficient permissions. Therefore this method should only be called on the jobStore, and will alert the user if some portion is inaccessible. Everything in the jobStore should have appropriate permissions as there is no way to read the filesize without permissions. The environment variable 'BLOCKSIZE'='512' is set instead of the much cleaner --block-size=1 because Apple can't handle it. :param str dirPath: A valid path to a directory or file. :return: Total size, in bytes, of the file or directory at dirPath.
[ "This", "method", "will", "return", "the", "cumulative", "number", "of", "bytes", "occupied", "by", "the", "files", "on", "disk", "in", "the", "directory", "and", "its", "subdirectories", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/common.py#L1310-L1334
225,065
DataBiosphere/toil
src/toil/common.py
getFileSystemSize
def getFileSystemSize(dirPath): """ Return the free space, and total size of the file system hosting `dirPath`. :param str dirPath: A valid path to a directory. :return: free space and total size of file system :rtype: tuple """ assert os.path.exists(dirPath) diskStats = os.statvfs(dirPath) freeSpace = diskStats.f_frsize * diskStats.f_bavail diskSize = diskStats.f_frsize * diskStats.f_blocks return freeSpace, diskSize
python
def getFileSystemSize(dirPath): assert os.path.exists(dirPath) diskStats = os.statvfs(dirPath) freeSpace = diskStats.f_frsize * diskStats.f_bavail diskSize = diskStats.f_frsize * diskStats.f_blocks return freeSpace, diskSize
[ "def", "getFileSystemSize", "(", "dirPath", ")", ":", "assert", "os", ".", "path", ".", "exists", "(", "dirPath", ")", "diskStats", "=", "os", ".", "statvfs", "(", "dirPath", ")", "freeSpace", "=", "diskStats", ".", "f_frsize", "*", "diskStats", ".", "f_...
Return the free space, and total size of the file system hosting `dirPath`. :param str dirPath: A valid path to a directory. :return: free space and total size of file system :rtype: tuple
[ "Return", "the", "free", "space", "and", "total", "size", "of", "the", "file", "system", "hosting", "dirPath", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/common.py#L1337-L1349
225,066
DataBiosphere/toil
src/toil/common.py
Toil.restart
def restart(self): """ Restarts a workflow that has been interrupted. :return: The root job's return value """ self._assertContextManagerUsed() self.writePIDFile() if not self.config.restart: raise ToilRestartException('A Toil workflow must be initiated with Toil.start(), ' 'not restart().') from toil.job import JobException try: self._jobStore.loadRootJob() except JobException: logger.warning( 'Requested restart but the workflow has already been completed; allowing exports to rerun.') return self._jobStore.getRootJobReturnValue() self._batchSystem = self.createBatchSystem(self.config) self._setupAutoDeployment() try: self._setBatchSystemEnvVars() self._serialiseEnv() self._cacheAllJobs() self._setProvisioner() rootJobGraph = self._jobStore.clean(jobCache=self._jobCache) return self._runMainLoop(rootJobGraph) finally: self._shutdownBatchSystem()
python
def restart(self): self._assertContextManagerUsed() self.writePIDFile() if not self.config.restart: raise ToilRestartException('A Toil workflow must be initiated with Toil.start(), ' 'not restart().') from toil.job import JobException try: self._jobStore.loadRootJob() except JobException: logger.warning( 'Requested restart but the workflow has already been completed; allowing exports to rerun.') return self._jobStore.getRootJobReturnValue() self._batchSystem = self.createBatchSystem(self.config) self._setupAutoDeployment() try: self._setBatchSystemEnvVars() self._serialiseEnv() self._cacheAllJobs() self._setProvisioner() rootJobGraph = self._jobStore.clean(jobCache=self._jobCache) return self._runMainLoop(rootJobGraph) finally: self._shutdownBatchSystem()
[ "def", "restart", "(", "self", ")", ":", "self", ".", "_assertContextManagerUsed", "(", ")", "self", ".", "writePIDFile", "(", ")", "if", "not", "self", ".", "config", ".", "restart", ":", "raise", "ToilRestartException", "(", "'A Toil workflow must be initiated...
Restarts a workflow that has been interrupted. :return: The root job's return value
[ "Restarts", "a", "workflow", "that", "has", "been", "interrupted", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/common.py#L777-L807
225,067
DataBiosphere/toil
src/toil/common.py
Toil.getJobStore
def getJobStore(cls, locator): """ Create an instance of the concrete job store implementation that matches the given locator. :param str locator: The location of the job store to be represent by the instance :return: an instance of a concrete subclass of AbstractJobStore :rtype: toil.jobStores.abstractJobStore.AbstractJobStore """ name, rest = cls.parseLocator(locator) if name == 'file': from toil.jobStores.fileJobStore import FileJobStore return FileJobStore(rest) elif name == 'aws': from toil.jobStores.aws.jobStore import AWSJobStore return AWSJobStore(rest) elif name == 'azure': from toil.jobStores.azureJobStore import AzureJobStore return AzureJobStore(rest) elif name == 'google': from toil.jobStores.googleJobStore import GoogleJobStore return GoogleJobStore(rest) else: raise RuntimeError("Unknown job store implementation '%s'" % name)
python
def getJobStore(cls, locator): name, rest = cls.parseLocator(locator) if name == 'file': from toil.jobStores.fileJobStore import FileJobStore return FileJobStore(rest) elif name == 'aws': from toil.jobStores.aws.jobStore import AWSJobStore return AWSJobStore(rest) elif name == 'azure': from toil.jobStores.azureJobStore import AzureJobStore return AzureJobStore(rest) elif name == 'google': from toil.jobStores.googleJobStore import GoogleJobStore return GoogleJobStore(rest) else: raise RuntimeError("Unknown job store implementation '%s'" % name)
[ "def", "getJobStore", "(", "cls", ",", "locator", ")", ":", "name", ",", "rest", "=", "cls", ".", "parseLocator", "(", "locator", ")", "if", "name", "==", "'file'", ":", "from", "toil", ".", "jobStores", ".", "fileJobStore", "import", "FileJobStore", "re...
Create an instance of the concrete job store implementation that matches the given locator. :param str locator: The location of the job store to be represent by the instance :return: an instance of a concrete subclass of AbstractJobStore :rtype: toil.jobStores.abstractJobStore.AbstractJobStore
[ "Create", "an", "instance", "of", "the", "concrete", "job", "store", "implementation", "that", "matches", "the", "given", "locator", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/common.py#L821-L844
225,068
DataBiosphere/toil
src/toil/common.py
Toil.createBatchSystem
def createBatchSystem(config): """ Creates an instance of the batch system specified in the given config. :param toil.common.Config config: the current configuration :rtype: batchSystems.abstractBatchSystem.AbstractBatchSystem :return: an instance of a concrete subclass of AbstractBatchSystem """ kwargs = dict(config=config, maxCores=config.maxCores, maxMemory=config.maxMemory, maxDisk=config.maxDisk) from toil.batchSystems.registry import batchSystemFactoryFor try: factory = batchSystemFactoryFor(config.batchSystem) batchSystemClass = factory() except: raise RuntimeError('Unrecognised batch system: %s' % config.batchSystem) if not config.disableCaching and not batchSystemClass.supportsWorkerCleanup(): raise RuntimeError('%s currently does not support shared caching. Set the ' '--disableCaching flag if you want to ' 'use this batch system.' % config.batchSystem) logger.debug('Using the %s' % re.sub("([a-z])([A-Z])", "\g<1> \g<2>", batchSystemClass.__name__).lower()) return batchSystemClass(**kwargs)
python
def createBatchSystem(config): kwargs = dict(config=config, maxCores=config.maxCores, maxMemory=config.maxMemory, maxDisk=config.maxDisk) from toil.batchSystems.registry import batchSystemFactoryFor try: factory = batchSystemFactoryFor(config.batchSystem) batchSystemClass = factory() except: raise RuntimeError('Unrecognised batch system: %s' % config.batchSystem) if not config.disableCaching and not batchSystemClass.supportsWorkerCleanup(): raise RuntimeError('%s currently does not support shared caching. Set the ' '--disableCaching flag if you want to ' 'use this batch system.' % config.batchSystem) logger.debug('Using the %s' % re.sub("([a-z])([A-Z])", "\g<1> \g<2>", batchSystemClass.__name__).lower()) return batchSystemClass(**kwargs)
[ "def", "createBatchSystem", "(", "config", ")", ":", "kwargs", "=", "dict", "(", "config", "=", "config", ",", "maxCores", "=", "config", ".", "maxCores", ",", "maxMemory", "=", "config", ".", "maxMemory", ",", "maxDisk", "=", "config", ".", "maxDisk", "...
Creates an instance of the batch system specified in the given config. :param toil.common.Config config: the current configuration :rtype: batchSystems.abstractBatchSystem.AbstractBatchSystem :return: an instance of a concrete subclass of AbstractBatchSystem
[ "Creates", "an", "instance", "of", "the", "batch", "system", "specified", "in", "the", "given", "config", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/common.py#L870-L900
225,069
DataBiosphere/toil
src/toil/common.py
Toil._setupAutoDeployment
def _setupAutoDeployment(self, userScript=None): """ Determine the user script, save it to the job store and inject a reference to the saved copy into the batch system such that it can auto-deploy the resource on the worker nodes. :param toil.resource.ModuleDescriptor userScript: the module descriptor referencing the user script. If None, it will be looked up in the job store. """ if userScript is not None: # This branch is hit when a workflow is being started if userScript.belongsToToil: logger.debug('User script %s belongs to Toil. No need to auto-deploy it.', userScript) userScript = None else: if (self._batchSystem.supportsAutoDeployment() and not self.config.disableAutoDeployment): # Note that by saving the ModuleDescriptor, and not the Resource we allow for # redeploying a potentially modified user script on workflow restarts. with self._jobStore.writeSharedFileStream('userScript') as f: pickle.dump(userScript, f, protocol=pickle.HIGHEST_PROTOCOL) else: from toil.batchSystems.singleMachine import SingleMachineBatchSystem if not isinstance(self._batchSystem, SingleMachineBatchSystem): logger.warn('Batch system does not support auto-deployment. The user ' 'script %s will have to be present at the same location on ' 'every worker.', userScript) userScript = None else: # This branch is hit on restarts from toil.jobStores.abstractJobStore import NoSuchFileException try: with self._jobStore.readSharedFileStream('userScript') as f: userScript = safeUnpickleFromStream(f) except NoSuchFileException: logger.debug('User script neither set explicitly nor present in the job store.') userScript = None if userScript is None: logger.debug('No user script to auto-deploy.') else: logger.debug('Saving user script %s as a resource', userScript) userScriptResource = userScript.saveAsResourceTo(self._jobStore) logger.debug('Injecting user script %s into batch system.', userScriptResource) self._batchSystem.setUserScript(userScriptResource)
python
def _setupAutoDeployment(self, userScript=None): if userScript is not None: # This branch is hit when a workflow is being started if userScript.belongsToToil: logger.debug('User script %s belongs to Toil. No need to auto-deploy it.', userScript) userScript = None else: if (self._batchSystem.supportsAutoDeployment() and not self.config.disableAutoDeployment): # Note that by saving the ModuleDescriptor, and not the Resource we allow for # redeploying a potentially modified user script on workflow restarts. with self._jobStore.writeSharedFileStream('userScript') as f: pickle.dump(userScript, f, protocol=pickle.HIGHEST_PROTOCOL) else: from toil.batchSystems.singleMachine import SingleMachineBatchSystem if not isinstance(self._batchSystem, SingleMachineBatchSystem): logger.warn('Batch system does not support auto-deployment. The user ' 'script %s will have to be present at the same location on ' 'every worker.', userScript) userScript = None else: # This branch is hit on restarts from toil.jobStores.abstractJobStore import NoSuchFileException try: with self._jobStore.readSharedFileStream('userScript') as f: userScript = safeUnpickleFromStream(f) except NoSuchFileException: logger.debug('User script neither set explicitly nor present in the job store.') userScript = None if userScript is None: logger.debug('No user script to auto-deploy.') else: logger.debug('Saving user script %s as a resource', userScript) userScriptResource = userScript.saveAsResourceTo(self._jobStore) logger.debug('Injecting user script %s into batch system.', userScriptResource) self._batchSystem.setUserScript(userScriptResource)
[ "def", "_setupAutoDeployment", "(", "self", ",", "userScript", "=", "None", ")", ":", "if", "userScript", "is", "not", "None", ":", "# This branch is hit when a workflow is being started", "if", "userScript", ".", "belongsToToil", ":", "logger", ".", "debug", "(", ...
Determine the user script, save it to the job store and inject a reference to the saved copy into the batch system such that it can auto-deploy the resource on the worker nodes. :param toil.resource.ModuleDescriptor userScript: the module descriptor referencing the user script. If None, it will be looked up in the job store.
[ "Determine", "the", "user", "script", "save", "it", "to", "the", "job", "store", "and", "inject", "a", "reference", "to", "the", "saved", "copy", "into", "the", "batch", "system", "such", "that", "it", "can", "auto", "-", "deploy", "the", "resource", "on...
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/common.py#L902-L945
225,070
DataBiosphere/toil
src/toil/common.py
Toil.importFile
def importFile(self, srcUrl, sharedFileName=None): """ Imports the file at the given URL into job store. See :func:`toil.jobStores.abstractJobStore.AbstractJobStore.importFile` for a full description """ self._assertContextManagerUsed() return self._jobStore.importFile(srcUrl, sharedFileName=sharedFileName)
python
def importFile(self, srcUrl, sharedFileName=None): self._assertContextManagerUsed() return self._jobStore.importFile(srcUrl, sharedFileName=sharedFileName)
[ "def", "importFile", "(", "self", ",", "srcUrl", ",", "sharedFileName", "=", "None", ")", ":", "self", ".", "_assertContextManagerUsed", "(", ")", "return", "self", ".", "_jobStore", ".", "importFile", "(", "srcUrl", ",", "sharedFileName", "=", "sharedFileName...
Imports the file at the given URL into job store. See :func:`toil.jobStores.abstractJobStore.AbstractJobStore.importFile` for a full description
[ "Imports", "the", "file", "at", "the", "given", "URL", "into", "job", "store", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/common.py#L947-L955
225,071
DataBiosphere/toil
src/toil/common.py
Toil._setBatchSystemEnvVars
def _setBatchSystemEnvVars(self): """ Sets the environment variables required by the job store and those passed on command line. """ for envDict in (self._jobStore.getEnv(), self.config.environment): for k, v in iteritems(envDict): self._batchSystem.setEnv(k, v)
python
def _setBatchSystemEnvVars(self): for envDict in (self._jobStore.getEnv(), self.config.environment): for k, v in iteritems(envDict): self._batchSystem.setEnv(k, v)
[ "def", "_setBatchSystemEnvVars", "(", "self", ")", ":", "for", "envDict", "in", "(", "self", ".", "_jobStore", ".", "getEnv", "(", ")", ",", "self", ".", "config", ".", "environment", ")", ":", "for", "k", ",", "v", "in", "iteritems", "(", "envDict", ...
Sets the environment variables required by the job store and those passed on command line.
[ "Sets", "the", "environment", "variables", "required", "by", "the", "job", "store", "and", "those", "passed", "on", "command", "line", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/common.py#L967-L973
225,072
DataBiosphere/toil
src/toil/common.py
Toil._serialiseEnv
def _serialiseEnv(self): """ Puts the environment in a globally accessible pickle file. """ # Dump out the environment of this process in the environment pickle file. with self._jobStore.writeSharedFileStream("environment.pickle") as fileHandle: pickle.dump(dict(os.environ), fileHandle, pickle.HIGHEST_PROTOCOL) logger.debug("Written the environment for the jobs to the environment file")
python
def _serialiseEnv(self): # Dump out the environment of this process in the environment pickle file. with self._jobStore.writeSharedFileStream("environment.pickle") as fileHandle: pickle.dump(dict(os.environ), fileHandle, pickle.HIGHEST_PROTOCOL) logger.debug("Written the environment for the jobs to the environment file")
[ "def", "_serialiseEnv", "(", "self", ")", ":", "# Dump out the environment of this process in the environment pickle file.", "with", "self", ".", "_jobStore", ".", "writeSharedFileStream", "(", "\"environment.pickle\"", ")", "as", "fileHandle", ":", "pickle", ".", "dump", ...
Puts the environment in a globally accessible pickle file.
[ "Puts", "the", "environment", "in", "a", "globally", "accessible", "pickle", "file", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/common.py#L975-L982
225,073
DataBiosphere/toil
src/toil/common.py
Toil._cacheAllJobs
def _cacheAllJobs(self): """ Downloads all jobs in the current job store into self.jobCache. """ logger.debug('Caching all jobs in job store') self._jobCache = {jobGraph.jobStoreID: jobGraph for jobGraph in self._jobStore.jobs()} logger.debug('{} jobs downloaded.'.format(len(self._jobCache)))
python
def _cacheAllJobs(self): logger.debug('Caching all jobs in job store') self._jobCache = {jobGraph.jobStoreID: jobGraph for jobGraph in self._jobStore.jobs()} logger.debug('{} jobs downloaded.'.format(len(self._jobCache)))
[ "def", "_cacheAllJobs", "(", "self", ")", ":", "logger", ".", "debug", "(", "'Caching all jobs in job store'", ")", "self", ".", "_jobCache", "=", "{", "jobGraph", ".", "jobStoreID", ":", "jobGraph", "for", "jobGraph", "in", "self", ".", "_jobStore", ".", "j...
Downloads all jobs in the current job store into self.jobCache.
[ "Downloads", "all", "jobs", "in", "the", "current", "job", "store", "into", "self", ".", "jobCache", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/common.py#L984-L990
225,074
DataBiosphere/toil
src/toil/common.py
Toil.getWorkflowDir
def getWorkflowDir(workflowID, configWorkDir=None): """ Returns a path to the directory where worker directories and the cache will be located for this workflow. :param str workflowID: Unique identifier for the workflow :param str configWorkDir: Value passed to the program using the --workDir flag :return: Path to the workflow directory :rtype: str """ workDir = configWorkDir or os.getenv('TOIL_WORKDIR') or tempfile.gettempdir() if not os.path.exists(workDir): raise RuntimeError("The directory specified by --workDir or TOIL_WORKDIR (%s) does not " "exist." % workDir) # Create the workflow dir, make it unique to each host in case workDir is on a shared FS. # This prevents workers on different nodes from erasing each other's directories. workflowDir = os.path.join(workDir, 'toil-%s-%s' % (workflowID, getNodeID())) try: # Directory creation is atomic os.mkdir(workflowDir) except OSError as err: if err.errno != 17: # The directory exists if a previous worker set it up. raise else: logger.debug('Created the workflow directory at %s' % workflowDir) return workflowDir
python
def getWorkflowDir(workflowID, configWorkDir=None): workDir = configWorkDir or os.getenv('TOIL_WORKDIR') or tempfile.gettempdir() if not os.path.exists(workDir): raise RuntimeError("The directory specified by --workDir or TOIL_WORKDIR (%s) does not " "exist." % workDir) # Create the workflow dir, make it unique to each host in case workDir is on a shared FS. # This prevents workers on different nodes from erasing each other's directories. workflowDir = os.path.join(workDir, 'toil-%s-%s' % (workflowID, getNodeID())) try: # Directory creation is atomic os.mkdir(workflowDir) except OSError as err: if err.errno != 17: # The directory exists if a previous worker set it up. raise else: logger.debug('Created the workflow directory at %s' % workflowDir) return workflowDir
[ "def", "getWorkflowDir", "(", "workflowID", ",", "configWorkDir", "=", "None", ")", ":", "workDir", "=", "configWorkDir", "or", "os", ".", "getenv", "(", "'TOIL_WORKDIR'", ")", "or", "tempfile", ".", "gettempdir", "(", ")", "if", "not", "os", ".", "path", ...
Returns a path to the directory where worker directories and the cache will be located for this workflow. :param str workflowID: Unique identifier for the workflow :param str configWorkDir: Value passed to the program using the --workDir flag :return: Path to the workflow directory :rtype: str
[ "Returns", "a", "path", "to", "the", "directory", "where", "worker", "directories", "and", "the", "cache", "will", "be", "located", "for", "this", "workflow", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/common.py#L1001-L1027
225,075
DataBiosphere/toil
src/toil/common.py
Toil._shutdownBatchSystem
def _shutdownBatchSystem(self): """ Shuts down current batch system if it has been created. """ assert self._batchSystem is not None startTime = time.time() logger.debug('Shutting down batch system ...') self._batchSystem.shutdown() logger.debug('... finished shutting down the batch system in %s seconds.' % (time.time() - startTime))
python
def _shutdownBatchSystem(self): assert self._batchSystem is not None startTime = time.time() logger.debug('Shutting down batch system ...') self._batchSystem.shutdown() logger.debug('... finished shutting down the batch system in %s seconds.' % (time.time() - startTime))
[ "def", "_shutdownBatchSystem", "(", "self", ")", ":", "assert", "self", ".", "_batchSystem", "is", "not", "None", "startTime", "=", "time", ".", "time", "(", ")", "logger", ".", "debug", "(", "'Shutting down batch system ...'", ")", "self", ".", "_batchSystem"...
Shuts down current batch system if it has been created.
[ "Shuts", "down", "current", "batch", "system", "if", "it", "has", "been", "created", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/common.py#L1048-L1058
225,076
DataBiosphere/toil
src/toil/common.py
Toil.writePIDFile
def writePIDFile(self): """ Write a the pid of this process to a file in the jobstore. Overwriting the current contents of pid.log is a feature, not a bug of this method. Other methods will rely on always having the most current pid available. So far there is no reason to store any old pids. """ with self._jobStore.writeSharedFileStream('pid.log') as f: f.write(str(os.getpid()).encode('utf-8'))
python
def writePIDFile(self): with self._jobStore.writeSharedFileStream('pid.log') as f: f.write(str(os.getpid()).encode('utf-8'))
[ "def", "writePIDFile", "(", "self", ")", ":", "with", "self", ".", "_jobStore", ".", "writeSharedFileStream", "(", "'pid.log'", ")", "as", "f", ":", "f", ".", "write", "(", "str", "(", "os", ".", "getpid", "(", ")", ")", ".", "encode", "(", "'utf-8'"...
Write a the pid of this process to a file in the jobstore. Overwriting the current contents of pid.log is a feature, not a bug of this method. Other methods will rely on always having the most current pid available. So far there is no reason to store any old pids.
[ "Write", "a", "the", "pid", "of", "this", "process", "to", "a", "file", "in", "the", "jobstore", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/common.py#L1064-L1073
225,077
DataBiosphere/toil
src/toil/wdl/wdl_analysis.py
AnalyzeWDL.find_asts
def find_asts(self, ast_root, name): ''' Finds an AST node with the given name and the entire subtree under it. A function borrowed from scottfrazer. Thank you Scott Frazer! :param ast_root: The WDL AST. The whole thing generally, but really any portion that you wish to search. :param name: The name of the subtree you're looking for, like "Task". :return: nodes representing the AST subtrees matching the "name" given. ''' nodes = [] if isinstance(ast_root, wdl_parser.AstList): for node in ast_root: nodes.extend(self.find_asts(node, name)) elif isinstance(ast_root, wdl_parser.Ast): if ast_root.name == name: nodes.append(ast_root) for attr_name, attr in ast_root.attributes.items(): nodes.extend(self.find_asts(attr, name)) return nodes
python
def find_asts(self, ast_root, name): ''' Finds an AST node with the given name and the entire subtree under it. A function borrowed from scottfrazer. Thank you Scott Frazer! :param ast_root: The WDL AST. The whole thing generally, but really any portion that you wish to search. :param name: The name of the subtree you're looking for, like "Task". :return: nodes representing the AST subtrees matching the "name" given. ''' nodes = [] if isinstance(ast_root, wdl_parser.AstList): for node in ast_root: nodes.extend(self.find_asts(node, name)) elif isinstance(ast_root, wdl_parser.Ast): if ast_root.name == name: nodes.append(ast_root) for attr_name, attr in ast_root.attributes.items(): nodes.extend(self.find_asts(attr, name)) return nodes
[ "def", "find_asts", "(", "self", ",", "ast_root", ",", "name", ")", ":", "nodes", "=", "[", "]", "if", "isinstance", "(", "ast_root", ",", "wdl_parser", ".", "AstList", ")", ":", "for", "node", "in", "ast_root", ":", "nodes", ".", "extend", "(", "sel...
Finds an AST node with the given name and the entire subtree under it. A function borrowed from scottfrazer. Thank you Scott Frazer! :param ast_root: The WDL AST. The whole thing generally, but really any portion that you wish to search. :param name: The name of the subtree you're looking for, like "Task". :return: nodes representing the AST subtrees matching the "name" given.
[ "Finds", "an", "AST", "node", "with", "the", "given", "name", "and", "the", "entire", "subtree", "under", "it", ".", "A", "function", "borrowed", "from", "scottfrazer", ".", "Thank", "you", "Scott", "Frazer!" ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/wdl/wdl_analysis.py#L82-L101
225,078
DataBiosphere/toil
src/toil/wdl/wdl_analysis.py
AnalyzeWDL.dict_from_JSON
def dict_from_JSON(self, JSON_file): ''' Takes a WDL-mapped json file and creates a dict containing the bindings. The 'return' value is only used for unittests. :param JSON_file: A required JSON file containing WDL variable bindings. :return: Returns the self.json_dict purely for unittests. ''' # TODO: Add context support for variables within multiple wdl files with open(JSON_file) as data_file: data = json.load(data_file) for d in data: if isinstance(data[d], basestring): self.json_dict[d] = '"' + data[d] + '"' else: self.json_dict[d] = data[d] return self.json_dict
python
def dict_from_JSON(self, JSON_file): ''' Takes a WDL-mapped json file and creates a dict containing the bindings. The 'return' value is only used for unittests. :param JSON_file: A required JSON file containing WDL variable bindings. :return: Returns the self.json_dict purely for unittests. ''' # TODO: Add context support for variables within multiple wdl files with open(JSON_file) as data_file: data = json.load(data_file) for d in data: if isinstance(data[d], basestring): self.json_dict[d] = '"' + data[d] + '"' else: self.json_dict[d] = data[d] return self.json_dict
[ "def", "dict_from_JSON", "(", "self", ",", "JSON_file", ")", ":", "# TODO: Add context support for variables within multiple wdl files", "with", "open", "(", "JSON_file", ")", "as", "data_file", ":", "data", "=", "json", ".", "load", "(", "data_file", ")", "for", ...
Takes a WDL-mapped json file and creates a dict containing the bindings. The 'return' value is only used for unittests. :param JSON_file: A required JSON file containing WDL variable bindings. :return: Returns the self.json_dict purely for unittests.
[ "Takes", "a", "WDL", "-", "mapped", "json", "file", "and", "creates", "a", "dict", "containing", "the", "bindings", ".", "The", "return", "value", "is", "only", "used", "for", "unittests", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/wdl/wdl_analysis.py#L112-L129
225,079
DataBiosphere/toil
src/toil/wdl/wdl_analysis.py
AnalyzeWDL.create_tasks_dict
def create_tasks_dict(self, ast): ''' Parse each "Task" in the AST. This will create self.tasks_dictionary, where each task name is a key. :return: Creates the self.tasks_dictionary necessary for much of the parser. Returning it is only necessary for unittests. ''' tasks = self.find_asts(ast, 'Task') for task in tasks: self.parse_task(task) return self.tasks_dictionary
python
def create_tasks_dict(self, ast): ''' Parse each "Task" in the AST. This will create self.tasks_dictionary, where each task name is a key. :return: Creates the self.tasks_dictionary necessary for much of the parser. Returning it is only necessary for unittests. ''' tasks = self.find_asts(ast, 'Task') for task in tasks: self.parse_task(task) return self.tasks_dictionary
[ "def", "create_tasks_dict", "(", "self", ",", "ast", ")", ":", "tasks", "=", "self", ".", "find_asts", "(", "ast", ",", "'Task'", ")", "for", "task", "in", "tasks", ":", "self", ".", "parse_task", "(", "task", ")", "return", "self", ".", "tasks_diction...
Parse each "Task" in the AST. This will create self.tasks_dictionary, where each task name is a key. :return: Creates the self.tasks_dictionary necessary for much of the parser. Returning it is only necessary for unittests.
[ "Parse", "each", "Task", "in", "the", "AST", ".", "This", "will", "create", "self", ".", "tasks_dictionary", "where", "each", "task", "name", "is", "a", "key", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/wdl/wdl_analysis.py#L131-L142
225,080
DataBiosphere/toil
src/toil/wdl/wdl_analysis.py
AnalyzeWDL.parse_task
def parse_task(self, task): ''' Parses a WDL task AST subtree. Currently looks at and parses 4 sections: 1. Declarations (e.g. string x = 'helloworld') 2. Commandline (a bash command with dynamic variables inserted) 3. Runtime (docker image; disk; CPU; RAM; etc.) 4. Outputs (expected return values/files) :param task: An AST subtree of a WDL "Task". :return: Returns nothing but adds a task to the self.tasks_dictionary necessary for much of the parser. ''' task_name = task.attributes["name"].source_string # task declarations declaration_array = [] for declaration_subAST in task.attr("declarations"): declaration_array.append(self.parse_task_declaration(declaration_subAST)) self.tasks_dictionary.setdefault(task_name, OrderedDict())['inputs'] = declaration_array for section in task.attr("sections"): # task commandline entries section [command(s) to run] if section.name == "RawCommand": command_array = self.parse_task_rawcommand(section) self.tasks_dictionary.setdefault(task_name, OrderedDict())['raw_commandline'] = command_array # task runtime section (docker image; disk; CPU; RAM; etc.) if section.name == "Runtime": runtime_dict = self.parse_task_runtime(section.attr("map")) self.tasks_dictionary.setdefault(task_name, OrderedDict())['runtime'] = runtime_dict # task output filenames section (expected return values/files) if section.name == "Outputs": output_array = self.parse_task_outputs(section) self.tasks_dictionary.setdefault(task_name, OrderedDict())['outputs'] = output_array
python
def parse_task(self, task): ''' Parses a WDL task AST subtree. Currently looks at and parses 4 sections: 1. Declarations (e.g. string x = 'helloworld') 2. Commandline (a bash command with dynamic variables inserted) 3. Runtime (docker image; disk; CPU; RAM; etc.) 4. Outputs (expected return values/files) :param task: An AST subtree of a WDL "Task". :return: Returns nothing but adds a task to the self.tasks_dictionary necessary for much of the parser. ''' task_name = task.attributes["name"].source_string # task declarations declaration_array = [] for declaration_subAST in task.attr("declarations"): declaration_array.append(self.parse_task_declaration(declaration_subAST)) self.tasks_dictionary.setdefault(task_name, OrderedDict())['inputs'] = declaration_array for section in task.attr("sections"): # task commandline entries section [command(s) to run] if section.name == "RawCommand": command_array = self.parse_task_rawcommand(section) self.tasks_dictionary.setdefault(task_name, OrderedDict())['raw_commandline'] = command_array # task runtime section (docker image; disk; CPU; RAM; etc.) if section.name == "Runtime": runtime_dict = self.parse_task_runtime(section.attr("map")) self.tasks_dictionary.setdefault(task_name, OrderedDict())['runtime'] = runtime_dict # task output filenames section (expected return values/files) if section.name == "Outputs": output_array = self.parse_task_outputs(section) self.tasks_dictionary.setdefault(task_name, OrderedDict())['outputs'] = output_array
[ "def", "parse_task", "(", "self", ",", "task", ")", ":", "task_name", "=", "task", ".", "attributes", "[", "\"name\"", "]", ".", "source_string", "# task declarations", "declaration_array", "=", "[", "]", "for", "declaration_subAST", "in", "task", ".", "attr",...
Parses a WDL task AST subtree. Currently looks at and parses 4 sections: 1. Declarations (e.g. string x = 'helloworld') 2. Commandline (a bash command with dynamic variables inserted) 3. Runtime (docker image; disk; CPU; RAM; etc.) 4. Outputs (expected return values/files) :param task: An AST subtree of a WDL "Task". :return: Returns nothing but adds a task to the self.tasks_dictionary necessary for much of the parser.
[ "Parses", "a", "WDL", "task", "AST", "subtree", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/wdl/wdl_analysis.py#L144-L182
225,081
DataBiosphere/toil
src/toil/wdl/wdl_analysis.py
AnalyzeWDL.parse_task_declaration
def parse_task_declaration(self, declaration_subAST): ''' Parses the declaration section of the WDL task AST subtree. Examples: String my_name String your_name Int two_chains_i_mean_names = 0 :param declaration_subAST: Some subAST representing a task declaration like: 'String file_name' :return: var_name, var_type, var_value Example: Input subAST representing: 'String file_name' Output: var_name='file_name', var_type='String', var_value=None ''' var_name = self.parse_declaration_name(declaration_subAST.attr("name")) var_type = self.parse_declaration_type(declaration_subAST.attr("type")) var_expressn = self.parse_declaration_expressn(declaration_subAST.attr("expression"), es='') return (var_name, var_type, var_expressn)
python
def parse_task_declaration(self, declaration_subAST): ''' Parses the declaration section of the WDL task AST subtree. Examples: String my_name String your_name Int two_chains_i_mean_names = 0 :param declaration_subAST: Some subAST representing a task declaration like: 'String file_name' :return: var_name, var_type, var_value Example: Input subAST representing: 'String file_name' Output: var_name='file_name', var_type='String', var_value=None ''' var_name = self.parse_declaration_name(declaration_subAST.attr("name")) var_type = self.parse_declaration_type(declaration_subAST.attr("type")) var_expressn = self.parse_declaration_expressn(declaration_subAST.attr("expression"), es='') return (var_name, var_type, var_expressn)
[ "def", "parse_task_declaration", "(", "self", ",", "declaration_subAST", ")", ":", "var_name", "=", "self", ".", "parse_declaration_name", "(", "declaration_subAST", ".", "attr", "(", "\"name\"", ")", ")", "var_type", "=", "self", ".", "parse_declaration_type", "(...
Parses the declaration section of the WDL task AST subtree. Examples: String my_name String your_name Int two_chains_i_mean_names = 0 :param declaration_subAST: Some subAST representing a task declaration like: 'String file_name' :return: var_name, var_type, var_value Example: Input subAST representing: 'String file_name' Output: var_name='file_name', var_type='String', var_value=None
[ "Parses", "the", "declaration", "section", "of", "the", "WDL", "task", "AST", "subtree", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/wdl/wdl_analysis.py#L184-L205
225,082
DataBiosphere/toil
src/toil/wdl/wdl_analysis.py
AnalyzeWDL.parse_task_rawcommand
def parse_task_rawcommand(self, rawcommand_subAST): ''' Parses the rawcommand section of the WDL task AST subtree. Task "rawcommands" are divided into many parts. There are 2 types of parts: normal strings, & variables that can serve as changeable inputs. The following example command: 'echo ${variable1} ${variable2} > output_file.txt' Has 5 parts: Normal String: 'echo ' Variable Input: variable1 Normal String: ' ' Variable Input: variable2 Normal String: ' > output_file.txt' Variables can also have additional conditions, like 'sep', which is like the python ''.join() function and in WDL looks like: ${sep=" -V " GVCFs} and would be translated as: ' -V '.join(GVCFs). :param rawcommand_subAST: A subAST representing some bash command. :return: A list=[] of tuples=() representing the parts of the command: e.g. [(command_var, command_type, additional_conditions_list), ...] Where: command_var = 'GVCFs' command_type = 'variable' command_actions = {'sep': ' -V '} ''' command_array = [] for code_snippet in rawcommand_subAST.attributes["parts"]: # normal string if isinstance(code_snippet, wdl_parser.Terminal): command_var = "r'''" + code_snippet.source_string + "'''" # a variable like ${dinosaurDNA} if isinstance(code_snippet, wdl_parser.Ast): if code_snippet.name == 'CommandParameter': # change in the future? seems to be a different parameter but works for all cases it seems? code_expr = self.parse_declaration_expressn(code_snippet.attr('expr'), es='') code_attributes = self.parse_task_rawcommand_attributes(code_snippet.attr('attributes')) command_var = self.modify_cmd_expr_w_attributes(code_expr, code_attributes) if isinstance(code_snippet, wdl_parser.AstList): raise NotImplementedError command_array.append(command_var) return command_array
python
def parse_task_rawcommand(self, rawcommand_subAST): ''' Parses the rawcommand section of the WDL task AST subtree. Task "rawcommands" are divided into many parts. There are 2 types of parts: normal strings, & variables that can serve as changeable inputs. The following example command: 'echo ${variable1} ${variable2} > output_file.txt' Has 5 parts: Normal String: 'echo ' Variable Input: variable1 Normal String: ' ' Variable Input: variable2 Normal String: ' > output_file.txt' Variables can also have additional conditions, like 'sep', which is like the python ''.join() function and in WDL looks like: ${sep=" -V " GVCFs} and would be translated as: ' -V '.join(GVCFs). :param rawcommand_subAST: A subAST representing some bash command. :return: A list=[] of tuples=() representing the parts of the command: e.g. [(command_var, command_type, additional_conditions_list), ...] Where: command_var = 'GVCFs' command_type = 'variable' command_actions = {'sep': ' -V '} ''' command_array = [] for code_snippet in rawcommand_subAST.attributes["parts"]: # normal string if isinstance(code_snippet, wdl_parser.Terminal): command_var = "r'''" + code_snippet.source_string + "'''" # a variable like ${dinosaurDNA} if isinstance(code_snippet, wdl_parser.Ast): if code_snippet.name == 'CommandParameter': # change in the future? seems to be a different parameter but works for all cases it seems? code_expr = self.parse_declaration_expressn(code_snippet.attr('expr'), es='') code_attributes = self.parse_task_rawcommand_attributes(code_snippet.attr('attributes')) command_var = self.modify_cmd_expr_w_attributes(code_expr, code_attributes) if isinstance(code_snippet, wdl_parser.AstList): raise NotImplementedError command_array.append(command_var) return command_array
[ "def", "parse_task_rawcommand", "(", "self", ",", "rawcommand_subAST", ")", ":", "command_array", "=", "[", "]", "for", "code_snippet", "in", "rawcommand_subAST", ".", "attributes", "[", "\"parts\"", "]", ":", "# normal string", "if", "isinstance", "(", "code_snip...
Parses the rawcommand section of the WDL task AST subtree. Task "rawcommands" are divided into many parts. There are 2 types of parts: normal strings, & variables that can serve as changeable inputs. The following example command: 'echo ${variable1} ${variable2} > output_file.txt' Has 5 parts: Normal String: 'echo ' Variable Input: variable1 Normal String: ' ' Variable Input: variable2 Normal String: ' > output_file.txt' Variables can also have additional conditions, like 'sep', which is like the python ''.join() function and in WDL looks like: ${sep=" -V " GVCFs} and would be translated as: ' -V '.join(GVCFs). :param rawcommand_subAST: A subAST representing some bash command. :return: A list=[] of tuples=() representing the parts of the command: e.g. [(command_var, command_type, additional_conditions_list), ...] Where: command_var = 'GVCFs' command_type = 'variable' command_actions = {'sep': ' -V '}
[ "Parses", "the", "rawcommand", "section", "of", "the", "WDL", "task", "AST", "subtree", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/wdl/wdl_analysis.py#L228-L275
225,083
DataBiosphere/toil
src/toil/wdl/wdl_analysis.py
AnalyzeWDL.parse_task_runtime
def parse_task_runtime(self, runtime_subAST): ''' Parses the runtime section of the WDL task AST subtree. The task "runtime" section currently supports context fields for a docker container, CPU resources, RAM resources, and disk resources. :param runtime_subAST: A subAST representing runtime parameters. :return: A list=[] of runtime attributes, for example: runtime_attributes = [('docker','quay.io/encode-dcc/map:v1.0'), ('cpu','2'), ('memory','17.1 GB'), ('disks','local-disk 420 HDD')] ''' runtime_attributes = OrderedDict() if isinstance(runtime_subAST, wdl_parser.Terminal): raise NotImplementedError elif isinstance(runtime_subAST, wdl_parser.Ast): raise NotImplementedError elif isinstance(runtime_subAST, wdl_parser.AstList): for ast in runtime_subAST: key = self.parse_task_runtime_key(ast.attr('key')) value = self.parse_declaration_expressn(ast.attr('value'), es='') if value.startswith('"'): value = self.translate_wdl_string_to_python_string(value[1:-1]) runtime_attributes[key] = value return runtime_attributes
python
def parse_task_runtime(self, runtime_subAST): ''' Parses the runtime section of the WDL task AST subtree. The task "runtime" section currently supports context fields for a docker container, CPU resources, RAM resources, and disk resources. :param runtime_subAST: A subAST representing runtime parameters. :return: A list=[] of runtime attributes, for example: runtime_attributes = [('docker','quay.io/encode-dcc/map:v1.0'), ('cpu','2'), ('memory','17.1 GB'), ('disks','local-disk 420 HDD')] ''' runtime_attributes = OrderedDict() if isinstance(runtime_subAST, wdl_parser.Terminal): raise NotImplementedError elif isinstance(runtime_subAST, wdl_parser.Ast): raise NotImplementedError elif isinstance(runtime_subAST, wdl_parser.AstList): for ast in runtime_subAST: key = self.parse_task_runtime_key(ast.attr('key')) value = self.parse_declaration_expressn(ast.attr('value'), es='') if value.startswith('"'): value = self.translate_wdl_string_to_python_string(value[1:-1]) runtime_attributes[key] = value return runtime_attributes
[ "def", "parse_task_runtime", "(", "self", ",", "runtime_subAST", ")", ":", "runtime_attributes", "=", "OrderedDict", "(", ")", "if", "isinstance", "(", "runtime_subAST", ",", "wdl_parser", ".", "Terminal", ")", ":", "raise", "NotImplementedError", "elif", "isinsta...
Parses the runtime section of the WDL task AST subtree. The task "runtime" section currently supports context fields for a docker container, CPU resources, RAM resources, and disk resources. :param runtime_subAST: A subAST representing runtime parameters. :return: A list=[] of runtime attributes, for example: runtime_attributes = [('docker','quay.io/encode-dcc/map:v1.0'), ('cpu','2'), ('memory','17.1 GB'), ('disks','local-disk 420 HDD')]
[ "Parses", "the", "runtime", "section", "of", "the", "WDL", "task", "AST", "subtree", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/wdl/wdl_analysis.py#L306-L332
225,084
DataBiosphere/toil
src/toil/wdl/wdl_analysis.py
AnalyzeWDL.parse_task_outputs
def parse_task_outputs(self, i): ''' Parse the WDL output section. Outputs are like declarations, with a type, name, and value. Examples: ------------ Simple Cases ------------ 'Int num = 7' var_name: 'num' var_type: 'Int' var_value: 7 String idea = 'Lab grown golden eagle burgers.' var_name: 'idea' var_type: 'String' var_value: 'Lab grown golden eagle burgers.' File ideaFile = 'goldenEagleStemCellStartUpDisrupt.txt' var_name: 'ideaFile' var_type: 'File' var_value: 'goldenEagleStemCellStartUpDisrupt.txt' ------------------- More Abstract Cases ------------------- Array[File] allOfMyTerribleIdeas = glob(*.txt)[0] var_name: 'allOfMyTerribleIdeas' var_type**: 'File' var_value: [*.txt] var_actions: {'index_lookup': '0', 'glob': 'None'} **toilwdl.py converts 'Array[File]' to 'ArrayFile' :return: output_array representing outputs generated by the job/task: e.g. x = [(var_name, var_type, var_value, var_actions), ...] ''' output_array = [] for j in i.attributes['attributes']: if j.name == 'Output': var_name = self.parse_declaration_name(j.attr("name")) var_type = self.parse_declaration_type(j.attr("type")) var_expressn = self.parse_declaration_expressn(j.attr("expression"), es='', output_expressn=True) if not (var_expressn.startswith('(') and var_expressn.endswith(')')): var_expressn = self.translate_wdl_string_to_python_string(var_expressn) output_array.append((var_name, var_type, var_expressn)) else: raise NotImplementedError return output_array
python
def parse_task_outputs(self, i): ''' Parse the WDL output section. Outputs are like declarations, with a type, name, and value. Examples: ------------ Simple Cases ------------ 'Int num = 7' var_name: 'num' var_type: 'Int' var_value: 7 String idea = 'Lab grown golden eagle burgers.' var_name: 'idea' var_type: 'String' var_value: 'Lab grown golden eagle burgers.' File ideaFile = 'goldenEagleStemCellStartUpDisrupt.txt' var_name: 'ideaFile' var_type: 'File' var_value: 'goldenEagleStemCellStartUpDisrupt.txt' ------------------- More Abstract Cases ------------------- Array[File] allOfMyTerribleIdeas = glob(*.txt)[0] var_name: 'allOfMyTerribleIdeas' var_type**: 'File' var_value: [*.txt] var_actions: {'index_lookup': '0', 'glob': 'None'} **toilwdl.py converts 'Array[File]' to 'ArrayFile' :return: output_array representing outputs generated by the job/task: e.g. x = [(var_name, var_type, var_value, var_actions), ...] ''' output_array = [] for j in i.attributes['attributes']: if j.name == 'Output': var_name = self.parse_declaration_name(j.attr("name")) var_type = self.parse_declaration_type(j.attr("type")) var_expressn = self.parse_declaration_expressn(j.attr("expression"), es='', output_expressn=True) if not (var_expressn.startswith('(') and var_expressn.endswith(')')): var_expressn = self.translate_wdl_string_to_python_string(var_expressn) output_array.append((var_name, var_type, var_expressn)) else: raise NotImplementedError return output_array
[ "def", "parse_task_outputs", "(", "self", ",", "i", ")", ":", "output_array", "=", "[", "]", "for", "j", "in", "i", ".", "attributes", "[", "'attributes'", "]", ":", "if", "j", ".", "name", "==", "'Output'", ":", "var_name", "=", "self", ".", "parse_...
Parse the WDL output section. Outputs are like declarations, with a type, name, and value. Examples: ------------ Simple Cases ------------ 'Int num = 7' var_name: 'num' var_type: 'Int' var_value: 7 String idea = 'Lab grown golden eagle burgers.' var_name: 'idea' var_type: 'String' var_value: 'Lab grown golden eagle burgers.' File ideaFile = 'goldenEagleStemCellStartUpDisrupt.txt' var_name: 'ideaFile' var_type: 'File' var_value: 'goldenEagleStemCellStartUpDisrupt.txt' ------------------- More Abstract Cases ------------------- Array[File] allOfMyTerribleIdeas = glob(*.txt)[0] var_name: 'allOfMyTerribleIdeas' var_type**: 'File' var_value: [*.txt] var_actions: {'index_lookup': '0', 'glob': 'None'} **toilwdl.py converts 'Array[File]' to 'ArrayFile' :return: output_array representing outputs generated by the job/task: e.g. x = [(var_name, var_type, var_value, var_actions), ...]
[ "Parse", "the", "WDL", "output", "section", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/wdl/wdl_analysis.py#L334-L385
225,085
DataBiosphere/toil
src/toil/wdl/wdl_analysis.py
AnalyzeWDL.parse_workflow
def parse_workflow(self, workflow): ''' Parses a WDL workflow AST subtree. Currently looks at and parses 3 sections: 1. Declarations (e.g. string x = 'helloworld') 2. Calls (similar to a python def) 3. Scatter (which expects to map to a Call or multiple Calls) Returns nothing but creates the self.workflows_dictionary necessary for much of the parser. :param workflow: An AST subtree of a WDL "Workflow". :return: Returns nothing but adds a workflow to the self.workflows_dictionary necessary for much of the parser. ''' workflow_name = workflow.attr('name').source_string wf_declared_dict = OrderedDict() for section in workflow.attr("body"): if section.name == "Declaration": var_name, var_map = self.parse_workflow_declaration(section) wf_declared_dict[var_name] = var_map self.workflows_dictionary.setdefault(workflow_name, OrderedDict())['wf_declarations'] = wf_declared_dict if section.name == "Scatter": scattertask = self.parse_workflow_scatter(section) self.workflows_dictionary.setdefault(workflow_name, OrderedDict())['scatter' + str(self.scatter_number)] = scattertask self.scatter_number += 1 if section.name == "Call": task = self.parse_workflow_call(section) self.workflows_dictionary.setdefault(workflow_name, OrderedDict())['call' + str(self.call_number)] = task self.call_number += 1 if section.name == "If": task = self.parse_workflow_if(section) self.workflows_dictionary.setdefault(workflow_name, OrderedDict())['if' + str(self.if_number)] = task self.if_number += 1
python
def parse_workflow(self, workflow): ''' Parses a WDL workflow AST subtree. Currently looks at and parses 3 sections: 1. Declarations (e.g. string x = 'helloworld') 2. Calls (similar to a python def) 3. Scatter (which expects to map to a Call or multiple Calls) Returns nothing but creates the self.workflows_dictionary necessary for much of the parser. :param workflow: An AST subtree of a WDL "Workflow". :return: Returns nothing but adds a workflow to the self.workflows_dictionary necessary for much of the parser. ''' workflow_name = workflow.attr('name').source_string wf_declared_dict = OrderedDict() for section in workflow.attr("body"): if section.name == "Declaration": var_name, var_map = self.parse_workflow_declaration(section) wf_declared_dict[var_name] = var_map self.workflows_dictionary.setdefault(workflow_name, OrderedDict())['wf_declarations'] = wf_declared_dict if section.name == "Scatter": scattertask = self.parse_workflow_scatter(section) self.workflows_dictionary.setdefault(workflow_name, OrderedDict())['scatter' + str(self.scatter_number)] = scattertask self.scatter_number += 1 if section.name == "Call": task = self.parse_workflow_call(section) self.workflows_dictionary.setdefault(workflow_name, OrderedDict())['call' + str(self.call_number)] = task self.call_number += 1 if section.name == "If": task = self.parse_workflow_if(section) self.workflows_dictionary.setdefault(workflow_name, OrderedDict())['if' + str(self.if_number)] = task self.if_number += 1
[ "def", "parse_workflow", "(", "self", ",", "workflow", ")", ":", "workflow_name", "=", "workflow", ".", "attr", "(", "'name'", ")", ".", "source_string", "wf_declared_dict", "=", "OrderedDict", "(", ")", "for", "section", "in", "workflow", ".", "attr", "(", ...
Parses a WDL workflow AST subtree. Currently looks at and parses 3 sections: 1. Declarations (e.g. string x = 'helloworld') 2. Calls (similar to a python def) 3. Scatter (which expects to map to a Call or multiple Calls) Returns nothing but creates the self.workflows_dictionary necessary for much of the parser. :param workflow: An AST subtree of a WDL "Workflow". :return: Returns nothing but adds a workflow to the self.workflows_dictionary necessary for much of the parser.
[ "Parses", "a", "WDL", "workflow", "AST", "subtree", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/wdl/wdl_analysis.py#L454-L493
225,086
DataBiosphere/toil
src/toil/wdl/wdl_analysis.py
AnalyzeWDL.parse_declaration_expressn_ternaryif
def parse_declaration_expressn_ternaryif(self, cond, iftrue, iffalse, es): """ Classic if statement. This needs to be rearranged. In wdl, this looks like: if <condition> then <iftrue> else <iffalse> In python, this needs to be: <iftrue> if <condition> else <iffalse> :param cond: :param iftrue: :param iffalse: :param es: :return: """ es = es + self.parse_declaration_expressn(iftrue, es='') es = es + ' if ' + self.parse_declaration_expressn(cond, es='') es = es + ' else ' + self.parse_declaration_expressn(iffalse, es='') return es
python
def parse_declaration_expressn_ternaryif(self, cond, iftrue, iffalse, es): es = es + self.parse_declaration_expressn(iftrue, es='') es = es + ' if ' + self.parse_declaration_expressn(cond, es='') es = es + ' else ' + self.parse_declaration_expressn(iffalse, es='') return es
[ "def", "parse_declaration_expressn_ternaryif", "(", "self", ",", "cond", ",", "iftrue", ",", "iffalse", ",", "es", ")", ":", "es", "=", "es", "+", "self", ".", "parse_declaration_expressn", "(", "iftrue", ",", "es", "=", "''", ")", "es", "=", "es", "+", ...
Classic if statement. This needs to be rearranged. In wdl, this looks like: if <condition> then <iftrue> else <iffalse> In python, this needs to be: <iftrue> if <condition> else <iffalse> :param cond: :param iftrue: :param iffalse: :param es: :return:
[ "Classic", "if", "statement", ".", "This", "needs", "to", "be", "rearranged", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/wdl/wdl_analysis.py#L799-L818
225,087
DataBiosphere/toil
src/toil/wdl/wdl_analysis.py
AnalyzeWDL.parse_declaration_expressn_tupleliteral
def parse_declaration_expressn_tupleliteral(self, values, es): """ Same in python. Just a parenthesis enclosed tuple. :param values: :param es: :return: """ es = es + '(' for ast in values: es = es + self.parse_declaration_expressn(ast, es='') + ', ' if es.endswith(', '): es = es[:-2] return es + ')'
python
def parse_declaration_expressn_tupleliteral(self, values, es): es = es + '(' for ast in values: es = es + self.parse_declaration_expressn(ast, es='') + ', ' if es.endswith(', '): es = es[:-2] return es + ')'
[ "def", "parse_declaration_expressn_tupleliteral", "(", "self", ",", "values", ",", "es", ")", ":", "es", "=", "es", "+", "'('", "for", "ast", "in", "values", ":", "es", "=", "es", "+", "self", ".", "parse_declaration_expressn", "(", "ast", ",", "es", "="...
Same in python. Just a parenthesis enclosed tuple. :param values: :param es: :return:
[ "Same", "in", "python", ".", "Just", "a", "parenthesis", "enclosed", "tuple", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/wdl/wdl_analysis.py#L820-L833
225,088
DataBiosphere/toil
src/toil/wdl/wdl_analysis.py
AnalyzeWDL.parse_declaration_expressn_arrayliteral
def parse_declaration_expressn_arrayliteral(self, values, es): """ Same in python. Just a square bracket enclosed array. :param values: :param es: :return: """ es = es + '[' for ast in values: es = es + self.parse_declaration_expressn(ast, es='') + ', ' if es.endswith(', '): es = es[:-2] return es + ']'
python
def parse_declaration_expressn_arrayliteral(self, values, es): es = es + '[' for ast in values: es = es + self.parse_declaration_expressn(ast, es='') + ', ' if es.endswith(', '): es = es[:-2] return es + ']'
[ "def", "parse_declaration_expressn_arrayliteral", "(", "self", ",", "values", ",", "es", ")", ":", "es", "=", "es", "+", "'['", "for", "ast", "in", "values", ":", "es", "=", "es", "+", "self", ".", "parse_declaration_expressn", "(", "ast", ",", "es", "="...
Same in python. Just a square bracket enclosed array. :param values: :param es: :return:
[ "Same", "in", "python", ".", "Just", "a", "square", "bracket", "enclosed", "array", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/wdl/wdl_analysis.py#L835-L848
225,089
DataBiosphere/toil
src/toil/wdl/wdl_analysis.py
AnalyzeWDL.parse_declaration_expressn_operator
def parse_declaration_expressn_operator(self, lhsAST, rhsAST, es, operator): """ Simply joins the left and right hand arguments lhs and rhs with an operator. :param lhsAST: :param rhsAST: :param es: :param operator: :return: """ if isinstance(lhsAST, wdl_parser.Terminal): if lhsAST.str == 'string': es = es + '"{string}"'.format(string=lhsAST.source_string) else: es = es + '{string}'.format(string=lhsAST.source_string) elif isinstance(lhsAST, wdl_parser.Ast): es = es + self.parse_declaration_expressn(lhsAST, es='') elif isinstance(lhsAST, wdl_parser.AstList): raise NotImplementedError es = es + operator if isinstance(rhsAST, wdl_parser.Terminal): if rhsAST.str == 'string': es = es + '"{string}"'.format(string=rhsAST.source_string) else: es = es + '{string}'.format(string=rhsAST.source_string) elif isinstance(rhsAST, wdl_parser.Ast): es = es + self.parse_declaration_expressn(rhsAST, es='') elif isinstance(rhsAST, wdl_parser.AstList): raise NotImplementedError return es
python
def parse_declaration_expressn_operator(self, lhsAST, rhsAST, es, operator): if isinstance(lhsAST, wdl_parser.Terminal): if lhsAST.str == 'string': es = es + '"{string}"'.format(string=lhsAST.source_string) else: es = es + '{string}'.format(string=lhsAST.source_string) elif isinstance(lhsAST, wdl_parser.Ast): es = es + self.parse_declaration_expressn(lhsAST, es='') elif isinstance(lhsAST, wdl_parser.AstList): raise NotImplementedError es = es + operator if isinstance(rhsAST, wdl_parser.Terminal): if rhsAST.str == 'string': es = es + '"{string}"'.format(string=rhsAST.source_string) else: es = es + '{string}'.format(string=rhsAST.source_string) elif isinstance(rhsAST, wdl_parser.Ast): es = es + self.parse_declaration_expressn(rhsAST, es='') elif isinstance(rhsAST, wdl_parser.AstList): raise NotImplementedError return es
[ "def", "parse_declaration_expressn_operator", "(", "self", ",", "lhsAST", ",", "rhsAST", ",", "es", ",", "operator", ")", ":", "if", "isinstance", "(", "lhsAST", ",", "wdl_parser", ".", "Terminal", ")", ":", "if", "lhsAST", ".", "str", "==", "'string'", ":...
Simply joins the left and right hand arguments lhs and rhs with an operator. :param lhsAST: :param rhsAST: :param es: :param operator: :return:
[ "Simply", "joins", "the", "left", "and", "right", "hand", "arguments", "lhs", "and", "rhs", "with", "an", "operator", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/wdl/wdl_analysis.py#L850-L881
225,090
DataBiosphere/toil
src/toil/wdl/wdl_analysis.py
AnalyzeWDL.parse_declaration_expressn_fncall
def parse_declaration_expressn_fncall(self, name, params, es): """ Parses out cromwell's built-in function calls. Some of these are special and need minor adjustments, for example length(), which is equivalent to python's len() function. Or sub, which is equivalent to re.sub(), but needs a rearrangement of input variables. Known to be supported: sub, size, read_tsv, length, select_first. :param name: :param params: :param es: :return: """ # name of the function if isinstance(name, wdl_parser.Terminal): if name.str: # use python's built-in for length() if name.source_string == 'length': es = es + 'len(' elif name.source_string == 'stdout': return es + 'stdout' else: es = es + name.source_string + '(' else: raise NotImplementedError elif isinstance(name, wdl_parser.Ast): raise NotImplementedError elif isinstance(name, wdl_parser.AstList): raise NotImplementedError # use python's re.sub() for sub() if name.source_string == 'sub': es_params = self.parse_declaration_expressn_fncall_SUBparams(params) else: es_params = self.parse_declaration_expressn_fncall_normalparams(params) if name.source_string == 'glob': return es + es_params + ', tempDir)' elif name.source_string == 'size': return es + es_params + ', fileStore=fileStore)' else: return es + es_params + ')'
python
def parse_declaration_expressn_fncall(self, name, params, es): # name of the function if isinstance(name, wdl_parser.Terminal): if name.str: # use python's built-in for length() if name.source_string == 'length': es = es + 'len(' elif name.source_string == 'stdout': return es + 'stdout' else: es = es + name.source_string + '(' else: raise NotImplementedError elif isinstance(name, wdl_parser.Ast): raise NotImplementedError elif isinstance(name, wdl_parser.AstList): raise NotImplementedError # use python's re.sub() for sub() if name.source_string == 'sub': es_params = self.parse_declaration_expressn_fncall_SUBparams(params) else: es_params = self.parse_declaration_expressn_fncall_normalparams(params) if name.source_string == 'glob': return es + es_params + ', tempDir)' elif name.source_string == 'size': return es + es_params + ', fileStore=fileStore)' else: return es + es_params + ')'
[ "def", "parse_declaration_expressn_fncall", "(", "self", ",", "name", ",", "params", ",", "es", ")", ":", "# name of the function", "if", "isinstance", "(", "name", ",", "wdl_parser", ".", "Terminal", ")", ":", "if", "name", ".", "str", ":", "# use python's bu...
Parses out cromwell's built-in function calls. Some of these are special and need minor adjustments, for example length(), which is equivalent to python's len() function. Or sub, which is equivalent to re.sub(), but needs a rearrangement of input variables. Known to be supported: sub, size, read_tsv, length, select_first. :param name: :param params: :param es: :return:
[ "Parses", "out", "cromwell", "s", "built", "-", "in", "function", "calls", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/wdl/wdl_analysis.py#L883-L927
225,091
DataBiosphere/toil
src/toil/wdl/wdl_analysis.py
AnalyzeWDL.parse_workflow_declaration
def parse_workflow_declaration(self, wf_declaration_subAST): ''' Parses a WDL declaration AST subtree into a string and a python dictionary containing its 'type' and 'value'. For example: var_name = refIndex var_map = {'type': File, 'value': bamIndex} :param wf_declaration_subAST: An AST subtree of a workflow declaration. :return: var_name, which is the name of the declared variable :return: var_map, a dictionary with keys for type and value. e.g. {'type': File, 'value': bamIndex} ''' var_map = OrderedDict() var_name = self.parse_declaration_name(wf_declaration_subAST.attr("name")) var_type = self.parse_declaration_type(wf_declaration_subAST.attr("type")) var_expressn = self.parse_declaration_expressn(wf_declaration_subAST.attr("expression"), es='') var_map['name'] = var_name var_map['type'] = var_type var_map['value'] = var_expressn return var_name, var_map
python
def parse_workflow_declaration(self, wf_declaration_subAST): ''' Parses a WDL declaration AST subtree into a string and a python dictionary containing its 'type' and 'value'. For example: var_name = refIndex var_map = {'type': File, 'value': bamIndex} :param wf_declaration_subAST: An AST subtree of a workflow declaration. :return: var_name, which is the name of the declared variable :return: var_map, a dictionary with keys for type and value. e.g. {'type': File, 'value': bamIndex} ''' var_map = OrderedDict() var_name = self.parse_declaration_name(wf_declaration_subAST.attr("name")) var_type = self.parse_declaration_type(wf_declaration_subAST.attr("type")) var_expressn = self.parse_declaration_expressn(wf_declaration_subAST.attr("expression"), es='') var_map['name'] = var_name var_map['type'] = var_type var_map['value'] = var_expressn return var_name, var_map
[ "def", "parse_workflow_declaration", "(", "self", ",", "wf_declaration_subAST", ")", ":", "var_map", "=", "OrderedDict", "(", ")", "var_name", "=", "self", ".", "parse_declaration_name", "(", "wf_declaration_subAST", ".", "attr", "(", "\"name\"", ")", ")", "var_ty...
Parses a WDL declaration AST subtree into a string and a python dictionary containing its 'type' and 'value'. For example: var_name = refIndex var_map = {'type': File, 'value': bamIndex} :param wf_declaration_subAST: An AST subtree of a workflow declaration. :return: var_name, which is the name of the declared variable :return: var_map, a dictionary with keys for type and value. e.g. {'type': File, 'value': bamIndex}
[ "Parses", "a", "WDL", "declaration", "AST", "subtree", "into", "a", "string", "and", "a", "python", "dictionary", "containing", "its", "type", "and", "value", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/wdl/wdl_analysis.py#L970-L994
225,092
DataBiosphere/toil
src/toil/jobStores/aws/jobStore.py
AWSJobStore._registered
def _registered(self): """ A optional boolean property indidcating whether this job store is registered. The registry is the authority on deciding if a job store exists or not. If True, this job store exists, if None the job store is transitioning from True to False or vice versa, if False the job store doesn't exist. :type: bool|None """ # The weird mapping of the SDB item attribute value to the property value is due to # backwards compatibility. 'True' becomes True, that's easy. Toil < 3.3.0 writes this at # the end of job store creation. Absence of either the registry, the item or the # attribute becomes False, representing a truly absent, non-existing job store. An # attribute value of 'False', which is what Toil < 3.3.0 writes at the *beginning* of job # store destruction, indicates a job store in transition, reflecting the fact that 3.3.0 # may leak buckets or domains even though the registry reports 'False' for them. We # can't handle job stores that were partially created by 3.3.0, though. registry_domain = self._bindDomain(domain_name='toil-registry', create=False, block=False) if registry_domain is None: return False else: for attempt in retry_sdb(): with attempt: attributes = registry_domain.get_attributes(item_name=self.namePrefix, attribute_name='exists', consistent_read=True) try: exists = attributes['exists'] except KeyError: return False else: if exists == 'True': return True elif exists == 'False': return None else: assert False
python
def _registered(self): # The weird mapping of the SDB item attribute value to the property value is due to # backwards compatibility. 'True' becomes True, that's easy. Toil < 3.3.0 writes this at # the end of job store creation. Absence of either the registry, the item or the # attribute becomes False, representing a truly absent, non-existing job store. An # attribute value of 'False', which is what Toil < 3.3.0 writes at the *beginning* of job # store destruction, indicates a job store in transition, reflecting the fact that 3.3.0 # may leak buckets or domains even though the registry reports 'False' for them. We # can't handle job stores that were partially created by 3.3.0, though. registry_domain = self._bindDomain(domain_name='toil-registry', create=False, block=False) if registry_domain is None: return False else: for attempt in retry_sdb(): with attempt: attributes = registry_domain.get_attributes(item_name=self.namePrefix, attribute_name='exists', consistent_read=True) try: exists = attributes['exists'] except KeyError: return False else: if exists == 'True': return True elif exists == 'False': return None else: assert False
[ "def", "_registered", "(", "self", ")", ":", "# The weird mapping of the SDB item attribute value to the property value is due to", "# backwards compatibility. 'True' becomes True, that's easy. Toil < 3.3.0 writes this at", "# the end of job store creation. Absence of either the registry, the item o...
A optional boolean property indidcating whether this job store is registered. The registry is the authority on deciding if a job store exists or not. If True, this job store exists, if None the job store is transitioning from True to False or vice versa, if False the job store doesn't exist. :type: bool|None
[ "A", "optional", "boolean", "property", "indidcating", "whether", "this", "job", "store", "is", "registered", ".", "The", "registry", "is", "the", "authority", "on", "deciding", "if", "a", "job", "store", "exists", "or", "not", ".", "If", "True", "this", "...
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/jobStores/aws/jobStore.py#L170-L208
225,093
DataBiosphere/toil
src/toil/jobStores/aws/jobStore.py
AWSJobStore._bindBucket
def _bindBucket(self, bucket_name, create=False, block=True, versioning=False): """ Return the Boto Bucket object representing the S3 bucket with the given name. If the bucket does not exist and `create` is True, it will be created. :param str bucket_name: the name of the bucket to bind to :param bool create: Whether to create bucket the if it doesn't exist :param bool block: If False, return None if the bucket doesn't exist. If True, wait until bucket appears. Ignored if `create` is True. :rtype: Bucket|None :raises S3ResponseError: If `block` is True and the bucket still doesn't exist after the retry timeout expires. """ assert self.minBucketNameLen <= len(bucket_name) <= self.maxBucketNameLen assert self.bucketNameRe.match(bucket_name) log.debug("Binding to job store bucket '%s'.", bucket_name) def bucket_creation_pending(e): # https://github.com/BD2KGenomics/toil/issues/955 # https://github.com/BD2KGenomics/toil/issues/995 # https://github.com/BD2KGenomics/toil/issues/1093 return (isinstance(e, (S3CreateError, S3ResponseError)) and e.error_code in ('BucketAlreadyOwnedByYou', 'OperationAborted')) bucketExisted = True for attempt in retry_s3(predicate=bucket_creation_pending): with attempt: try: bucket = self.s3.get_bucket(bucket_name, validate=True) except S3ResponseError as e: if e.error_code == 'NoSuchBucket': bucketExisted = False log.debug("Bucket '%s' does not exist.", bucket_name) if create: log.debug("Creating bucket '%s'.", bucket_name) location = region_to_bucket_location(self.region) bucket = self.s3.create_bucket(bucket_name, location=location) assert self.__getBucketRegion(bucket) == self.region elif block: raise else: return None elif e.status == 301: # This is raised if the user attempts to get a bucket in a region outside # the specified one, if the specified one is not `us-east-1`. The us-east-1 # server allows a user to use buckets from any region. 
bucket = self.s3.get_bucket(bucket_name, validate=False) raise BucketLocationConflictException(self.__getBucketRegion(bucket)) else: raise else: if self.__getBucketRegion(bucket) != self.region: raise BucketLocationConflictException(self.__getBucketRegion(bucket)) if versioning and not bucketExisted: # only call this method on bucket creation bucket.configure_versioning(True) else: # now test for versioning consistency # we should never see any of these errors since 'versioning' should always be true bucket_versioning = self.__getBucketVersioning(bucket) if bucket_versioning != versioning: assert False, 'Cannot modify versioning on existing bucket' elif bucket_versioning is None: assert False, 'Cannot use a bucket with versioning suspended' if bucketExisted: log.debug("Using pre-existing job store bucket '%s'.", bucket_name) else: log.debug("Created new job store bucket '%s'.", bucket_name) return bucket
python
def _bindBucket(self, bucket_name, create=False, block=True, versioning=False): assert self.minBucketNameLen <= len(bucket_name) <= self.maxBucketNameLen assert self.bucketNameRe.match(bucket_name) log.debug("Binding to job store bucket '%s'.", bucket_name) def bucket_creation_pending(e): # https://github.com/BD2KGenomics/toil/issues/955 # https://github.com/BD2KGenomics/toil/issues/995 # https://github.com/BD2KGenomics/toil/issues/1093 return (isinstance(e, (S3CreateError, S3ResponseError)) and e.error_code in ('BucketAlreadyOwnedByYou', 'OperationAborted')) bucketExisted = True for attempt in retry_s3(predicate=bucket_creation_pending): with attempt: try: bucket = self.s3.get_bucket(bucket_name, validate=True) except S3ResponseError as e: if e.error_code == 'NoSuchBucket': bucketExisted = False log.debug("Bucket '%s' does not exist.", bucket_name) if create: log.debug("Creating bucket '%s'.", bucket_name) location = region_to_bucket_location(self.region) bucket = self.s3.create_bucket(bucket_name, location=location) assert self.__getBucketRegion(bucket) == self.region elif block: raise else: return None elif e.status == 301: # This is raised if the user attempts to get a bucket in a region outside # the specified one, if the specified one is not `us-east-1`. The us-east-1 # server allows a user to use buckets from any region. 
bucket = self.s3.get_bucket(bucket_name, validate=False) raise BucketLocationConflictException(self.__getBucketRegion(bucket)) else: raise else: if self.__getBucketRegion(bucket) != self.region: raise BucketLocationConflictException(self.__getBucketRegion(bucket)) if versioning and not bucketExisted: # only call this method on bucket creation bucket.configure_versioning(True) else: # now test for versioning consistency # we should never see any of these errors since 'versioning' should always be true bucket_versioning = self.__getBucketVersioning(bucket) if bucket_versioning != versioning: assert False, 'Cannot modify versioning on existing bucket' elif bucket_versioning is None: assert False, 'Cannot use a bucket with versioning suspended' if bucketExisted: log.debug("Using pre-existing job store bucket '%s'.", bucket_name) else: log.debug("Created new job store bucket '%s'.", bucket_name) return bucket
[ "def", "_bindBucket", "(", "self", ",", "bucket_name", ",", "create", "=", "False", ",", "block", "=", "True", ",", "versioning", "=", "False", ")", ":", "assert", "self", ".", "minBucketNameLen", "<=", "len", "(", "bucket_name", ")", "<=", "self", ".", ...
Return the Boto Bucket object representing the S3 bucket with the given name. If the bucket does not exist and `create` is True, it will be created. :param str bucket_name: the name of the bucket to bind to :param bool create: Whether to create bucket the if it doesn't exist :param bool block: If False, return None if the bucket doesn't exist. If True, wait until bucket appears. Ignored if `create` is True. :rtype: Bucket|None :raises S3ResponseError: If `block` is True and the bucket still doesn't exist after the retry timeout expires.
[ "Return", "the", "Boto", "Bucket", "object", "representing", "the", "S3", "bucket", "with", "the", "given", "name", ".", "If", "the", "bucket", "does", "not", "exist", "and", "create", "is", "True", "it", "will", "be", "created", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/jobStores/aws/jobStore.py#L672-L744
225,094
DataBiosphere/toil
src/toil/jobStores/aws/jobStore.py
AWSJobStore._bindDomain
def _bindDomain(self, domain_name, create=False, block=True): """ Return the Boto Domain object representing the SDB domain of the given name. If the domain does not exist and `create` is True, it will be created. :param str domain_name: the name of the domain to bind to :param bool create: True if domain should be created if it doesn't exist :param bool block: If False, return None if the domain doesn't exist. If True, wait until domain appears. This parameter is ignored if create is True. :rtype: Domain|None :raises SDBResponseError: If `block` is True and the domain still doesn't exist after the retry timeout expires. """ log.debug("Binding to job store domain '%s'.", domain_name) retryargs = dict(predicate=lambda e: no_such_sdb_domain(e) or sdb_unavailable(e)) if not block: retryargs['timeout'] = 15 for attempt in retry_sdb(**retryargs): with attempt: try: return self.db.get_domain(domain_name) except SDBResponseError as e: if no_such_sdb_domain(e): if create: return self.db.create_domain(domain_name) elif block: raise else: return None else: raise
python
def _bindDomain(self, domain_name, create=False, block=True): log.debug("Binding to job store domain '%s'.", domain_name) retryargs = dict(predicate=lambda e: no_such_sdb_domain(e) or sdb_unavailable(e)) if not block: retryargs['timeout'] = 15 for attempt in retry_sdb(**retryargs): with attempt: try: return self.db.get_domain(domain_name) except SDBResponseError as e: if no_such_sdb_domain(e): if create: return self.db.create_domain(domain_name) elif block: raise else: return None else: raise
[ "def", "_bindDomain", "(", "self", ",", "domain_name", ",", "create", "=", "False", ",", "block", "=", "True", ")", ":", "log", ".", "debug", "(", "\"Binding to job store domain '%s'.\"", ",", "domain_name", ")", "retryargs", "=", "dict", "(", "predicate", "...
Return the Boto Domain object representing the SDB domain of the given name. If the domain does not exist and `create` is True, it will be created. :param str domain_name: the name of the domain to bind to :param bool create: True if domain should be created if it doesn't exist :param bool block: If False, return None if the domain doesn't exist. If True, wait until domain appears. This parameter is ignored if create is True. :rtype: Domain|None :raises SDBResponseError: If `block` is True and the domain still doesn't exist after the retry timeout expires.
[ "Return", "the", "Boto", "Domain", "object", "representing", "the", "SDB", "domain", "of", "the", "given", "name", ".", "If", "the", "domain", "does", "not", "exist", "and", "create", "is", "True", "it", "will", "be", "created", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/jobStores/aws/jobStore.py#L746-L779
225,095
DataBiosphere/toil
src/toil/jobStores/aws/jobStore.py
AWSJobStore.__getBucketVersioning
def __getBucketVersioning(self, bucket): """ For newly created buckets get_versioning_status returns an empty dict. In the past we've seen None in this case. We map both to a return value of False. Otherwise, the 'Versioning' entry in the dictionary returned by get_versioning_status can be 'Enabled', 'Suspended' or 'Disabled' which we map to True, None and False respectively. Note that we've never seen a versioning status of 'Disabled', only the empty dictionary. Calling configure_versioning with False on a bucket will cause get_versioning_status to then return 'Suspended' even on a new bucket that never had versioning enabled. """ for attempt in retry_s3(): with attempt: status = bucket.get_versioning_status() return self.versionings[status['Versioning']] if status else False
python
def __getBucketVersioning(self, bucket): for attempt in retry_s3(): with attempt: status = bucket.get_versioning_status() return self.versionings[status['Versioning']] if status else False
[ "def", "__getBucketVersioning", "(", "self", ",", "bucket", ")", ":", "for", "attempt", "in", "retry_s3", "(", ")", ":", "with", "attempt", ":", "status", "=", "bucket", ".", "get_versioning_status", "(", ")", "return", "self", ".", "versionings", "[", "st...
For newly created buckets get_versioning_status returns an empty dict. In the past we've seen None in this case. We map both to a return value of False. Otherwise, the 'Versioning' entry in the dictionary returned by get_versioning_status can be 'Enabled', 'Suspended' or 'Disabled' which we map to True, None and False respectively. Note that we've never seen a versioning status of 'Disabled', only the empty dictionary. Calling configure_versioning with False on a bucket will cause get_versioning_status to then return 'Suspended' even on a new bucket that never had versioning enabled.
[ "For", "newly", "created", "buckets", "get_versioning_status", "returns", "an", "empty", "dict", ".", "In", "the", "past", "we", "ve", "seen", "None", "in", "this", "case", ".", "We", "map", "both", "to", "a", "return", "value", "of", "False", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/jobStores/aws/jobStore.py#L1226-L1241
225,096
DataBiosphere/toil
src/toil/lib/iterables.py
flatten
def flatten( iterables ): """ Flatten an iterable, except for string elements. """ for it in iterables: if isinstance(it, str): yield it else: for element in it: yield element
python
def flatten( iterables ): for it in iterables: if isinstance(it, str): yield it else: for element in it: yield element
[ "def", "flatten", "(", "iterables", ")", ":", "for", "it", "in", "iterables", ":", "if", "isinstance", "(", "it", ",", "str", ")", ":", "yield", "it", "else", ":", "for", "element", "in", "it", ":", "yield", "element" ]
Flatten an iterable, except for string elements.
[ "Flatten", "an", "iterable", "except", "for", "string", "elements", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/lib/iterables.py#L25-L32
225,097
DataBiosphere/toil
src/toil/batchSystems/abstractBatchSystem.py
BatchSystemSupport.checkResourceRequest
def checkResourceRequest(self, memory, cores, disk): """ Check resource request is not greater than that available or allowed. :param int memory: amount of memory being requested, in bytes :param float cores: number of cores being requested :param int disk: amount of disk space being requested, in bytes :raise InsufficientSystemResources: raised when a resource is requested in an amount greater than allowed """ assert memory is not None assert disk is not None assert cores is not None if cores > self.maxCores: raise InsufficientSystemResources('cores', cores, self.maxCores) if memory > self.maxMemory: raise InsufficientSystemResources('memory', memory, self.maxMemory) if disk > self.maxDisk: raise InsufficientSystemResources('disk', disk, self.maxDisk)
python
def checkResourceRequest(self, memory, cores, disk): assert memory is not None assert disk is not None assert cores is not None if cores > self.maxCores: raise InsufficientSystemResources('cores', cores, self.maxCores) if memory > self.maxMemory: raise InsufficientSystemResources('memory', memory, self.maxMemory) if disk > self.maxDisk: raise InsufficientSystemResources('disk', disk, self.maxDisk)
[ "def", "checkResourceRequest", "(", "self", ",", "memory", ",", "cores", ",", "disk", ")", ":", "assert", "memory", "is", "not", "None", "assert", "disk", "is", "not", "None", "assert", "cores", "is", "not", "None", "if", "cores", ">", "self", ".", "ma...
Check resource request is not greater than that available or allowed. :param int memory: amount of memory being requested, in bytes :param float cores: number of cores being requested :param int disk: amount of disk space being requested, in bytes :raise InsufficientSystemResources: raised when a resource is requested in an amount greater than allowed
[ "Check", "resource", "request", "is", "not", "greater", "than", "that", "available", "or", "allowed", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/batchSystems/abstractBatchSystem.py#L229-L250
225,098
DataBiosphere/toil
src/toil/batchSystems/abstractBatchSystem.py
BatchSystemLocalSupport.handleLocalJob
def handleLocalJob(self, jobNode): # type: (JobNode) -> Optional[int] """ To be called by issueBatchJobs. Returns the jobID if the jobNode has been submitted to the local queue, otherwise returns None """ if (not self.config.runCwlInternalJobsOnWorkers and jobNode.jobName.startswith(CWL_INTERNAL_JOBS)): return self.localBatch.issueBatchJob(jobNode) else: return None
python
def handleLocalJob(self, jobNode): # type: (JobNode) -> Optional[int] if (not self.config.runCwlInternalJobsOnWorkers and jobNode.jobName.startswith(CWL_INTERNAL_JOBS)): return self.localBatch.issueBatchJob(jobNode) else: return None
[ "def", "handleLocalJob", "(", "self", ",", "jobNode", ")", ":", "# type: (JobNode) -> Optional[int]", "if", "(", "not", "self", ".", "config", ".", "runCwlInternalJobsOnWorkers", "and", "jobNode", ".", "jobName", ".", "startswith", "(", "CWL_INTERNAL_JOBS", ")", "...
To be called by issueBatchJobs. Returns the jobID if the jobNode has been submitted to the local queue, otherwise returns None
[ "To", "be", "called", "by", "issueBatchJobs", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/batchSystems/abstractBatchSystem.py#L311-L322
225,099
DataBiosphere/toil
src/toil/batchSystems/abstractBatchSystem.py
BatchSystemLocalSupport.getNextJobID
def getNextJobID(self): # type: () -> int """ Must be used to get job IDs so that the local and batch jobs do not conflict. """ with self.localBatch.jobIndexLock: jobID = self.localBatch.jobIndex self.localBatch.jobIndex += 1 return jobID
python
def getNextJobID(self): # type: () -> int with self.localBatch.jobIndexLock: jobID = self.localBatch.jobIndex self.localBatch.jobIndex += 1 return jobID
[ "def", "getNextJobID", "(", "self", ")", ":", "# type: () -> int", "with", "self", ".", "localBatch", ".", "jobIndexLock", ":", "jobID", "=", "self", ".", "localBatch", ".", "jobIndex", "self", ".", "localBatch", ".", "jobIndex", "+=", "1", "return", "jobID"...
Must be used to get job IDs so that the local and batch jobs do not conflict.
[ "Must", "be", "used", "to", "get", "job", "IDs", "so", "that", "the", "local", "and", "batch", "jobs", "do", "not", "conflict", "." ]
a8252277ff814e7bee0971139c2344f88e44b644
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/batchSystems/abstractBatchSystem.py#L344-L352