rem (string, lengths 0–322k) | add (string, lengths 0–2.05M) | context (string, lengths 8–228k) |
|---|---|---|
parentq = self.session.query(Job.job_id).filter(Job.name == parentName) parentid = parentq.one()[0] edge = Edge() edge.parent_id = parentid edge.child_id = job.job_id edge.commit_to_db(self.session) | parentq = self.session.query(Job.job_id).filter(Job.name == parentName).filter(Job.wf_id == job.wf_id) for parentid in parentq.all(): edge = Edge() edge.parent_id = parentid[0] edge.child_id = job.job_id edge.commit_to_db(self.session) | def job(self, linedata): """ @type linedata: dict @param linedata: One line of BP data dict-ified. Handles a job insert event. """ job = self.linedataToObject(linedata, Job()) # get wf_id job.wf_id = self.wf_uuidToId(job.wf_uuid) if job.wf_id == None: er = 'No wf_id associated with wf_uuid %s - can not insert job ... |
try: if short == True: dt = timestamp[:-5] tz = timestamp[-5:] else: dt = timestamp[:-6] tz = timestamp[-6:] tz = tz[:-3] + tz[-2:] if short == False: if dt.find('.'): dt = dt[:dt.find('.')] my_time = datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S") else: my_time = datetime.datetime.strptime(dt, "%Y%m%dT%H%M%S")... | try: m = parse_iso8601.search(timestamp) if m is None: logger.warn("ERROR: Unable to match \"%s\" to ISO 8601" % timestamp) return None else: dt = "%04d-%02d-%02d %02d:%02d:%02d" % (int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4)), int(m.group(5)), int(m.group(6))) tz = m.group(8) my_time = datetim... | def epochdate(timestamp, short=False): """ This function converts an ISO timestamp into seconds since epoch Set short to False when the timestamp is in the YYYY-MM-DDTHH:MM:SSZZZ:ZZ format Set short to True when the timestamp is in the YYYYMMDDTHHMMSSZZZZZ format """ try: # Split date/time and timezone information if ... |
self.workflow_run_wall_time ='-' | self.workflow_cpu_time ='-' | def __init__(self): self.submit_dir ='-' self.workflow_run_time = '-' self.workflow_run_wall_time ='-' self.total_jobs = '-' self.succeeded_jobs ='-' self.failed_jobs ='-' self.unsubmitted_jobs ='-' self.unknown_jobs ='-' self.total_succeeded_tasks ='-' self.total_failed_tasks ='-' self.job_statistics_dict ={} self.tra... |
workflow_info +=(("Total workflow execution time :" + str(self.workflow_run_time)).ljust(job_run_statistics_size)) | workflow_info +=(("Workflow execution wall time :" + str(self.workflow_run_time)).ljust(job_run_statistics_size)) | def get_formatted_workflow_info(self): workflow_info = '' workflow_info +=("#" + self.submit_dir) workflow_info +=( "\n") workflow_info +=(("Total workflow execution time :" + str(self.workflow_run_time)).ljust(job_run_statistics_size)) workflow_info +=("\n") workflow_info +=(("Total workflow execution wall time ... |
workflow_info +=(("Total workflow execution wall time :" + str(self.workflow_run_wall_time)).ljust(job_run_statistics_size)) | workflow_info +=(("Total cpu time consumed :" + str(self.workflow_cpu_time)).ljust(job_run_statistics_size)) | def get_formatted_workflow_info(self): workflow_info = '' workflow_info +=("#" + self.submit_dir) workflow_info +=( "\n") workflow_info +=(("Total workflow execution time :" + str(self.workflow_run_time)).ljust(job_run_statistics_size)) workflow_info +=("\n") workflow_info +=(("Total workflow execution wall time ... |
workflow_run_wall_time =0 | workflow_cpu_time =0 | def populate_workflow_details(workflow): """ populates the workflow statistics information Param: the workflow reference """ workflow_stat = WorkflowStatistics() transformation_stats_dict ={} job_stats_dict ={} total_succeeded_tasks =0 total_failed_tasks =0 failed_jobs =0 succeeded_jobs =0 unknown_jobs =0 unsubmitted_j... |
workflow_run_wall_time +=task.duration | workflow_cpu_time +=task.duration | def populate_workflow_details(workflow): """ populates the workflow statistics information Param: the workflow reference """ workflow_stat = WorkflowStatistics() transformation_stats_dict ={} job_stats_dict ={} total_succeeded_tasks =0 total_failed_tasks =0 failed_jobs =0 succeeded_jobs =0 unknown_jobs =0 unsubmitted_j... |
workflow_stat.workflow_run_wall_time = workflow_run_wall_time | workflow_stat.workflow_cpu_time = workflow_cpu_time | def populate_workflow_details(workflow): """ populates the workflow statistics information Param: the workflow reference """ workflow_stat = WorkflowStatistics() transformation_stats_dict ={} job_stats_dict ={} total_succeeded_tasks =0 total_failed_tasks =0 failed_jobs =0 succeeded_jobs =0 unknown_jobs =0 unsubmitted_j... |
all_stat.workflow_run_wall_time = workflow_stat_list[0].workflow_run_wall_time | all_stat.workflow_cpu_time = workflow_stat_list[0].workflow_cpu_time | def print_workflow_details(workflow_stat_list): # print workflow statistics all_stat = WorkflowStatistics() wf_stats_file = os.path.join(output_dir, "workflow") try: fh = open(wf_stats_file, "w") all_stat.submit_dir = "All" all_stat.workflow_run_time =workflow_stat_list[0].workflow_run_time all_stat.workflow_run_wall_... |
| parse_iso8601 = re.compile(r'(\d{4})-?(\d{2})-?(\d{2})[ tT]?(\d{2}):?(\d{2}):?(\d{2})([.,]\d+)?([zZ]|[-+](\d{2}):?(\d{2}))') | def isodate(now=int(time.time()), utc=False, short=False): """ This function converts seconds since epoch into ISO timestamp """ if utc: my_time = time.gmtime(now) else: # FIXME: Zone offset is wrong on CentOS 5.5 with python 2.4 my_time = time.localtime(now) if short: if utc: return time.strftime("%Y%m%dT%H%M%SZ", m... |
logger.warn("ERROR: Unable to match \"%s\" to ISO 8601" % timestamp) | logger.warn("unable to match \"%s\" to ISO 8601" % timestamp) | def epochdate(timestamp, short=False): """ This function converts an ISO timestamp into seconds since epoch Set short to False when the timestamp is in the YYYY-MM-DDTHH:MM:SSZZZ:ZZ format Set short to True when the timestamp is in the YYYYMMDDTHHMMSSZZZZZ format """ try: # Split date/time and timezone information m =... |
logger.warn("ERROR: Unable to parse timestamp \"%s\"" % timestamp) | logger.warn("unable to parse timestamp \"%s\"" % timestamp) | def epochdate(timestamp, short=False): """ This function converts an ISO timestamp into seconds since epoch Set short to False when the timestamp is in the YYYY-MM-DDTHH:MM:SSZZZ:ZZ format Set short to True when the timestamp is in the YYYYMMDDTHHMMSSZZZZZ format """ try: # Split date/time and timezone information m =... |
def slurp_braindb(run): | def slurp_braindb(run, brain_alternate=None): | def slurp_braindb(run): """ Reads extra configuration from braindump database Param: run is the run directory Returns: Dictionary with the configuration, empty if error """ my_config = {} my_braindb = os.path.join(run, brainbase) try: my_file = open(my_braindb, 'r') except: # Error opening file return my_config for l... |
my_braindb = os.path.join(run, brainbase) | if brain_alternate is None: my_braindb = os.path.join(run, brainbase) else: my_braindb = os.path.join(run, brain_alternate) | def slurp_braindb(run): """ Reads extra configuration from braindump database Param: run is the run directory Returns: Dictionary with the configuration, empty if error """ my_config = {} my_braindb = os.path.join(run, brainbase) try: my_file = open(my_braindb, 'r') except: # Error opening file return my_config for l... |
return time.strftime("%Y%m%dT%H%M%S%z", my_time) | if utc: return time.strftime("%Y%m%dT%H%M%SZ", my_time) else: return time.strftime("%Y%m%dT%H%M%S%z", my_time) | def isodate(now=int(time.time()), utc=False, short=False): """ This function converts seconds since epoch into ISO timestamp """ if utc: my_time = time.gmtime(now) else: my_time = time.localtime(now) if short: return time.strftime("%Y%m%dT%H%M%S%z", my_time) else: return time.strftime("%Y-%m-%dT%H:%M:%S%z", my_time) |
return time.strftime("%Y-%m-%dT%H:%M:%S%z", my_time) | if utc: return time.strftime("%Y-%m-%dT%H:%M:%SZ", my_time) else: return time.strftime("%Y-%m-%dT%H:%M:%S%z", my_time) | def isodate(now=int(time.time()), utc=False, short=False): """ This function converts seconds since epoch into ISO timestamp """ if utc: my_time = time.gmtime(now) else: my_time = time.localtime(now) if short: return time.strftime("%Y%m%dT%H%M%S%z", my_time) else: return time.strftime("%Y-%m-%dT%H:%M:%S%z", my_time) |
my_time = my_time - my_offset | if tz[0] == '-': my_time = my_time + my_offset else: my_time = my_time - my_offset | def epochdate(timestamp, short=False): """ This function converts an ISO timestamp into seconds since epoch Set short to False when the timestamp is in the YYYY-MM-DDTHH:MM:SSZZZ:ZZ format Set short to True when the timestamp is in the YYYYMMDDTHHMMSSZZZZZ format """ try: # Split date/time and timezone information m =... |
print "Testing isodate() function" print " long local timestamp:", isodate() print " long utc timestamp:", isodate(utc=True) print "short local timestamp:", isodate(short=True) print " short utc timestamp:", isodate(utc=True,short=True) | now = int(time.time()) print "Testing isodate() function from now=%lu" % (now) print " long local timestamp:", isodate(now=now) print " long utc timestamp:", isodate(now=now,utc=True) print "short local timestamp:", isodate(now=now,short=True) print " short utc timestamp:", isodate(now=now,utc=True,short=True) print... | def keep_foreground(): """ This function turns the program into almost a daemon, but keep in foreground for Condor. It does not take any parameters and does not return anything. """ # Go to a safe place that is not susceptible to sudden umounts # FIX THIS: It may break some things try: os.chdir('/') except: logger.cri... |
logger.warn("Warning: run directory mismatch, using %s" % (run)) | logger.warn("run directory mismatch, using %s" % (run)) | def slurp_braindb(run, brain_alternate=None): """ Reads extra configuration from braindump database Param: run is the run directory Returns: Dictionary with the configuration, empty if error """ my_config = {} if brain_alternate is None: my_braindb = os.path.join(run, brainbase) else: my_braindb = os.path.join(run, br... |
def checkMyProxy( self , proxy=None, Time=100, checkRetrieverRenewer=False): | def checkMyProxy( self , userKerb=None, Time=100, checkRetrieverRenewer=False): | def checkMyProxy( self , proxy=None, Time=100, checkRetrieverRenewer=False): """ Note The Name is Really CONFUSING... but functionality is the same as for myproxy """ expires = None if userKerb == None: userKerb = self.getUserKerberos() |
if not timeLeftLocal : | if not timeLeft : | def checkMyProxy( self , proxy=None, Time=100, checkRetrieverRenewer=False): """ Note The Name is Really CONFUSING... but functionality is the same as for myproxy """ expires = None if userKerb == None: userKerb = self.getUserKerberos() |
maxEventsWritten = maxEvents, firstEvent = self.firstEvent, firstRun = self.workflowSpec.workflowRunNumber(), firstLumi = self.count) | maxEventsWritten=maxEvents, firstEvent=self.firstEvent, firstRun=self.workflowSpec.workflowRunNumber(), firstLumi=self.count, skipEvents=skipEvents) | def generateJobConfig(self, jobSpecNode): """ _generateJobConfig_ |
maxEvents = maxEvents, firstEvent = self.firstEvent, firstRun = self.workflowSpec.workflowRunNumber(), firstLumi = self.count) | maxEvents=maxEvents, firstEvent=self.firstEvent, firstRun=self.workflowSpec.workflowRunNumber(), firstLumi=self.count, skipEvents=skipEvents) | def generateJobConfig(self, jobSpecNode): """ _generateJobConfig_ |
logging.info("Retrying ldapsearch ... (%i/%i)" % (i, retries)) | logging.debug("Retrying ldapsearch ... (%i/%i)" % (i, retries)) | def ldapsearch(host, dn, filter, attr, logging, scope=ldap.SCOPE_SUBTREE, retries=5): timeout = 45 # seconds for i in range(retries+1): try: if i > 0: logging.info("Retrying ldapsearch ... (%i/%i)" % (i, retries)) time.sleep(i*10) con = ldap.initialize(host) # host = ldap://hostname[:port] bind = TimeoutFunctio... |
self.logging.info("Trying GIIS %s, %s" % (root['host'], root['base'])) | self.logging.debug("Trying GIIS %s, %s" % (root['host'], root['base'])) | def pick_CEs_from_giis_trees(self, root, tags, vos, seList, blacklist, whitelist, full): """ Recursively traverse the GIIS tree, starting from 'root', return CEs fullfilling requirements. """ |
self.logging.info("No active (and valid) jobs!") | self.logging.info("No (valid) jobs to query") | def query(self, obj, service='', objType='node'): """ Query status and eventually other scheduler related information, and store it in the job.runningJob data structure. |
cmdList.append('myproxy-logon -d -n -s %s -o %s -l \'%s\' -k %s -t 168:00'%\ | cmdList.append('myproxy-logon -d -n -s %s -o %s -l \"%s\" -k %s -t 168:00'%\ | def logonMyProxy( self, proxyCache, userDN, vo='cms', group=None, role=None): """ """ |
selectedBlock = chooseBlock(matchedBlocks.keys()) | selectedBlock = self.chooseBlock(matchedBlocks.keys()) | def getPileupFiles(self, *sites): """ _getPileupFiles_ |
if ( name == f['PFN']): | if ( name == os.path.basename(f['PFN'])): | def updateLFN(f, lfn, newLFN): """ _updateLFN_ Update a LFN. """ if f['LFN'] != lfn: return f['LFN'] = newLFN return |
tmp = string.split(file_name, ".") | only_name = os.path.basename(file_name) tmp = string.split(only_name, ".") | def updateLFN(f, lfn, newLFN): """ _updateLFN_ Update a LFN. """ if f['LFN'] != lfn: return f['LFN'] = newLFN return |
tmp = string.split(file_name, "_"+n_job) | tmp = string.split(only_name, "_"+n_job) | def updateLFN(f, lfn, newLFN): """ _updateLFN_ Update a LFN. """ if f['LFN'] != lfn: return f['LFN'] = newLFN return |
modifyFile(aFile, file_name) | modifyFile(aFile, os.path.basename(file_name), for_file) | def updateLFN(f, lfn, newLFN): """ _updateLFN_ Update a LFN. """ if f['LFN'] != lfn: return f['LFN'] = newLFN return |
exitcode, outputs = self.executeCommand(cmd, timeout = tout) | exitcode, outputs = self.executeCommand(cmd, timeout = self.timeout) | def __init__(self): super(ProtocolUberFtp, self).__init__() |
| def hackTheEnv(prependCommand = ''): """ HaCk ThE eNv *IMPORTANT NOTES* - this hack is necessary if the SYSTEM python under '/usr/bin') and the EXTERNAL python have the same version - the hack is necessary only for CLI which are python(2) script - the hack reverts PATH & LD_LYBRARY_PATH if an external PYTHON is prese... | def decodeSubmit(self, jsonString): """ specialized method to decode JSON output of glite-wms-job-submit """ # pre-processing the string before decoding toParse = jsonString.replace( '\n' , ',' ) toParse = self.pattern1.sub(r'{ "\1', toParse[:-1] ) toParse = self.pattern2.sub(r'":"\1', toParse ) toParse = self.pattern... |
return self.ExecuteCommand( self.proxyString + command )[0] | out, ret = self.ExecuteCommand( self.proxyString + self.hackEnv + command ) return out | def postMortem( self, schedulerId, outfile, service): """ perform scheduler logging-info """ command = "glite-wms-job-logging-info -v 3 " + schedulerId + \ " > " + outfile return self.ExecuteCommand( self.proxyString + command )[0] |
outJson, ret = self.ExecuteCommand(self.proxyString + command) | outJson, ret = self.ExecuteCommand(self.proxyString + self.hackEnv + command) | def query(self, obj, service='', objType='node') : """ query status and eventually other scheduler related information """ # jobId for remapping jobIds = {} |
returncode = -666666 | returncode = 0 | def executeCommand(self, command, timeout=None , stderr=False): """ _executeCommand_ |
self.logging.info('Your server credential will expire in:\n\t%s hours %s minutes %s seconds\n'%(hours,minutes,seconds)) | logMsg = 'Your credential for the required server will expire in:\n\t' logMsg += '%s hours %s minutes %s seconds\n'%(hours,minutes,seconds) self.logging.info(logMsg) | def checkMyProxy( self , proxy=None, Time=4, checkRetrieverRenewer=False): """ """ if proxy == None: proxy=self.getUserProxy() ## check the myproxy server valid = True |
and subsequent lines are indented by at least 1 space. | and subsequent lines are indented by at least 1 space or start with "This job was only very recently submitted". | def splitNgstatOutput(output): """ Split a string of ngstat output into a list with one job per list item. The assumption is that the first line of a job has no indentation, and subsequent lines are indented by at least 1 space. """ jobs = [] s = "" for line in output.split('\n'): if len(line) == 0: continue if lin... |
s += line + '\n' | s += '\n' + line elif re.match("This job was only very recently submitted", line): s += ' ' + line | def splitNgstatOutput(output): """ Split a string of ngstat output into a list with one job per list item. The assumption is that the first line of a job has no indentation, and subsequent lines are indented by at least 1 space. """ jobs = [] s = "" for line in output.split('\n'): if len(line) == 0: continue if lin... |
jobs.append(s) s = line + '\n' | jobs.append(s + '\n') s = line | def splitNgstatOutput(output): """ Split a string of ngstat output into a list with one job per list item. The assumption is that the first line of a job has no indentation, and subsequent lines are indented by at least 1 space. """ jobs = [] s = "" for line in output.split('\n'): if len(line) == 0: continue if lin... |
args = job['arguments'].replace('\\"', '').replace('\\', '') xrsl += '(arguments=%s)' % args | if job['arguments']: args = job['arguments'].replace('\\"', '').replace('\\', '') xrsl += '(arguments=%s)' % args | def decode(self, job, task, requirements=''): """ prepare scheduler specific job description |
for s in task['jobType'].split('&&'): if re.match('^ *\(.*=.*\) *$', s): xrsl += s | if task['jobType']: for s in task['jobType'].split('&&'): if re.match('^ *\(.*=.*\) *$', s): xrsl += s | def decode(self, job, task, requirements=''): """ prepare scheduler specific job description |
xrsl_file = os.path.dirname(task['cfgName']) + '/job.xrsl' | xrsl_file = os.path.dirname(task['cfgName'] or './') + '/%s-jobs.xrsl' % task['name'] | def submit(self, task, requirements='', config='', service = ''): """ set up submission parameters and submit uses self.decode() |
arcIdMatch = re.search("(\w+://([a-zA-Z0-9.-]+)\S*/\d*)", output) | arcIdMatch = re.search("(\w+://([a-zA-Z0-9.-]+)\S*/\d*)", jobstring) | def query(self, obj, service='', objType='node'): """ Query status and eventually other scheduler related information, and store it in the job.runningJob data structure. |
arcIdMatch = re.search("URL: (\w+://([a-zA-Z0-9.-]+)\S*/\d*)", output) | arcIdMatch = re.search("URL: (\w+://([a-zA-Z0-9.-]+)\S*/\d*)", jobstring) | def query(self, obj, service='', objType='node'): """ Query status and eventually other scheduler related information, and store it in the job.runningJob data structure. |
exitcode = -1 | def createDir(self, source, opt = "", tout = None): """ _createDir_ """ exitcode = -1 outputs = "" ll = source.getLynk() source_fullpath = ll.split("file://",1)[1] if self.checkExists(source, opt) is False: cmd = "/bin/mkdir -m 775 -p " + opt + " " + source_fullpath exitcode, outputs = self.executeCommand(cmd, timeout... | |
int(float(self.eventsPerJob) / float(efficiency)) | int(float(self.eventsPerMCDBJob) / float(efficiency)) | def generateJobConfig(self, jobSpecNode): """ _generateJobConfig_ |
ifile = ifile[1:] | ifile = ifile | def collectionJdlFile ( self, task, requirements='' ): """ build a collection jdl easy to be handled by the wmproxy API interface and gives back the list of input files for a better handling """ # general part for task jdl = "[\n" jdl += 'Type = "collection";\n' |
if job.runningJob['status'] == 'C' : continue | def query(self, obj, service='', objType='node'): """ query status of jobs """ | |
logging.WARNING( 'Warning: an error occurred killing subprocess [%s]' \ % str(err) ) | pass | def executeCommand(self, command, timeout=None , stderr=False): """ _executeCommand_ |
logging.WARNING( 'Warning: an error occurred closing subprocess [%s] %s %s' \ % (str(err), ''.join(outc)+''.join(errc), p.returncode )) | pass | def executeCommand(self, command, timeout=None , stderr=False): """ _executeCommand_ |
logging.DEBUG(command) logging.DEBUG(returncode) logging.DEBUG(''.join(outc)) logging.DEBUG(''.join(errc)) | def executeCommand(self, command, timeout=None , stderr=False): """ _executeCommand_ | |
self.pattern1 = re.compile('\{,[\s]*([a-zA-Z0-9_\-\+\=])') self.pattern2 = re.compile(':[\s]([a-zA-Z_\-\+\=])') self.pattern3 = re.compile( '[\s]*([a-zA-Z0-9_\-\+\=]*),[\s]*([a-zA-Z0-9_\-\+\=]*)"') self.pattern4 = re.compile( '[\s]*([a-zA-Z0-9_\-\+\=]*),[\s]*([a-zA-Z0-9_\-\+\=]*):') self.pattern5 = re.compile(',[\s]*}(... | self.pattern1 = re.compile('([^ \t\n\r\f\v\{\}]+)\s') self.pattern2 = re.compile(':"(["|\{])') self.pattern3 = re.compile('"[\s]*"') | def __init__(self): # call super super(BossliteJsonDecoder, self).__init__() # cache pattern to optimize reg-exp substitution self.pattern1 = re.compile('\{,[\s]*([a-zA-Z0-9_\-\+\=])') self.pattern2 = re.compile(':[\s]([a-zA-Z_\-\+\=])') self.pattern3 = re.compile( '[\s]*([a-zA-Z0-9_\-\+\=]*),[\s]*([a-zA-Z0-9_\-\+\=]... |
toParse = jsonString.replace( '\n' , ',' ) toParse = self.pattern1.sub(r'{ "\1', toParse[:-1] ) toParse = self.pattern2.sub(r'":"\1', toParse ) toParse = self.pattern3.sub(r'\1","\2"', toParse ) toParse = self.pattern4.sub(r'\1","\2":', toParse ) toParse = self.pattern5.sub(r'}', toParse) toParse = self.pattern6.sub(r'... | toParse = jsonString.replace( '\n' , ' ' ) toParse = self.pattern1.sub(r'"\1"', toParse ) toParse = self.pattern2.sub(r'":\1', toParse ) toParse = self.pattern3.sub(r'","', toParse ) | def decodeSubmit(self, jsonString): """ specialized method to decode JSON output of glite-wms-job-submit """ # pre-processing the string before decoding toParse = jsonString.replace( '\n' , ',' ) toParse = self.pattern1.sub(r'{ "\1', toParse[:-1] ) toParse = self.pattern2.sub(r'":"\1', toParse ) toParse = self.pattern... |
'hr':'R', | def __init__( self, **args): | |
'DONE':'SD' | 'Done':'SD' | def __init__( self, **args): |
map[ 'statusScheduler' ] = st | if st=='r': map[ 'statusScheduler' ] = 'Running' | def queryLocal(self, schedIdList, objType='node' ) : |
st = "DONE" | st = "Done" | def queryLocal(self, schedIdList, objType='node' ) : |
if not mKilled: raise SchedulerError ( "Unable to kill job "+jobid+" . Reason: ", out ) | mKilled2= r2.search(out) if not mKilled and not mKilled2: raise SchedulerError ( "Unable to kill job | def kill( self, obj ): """ kill the job instance |
self.session.commit() | if self.database == "MySQL": self.session.commit() | def modify(self, query): """ execute a query which does not return such as insert/update/delete """ |
self.fresh_env = 'unset LD_LIBRARY_PATH; export PATH=/usr/bin:/bin; source /etc/profile; source %s ; '%env | self.fresh_env = 'unset LD_LIBRARY_PATH; unset GLITE_ENV_SET; export PATH=/usr/bin:/bin; source /etc/profile; source %s ; '%env | def __init__(self): super(ProtocolLcgUtils, self).__init__() self.options = " --verbose " self.options += " --vo=cms " env = '' source = self.expandEnv('RUNTIME_AREA', '/CacheEnv.sh') if os.path.isfile(str(source).strip()): env = str(source) vars = {\ 'OSG_GRID': '/setup.sh', \ 'GLITE_WMS_LOCATION': '/etc/profile.d/g... |
| self.killThreshold = 100 | def __init__( self, **args): # call super class init method super(SchedulerGLite, self).__init__(**args) # some initializations self.warnings = [] # typical options self.vo = args.get( "vo", "cms" ) self.service = args.get( "service", "" ) self.config = args.get( "config", "" ) self.delegationId = args.get( "proxyna... |
schedIdList = "" | def kill( self, obj ): """ kill job """ # the object passed is a job if type(obj) == Job and self.valid( obj.runningJob ): # check for the RunningJob integrity schedIdList = str( obj.runningJob['schedulerId'] ).strip() # the object passed is a Task elif type(obj) == Task : schedIdList = "" for job in obj.jobs: if n... | |
schedIdList += " " + \ str( job.runningJob['schedulerId'] ).strip() command = "glite-wms-job-cancel --json --noint " + schedIdList out, ret = self.ExecuteCommand( self.proxyString + command ) if ret != 0 : raise SchedulerError('error executing glite-wms-job-cancel', out) elif ret == 0 and out.find("result: success")... | jobsToKill.append(str( job.runningJob['schedulerId'] ).strip()) chunk = lambda ulist, step: map(lambda i: ulist[i:i+step], xrange(0, len(ulist), step)) lljobs = chunk(jobsToKill, self.killThreshold) for x in lljobs : schedIdList = ' '.join(x) command = "glite-wms-job-cancel --json --noint " + schedIdList out, r... | def kill( self, obj ): """ kill job """ # the object passed is a job if type(obj) == Job and self.valid( obj.runningJob ): # check for the RunningJob integrity schedIdList = str( obj.runningJob['schedulerId'] ).strip() # the object passed is a Task elif type(obj) == Task : schedIdList = "" for job in obj.jobs: if n... |
r = re.compile('^(.*:.*/jobmanager-.*?)-(.*)') | if DEBUG > 1: print 'removeQueues: input %s' % celist r = re.compile('^(.*:.*/(jobmanager|nordugrid|cream)-.*?)-(.*)') | def removeQueues(celist): """ Given a list of CEUniqueIDs, return a list of jobmanager contact strings. """ r = re.compile('^(.*:.*/jobmanager-.*?)-(.*)') jmlist = [] for x in celist: m = r.match(x) if m: item = m.groups()[0] if (jmlist.count(item) == 0): jmlist.append(item) return jmlist |
self.pattern1 = re.compile('\{,[\s]*([a-zA-Z0-9_\-])') self.pattern2 = re.compile(':[\s]([a-zA-Z_\-])') | self.pattern1 = re.compile('\{,[\s]*([a-zA-Z0-9_\-\+\=])') self.pattern2 = re.compile(':[\s]([a-zA-Z_\-\+\=])') | def __init__(self): # call super super(BossliteJsonDecoder, self).__init__() # cache pattern to optimize reg-exp substitution self.pattern1 = re.compile('\{,[\s]*([a-zA-Z0-9_\-])') self.pattern2 = re.compile(':[\s]([a-zA-Z_\-])') self.pattern3 = re.compile( '[\s]*([a-zA-Z0-9_\-]*),[\s]*([a-zA-Z0-9_\-]*)"') self.patte... |
'[\s]*([a-zA-Z0-9_\-]*),[\s]*([a-zA-Z0-9_\-]*)"') | '[\s]*([a-zA-Z0-9_\-\+\=]*),[\s]*([a-zA-Z0-9_\-\+\=]*)"') | def __init__(self): # call super super(BossliteJsonDecoder, self).__init__() # cache pattern to optimize reg-exp substitution self.pattern1 = re.compile('\{,[\s]*([a-zA-Z0-9_\-])') self.pattern2 = re.compile(':[\s]([a-zA-Z_\-])') self.pattern3 = re.compile( '[\s]*([a-zA-Z0-9_\-]*),[\s]*([a-zA-Z0-9_\-]*)"') self.patte... |
'[\s]*([a-zA-Z0-9_\-]*),[\s]*([a-zA-Z0-9_\-]*):') self.pattern5 = re.compile(',[\s]*}(?!"[\s]*[a-zA-Z0-9_\-]*)') self.pattern6 = re.compile('([a-zA-Z0-9_\-])}') | '[\s]*([a-zA-Z0-9_\-\+\=]*),[\s]*([a-zA-Z0-9_\-\+\=]*):') self.pattern5 = re.compile(',[\s]*}(?!"[\s]*[a-zA-Z0-9_\-\+\=]*)') self.pattern6 = re.compile('([a-zA-Z0-9_\-\+\=])}') | def __init__(self): # call super super(BossliteJsonDecoder, self).__init__() # cache pattern to optimize reg-exp substitution self.pattern1 = re.compile('\{,[\s]*([a-zA-Z0-9_\-])') self.pattern2 = re.compile(':[\s]([a-zA-Z_\-])') self.pattern3 = re.compile( '[\s]*([a-zA-Z0-9_\-]*),[\s]*([a-zA-Z0-9_\-]*)"') self.patte... |
jdl += 'InputSandboxBaseURI = "%s";\n' % task['startDirectory'] | | def collectionJdlFile ( self, task, requirements='' ): """ build a collection jdl easy to be handled by the wmproxy API interface and gives back the list of input files for a better handling """ # general part for task jdl = "[\n" jdl += 'Type = "collection";\n' |
if ifile.find( 'file:/' ) == 0: globalSandbox += '"' + ifile + '",' | if ifile.find( 'file:/' ) <= 0: globalSandbox += '"' +task['startDirectory']+ ifile + '",' | def collectionJdlFile ( self, task, requirements='' ): """ build a collection jdl easy to be handled by the wmproxy API interface and gives back the list of input files for a better handling """ # general part for task jdl = "[\n" jdl += 'Type = "collection";\n' |
fileout = out.split() fileout[3] = self.__convertPermission__(out[3]) outt.append( fileout ) | if out: fileout = out.split() fileout[3] = self.__convertPermission__(out[3]) outt.append( fileout ) | def getFileInfo(self, source, token = None, opt = ""): """ rfdir |
if self.valid( job.runningJob ) or objType == 'node': | if self.valid( job.runningJob ) : | def query(self, obj, service='', objType='node') : """ query status and eventually other scheduler related information """ |
formattedParentIds = ','.join(parentIds) formattedJobIds = ','.join(jobIds) command = 'python ' + self.commandQueryPath \ + 'GLiteStatusQuery.py --parentId=%s --jobId=%s' \ % (formattedParentIds, formattedJobIds) outJson, ret = self.ExecuteCommand( self.prefixCommandQuery + \ self.proxyString + command ) try: out =... | if jobIds : formattedParentIds = ','.join(parentIds) formattedJobIds = ','.join(jobIds) command = 'python ' + self.commandQueryPath \ + 'GLiteStatusQuery.py --parentId=%s --jobId=%s' \ % (formattedParentIds, formattedJobIds) outJson, ret = self.ExecuteCommand( self.prefixCommandQuery + \ self.proxyString + command) ... | def query(self, obj, service='', objType='node') : """ query status and eventually other scheduler related information """ |
for h in host_list: | for h in deepcopy(host_list): | def getSoftwareAndArch(host_list, software, arch, bdii='exp-bdii.cern.ch'): """ Given a list of CEs, return only those that match a given software and architecture tag """ generateMaps(host_list, bdii) results_list = [] if (software): software = 'VO-cms-' + software else: software = '*' if (arch): arch = 'VO-cms-' + ... |
return os.path.join(dir, name) | return os.path.join(path, name) | def joinPath(self, path, name): """ joining files with base directory """ if path is None or path == '' : return name |
if ret != 0 : | if ret != 0 and jobid != "None": | def query(self, obj, service='', objType='node') : """ query status and eventually other scheduler related information It may use single 'node' scheduler id or bulk id for association """ if type(obj) != Task : raise SchedulerError('wrong argument type', str( type(obj) )) |
'Done':'SD'} | 'Done':'SD', 'C':'SD'} | def __init__( self, **args): super(SchedulerPbs, self).__init__(**args) self.jobScriptDir=args['jobScriptDir'] self.jobResDir=args['jobResDir'] self.queue=args['queue'] |
s.append('cd $PBS_O_WORKDIR') | if self.workerNodeWorkDir: s.append('cd ' + self.workerNodeWorkDir) | def submitJob ( self, conn, job, task=None, requirements=''): """ Need to copy the inputsandbox to WN before submitting a job""" |
command = "glite-wms-job-output --json --noint " \ | command = "glite-wms-job-output --json --noint --dir " + outdir + " " \ | def getOutput( self, obj, outdir='' ): """ retrieve job output """ if type(obj) == Job : # check for the RunningJob integrity if not self.valid( obj.runningJob ): raise SchedulerError('invalid object', str( obj.runningJob )) # the object passed is a valid Job, let's go on ... command = "glite-wms-job-output --json ... |
command = "cp -R " + tmp.group(1) + "/* " + outdir + "/" | command = "mv " + tmp.group(1) + "/* " + outdir + "/" | def getOutput( self, obj, outdir='' ): """ retrieve job output """ if type(obj) == Job : # check for the RunningJob integrity if not self.valid( obj.runningJob ): raise SchedulerError('invalid object', str( obj.runningJob )) # the object passed is a valid Job, let's go on ... command = "glite-wms-job-output --json ... |
command = "glite-wms-job-output --json --noint " + \ selJob.runningJob['schedulerId'] | command = "glite-wms-job-output --json --noint --dir " + outdir + " " \ + selJob.runningJob['schedulerId'] | def getOutput( self, obj, outdir='' ): """ retrieve job output """ if type(obj) == Job : # check for the RunningJob integrity if not self.valid( obj.runningJob ): raise SchedulerError('invalid object', str( obj.runningJob )) # the object passed is a valid Job, let's go on ... command = "glite-wms-job-output --json ... |
tmp = tmp.replace('doesn"t',"doesn't") | def dumps(self, myString): """ the same interface as simplejson ... """ tmp = str(myString) tmp = tmp.replace('\'','"') tmp = tmp.replace('None','null') return tmp | |
(r, w, e) = select.select([fd], [], [], None) | (r, w, e) = select.select([fd, fde], [], [], None) | def executeCommand(self, command, timeout=None , stderr=False): """ _executeCommand_ |
if fd in r or fde in r: | if fd in r: | def executeCommand(self, command, timeout=None , stderr=False): """ _executeCommand_ |
try: readerr = p.stderr.read() except: pass | if fde in r: readerr = p.stderr.read() | def executeCommand(self, command, timeout=None , stderr=False): """ _executeCommand_ |
template = { 'id' : None, 'jobId' : None, 'taskId' : None, 'schedulerId' : None, 'schedulerParentId' : None, | template = { 'schedulerId' : None, 'schedulerParentId' : None, | def main(): """ __main__ """ # load ad-hoc JSON encoder if simplejson is not present try : import simplejson as json except: json = myJSONEncoder() # parse options try: opts, args = getopt.getopt(sys.argv[1:], "", ["help", "parentId=", "jobId="]) except getopt.GetoptError, err: print usage() sys.exit(1) inputFile = ... |
'stopTime' : None | 'stopTime' : None, 'service' : None | def main(): """ __main__ """ # load ad-hoc JSON encoder if simplejson is not present try : import simplejson as json except: json = myJSONEncoder() # parse options try: opts, args = getopt.getopt(sys.argv[1:], "", ["help", "parentId=", "jobId="]) except getopt.GetoptError, err: print usage() sys.exit(1) inputFile = ... |
aFile['SEName']=for_file['for_lfn']+file_name | aFile['LFN']=for_file['for_lfn']+os.path.basename(file_name) | def updateLFN(f, lfn, newLFN): """ _updateLFN_ Update a LFN. """ if f['LFN'] != lfn: return f['LFN'] = newLFN return |
aFile['PFN']=for_file['endpoint']+file_name | aFile['PFN']=for_file['endpoint']+os.path.basename(file_name) | def updateLFN(f, lfn, newLFN): """ _updateLFN_ Update a LFN. """ if f['LFN'] != lfn: return f['LFN'] = newLFN return |
for job in obj.jobs: if job['name'].count('.'): returned_name = job['name'].replace('.', '_') returnMap[job['name']] = returnMap.pop(returned_name) | def submit( self, obj, requirements='', config ='', service='' ): """ submit a jdl to glite ends with a call to retrieve wms and job,gridid asssociation """ | |
if obj['name'].count('.'): returned_name = obj['name'].replace('.', '_') returnMap[obj['name']] = returnMap.pop(returned_name) | def submit( self, obj, requirements='', config ='', service='' ): """ submit a jdl to glite ends with a call to retrieve wms and job,gridid asssociation """ | |
cmd = 'rm %s'%proxy | cmd = 'rm %s'%userKerb | def destroyCredential(self, userKerb): """ """ if userKerb == None: msg = "Error no valid user kerberos to remove " raise Exception(msg) |
created = models.DateTime(auto_now_add=True) modified = models.DateTime(auto_now=True) | created = models.DateTimeField(auto_now_add=True) modified = models.DateTimeField(auto_now=True) | def __unicode__(self): return self.name |
top_domain = netloc.split('.')[-2:] | top_domain = '.'.join(netloc.split('.')[-2:]) | def find_organization(self, row): # see if there's a product with the normalized name org = Organization.objects.filter(name__iexact=normalize_org_name(row.get('Source Name'))) if not org.count(): # see if there's a product with the URL urlparts = urlsplit(row.get('Home Page URL', '')) if urlparts: netloc = urlparts.ne... |
org = Organization.objects.filter(homepage__icontains=top_domain) | org = Organization.objects.filter( Q( homepage__icontains='/' + top_domain ) | Q( homepage__icontains='.' + top_domain ) ) | def find_organization(self, row): # see if there's a product with the normalized name org = Organization.objects.filter(name__iexact=normalize_org_name(row.get('Source Name'))) if not org.count(): # see if there's a product with the URL urlparts = urlsplit(row.get('Home Page URL', '')) if urlparts: netloc = urlparts.ne... |
<p class="pic"><a href="/level/1/film/179805/sr/1/"><img src="/images/sm_film/6505.jpg" alt="Title" title="Title" /></a></p> \ <div class="info"> \ <p class="name"><a href="/level/1/film/179805/sr/1/">Title</a>, <span class="year"><a href="/level/10/m_act[year]/1952/">1952</a></span></p> \ | def parse(self, object, content): ''' >>> m = Movie() >>> m.parse('link', u'<div class="element width_2"> \ <p class="pic"><a href="/level/1/film/179805/sr/1/"><img src="/images/sm_film/6505.jpg" alt="Title" title="Title" /></a></p> \ <div class="info"> \ <p class="name"><a href="/level/1/film/179805/sr/1/">Title</a>, ... | |
<span class="gray">США, <i class="director">реж. <a class="lined" href="/level/4/people/28795/">Эрик Бросс</a></i> \ <br />(триллер, комедия) \ </span> \ <span class="gray"><a class="lined" href="/level/4/people/28798/">МакКензи Эстин</a>, <a class="lined" href="/level/4/people/3497/">Тодд Филд</a></span> \ | def parse(self, object, content): ''' >>> m = Movie() >>> m.parse('link', u'<div class="element width_2"> \ <p class="pic"><a href="/level/1/film/179805/sr/1/"><img src="/images/sm_film/6505.jpg" alt="Title" title="Title" /></a></p> \ <div class="info"> \ <p class="name"><a href="/level/1/film/179805/sr/1/">Title</a>, ... | |
>>> m.title u'Title' >>> m.id 179805 | def parse(self, object, content): ''' >>> m = Movie() >>> m.parse('link', u'<div class="element width_2"> \ <p class="pic"><a href="/level/1/film/179805/sr/1/"><img src="/images/sm_film/6505.jpg" alt="Title" title="Title" /></a></p> \ <div class="info"> \ <p class="name"><a href="/level/1/film/179805/sr/1/">Title</a>, ... | |
>>> m.year 1952 | >>> m = Movie() >>> m.parse('link', u'<div class="element width_2"> \ <span class="gray">Ultimate Taboo</span> \ </div>') >>> m.runtime >>> m.title_original u'Ultimate Taboo' | def parse(self, object, content): ''' >>> m = Movie() >>> m.parse('link', u'<div class="element width_2"> \ <p class="pic"><a href="/level/1/film/179805/sr/1/"><img src="/images/sm_film/6505.jpg" alt="Title" title="Title" /></a></p> \ <div class="info"> \ <p class="name"><a href="/level/1/film/179805/sr/1/">Title</a>, ... |
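
Several of the rows above (the `epochdate`/`isodate` fixes and the `parse_iso8601` pattern) converge on a single change: replace brittle string slicing with one ISO 8601 regex plus explicit zone-offset arithmetic. Below is a minimal, self-contained sketch of that pattern. The regex and the offset sign handling are taken from the rows themselves; the `calendar.timegm` conversion, the return-`None` error path, and the sample timestamps are assumptions of this sketch, not the project's actual code (which logs through `logger.warn` and also accepts a `short` flag).

```python
import calendar
import datetime
import re

# Regex taken verbatim from the table above: it accepts both the long
# (YYYY-MM-DDTHH:MM:SS+ZZ:ZZ) and short (YYYYMMDDTHHMMSS+ZZZZ) forms,
# an optional fractional-seconds part, and either 'Z' or a numeric zone.
parse_iso8601 = re.compile(
    r'(\d{4})-?(\d{2})-?(\d{2})[ tT]?'
    r'(\d{2}):?(\d{2}):?(\d{2})([.,]\d+)?'
    r'([zZ]|[-+](\d{2}):?(\d{2}))')

def epochdate(timestamp):
    """Convert an ISO 8601 timestamp to seconds since epoch, or None."""
    m = parse_iso8601.search(timestamp)
    if m is None:
        return None  # the original logs via logger.warn instead
    # Groups 1-6 are year..second; group 7 (fractional seconds) is ignored.
    dt = datetime.datetime(*(int(m.group(i)) for i in range(1, 7)))
    seconds = calendar.timegm(dt.timetuple())  # interpret as UTC first
    tz = m.group(8)
    if tz.upper() != 'Z':
        # Numeric zone: groups 9 and 10 are the HH and MM of the offset.
        offset = int(m.group(9)) * 3600 + int(m.group(10)) * 60
        # '-05:00' means local time lags UTC, so add the offset back;
        # this matches the sign fix shown in the rows above.
        seconds = seconds + offset if tz[0] == '-' else seconds - offset
    return seconds

print(epochdate("2011-03-14T09:26:53-05:00"))  # 1300112813 (14:26:53 UTC)
print(epochdate("20110314T142653Z"))           # 1300112813 as well
```

The sign handling is the subtle part, and it is exactly what one of the fixes above corrects: a `-05:00` zone means the wall-clock reading is five hours behind UTC, so those seconds must be added to reach the UTC epoch value.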