Column schema (from the dataset preview):

    Unnamed: 0   int64    row index (values 0 to ~10k)
    function     string   79 to ~138k chars; Python source with one exception type masked as __HOLE__
    label        string   20 classes; the exception type behind __HOLE__
    info         string   42 to 261 chars; dataset/ETHPy150Open <repo>/<file path>/<qualified name>
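Each row below pairs a function (with one exception type masked as __HOLE__) with the label that fills the mask and the path of the originating file. As a minimal sketch of how such rows could be loaded and inspected with pandas -- assuming the table is exported as a CSV named py150_holes.csv (the file name and storage format are assumptions, not given by the schema above):

import pandas as pd

# Assumed export; the schema above only names the four columns.
df = pd.read_csv('py150_holes.csv')

row = df.iloc[0]
print(row['label'])           # e.g. 'KeyError'
print(row['function'][:120])  # source text containing the __HOLE__ mask
print(row['info'])            # dataset/ETHPy150Open <repo>/<path>/<qualname>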
600
def objectInfo(self, objname):
    'get dict of methodnames on the named object'
    try:
        return self.objDict[objname].xmlrpc_methods
    except __HOLE__:
        return 'error: server has no object named %s' % objname
KeyError
dataset/ETHPy150Open cjlee112/pygr/pygr/coordinator.py/XMLRPCServerBase.objectInfo
601
def methodCall(self, objname, methodname, args):
    'run the named method on the named object and return its result'
    try:
        obj = self.objDict[objname]
        if methodname in obj.xmlrpc_methods:
            m = getattr(obj, methodname)
        else:
            print >>sys.stderr, \
                  "methodCall: blocked unregistered method %s" % methodname
            return ''
    except (__HOLE__, AttributeError):
        return ''  # RETURN FAILURE CODE
    return m(*args)  # RUN THE OBJECT METHOD
KeyError
dataset/ETHPy150Open cjlee112/pygr/pygr/coordinator.py/XMLRPCServerBase.methodCall
602
def assign_processors(self):
    "hand out available processors to coordinators in order of need"
    margin = self.overload_margin - 1.0
    free_cpus = []
    nproc = {}
    for c in self.coordinators.values():  # COUNT NUMBER OF PROCS
        for host, pid in c.processors:  # RUNNING ON EACH HOST
            try:
                nproc[host] += 1.0  # INCREMENT AN EXISTING COUNT
            except KeyError:
                nproc[host] = 1.0  # NEW, SO SET INITIAL COUNT
    for host in self.hosts:  # BUILD LIST OF HOST CPUS TO BE ASSIGNED
        if host not in self.systemLoad:  # ADDING A NEW HOST
            self.systemLoad[host] = 0.0  # DEFAULT LOAD: ASSUME HOST EMPTY
        try:  # host MAY NOT BE IN nproc, SO CATCH THAT ERROR
            if self.systemLoad[host] > nproc[host]:
                raise KeyError  # USE self.systemLoad[host]
        except __HOLE__:
            load = self.systemLoad[host]  # MAXIMUM VALUE
        else:
            load = nproc[host]  # MAXIMUM VALUE
        if load < self.hosts[host].maxload + margin:
            free_cpus += int(self.hosts[host].maxload
                             + self.overload_margin - load) * [host]
    if len(free_cpus) == 0:  # WE DON'T HAVE ANY CPUS TO GIVE OUT
        return False
    l = []  # BUILD A LIST OF HOW MANY CPUS EACH COORDINATOR NEEDS
    for c in self.coordinators.values():
        ncpu = c.allocated_ncpu - len(c.processors)
        if ncpu > 0:
            l += ncpu * [c]  # ADD c TO l EXACTLY ncpu TIMES
    import random
    random.shuffle(l)  # REORDER LIST OF COORDINATORS RANDOMLY
    i = 0  # INDEX INTO OUR l LIST
    while i < len(free_cpus) and i < len(l):  # Hand out free CPUs one by one.
        l[i].new_cpus.append(free_cpus[i])
        i += 1
    return i > 0  # RETURN TRUE IF WE HANDED OUT SOME PROCESSORS
KeyError
dataset/ETHPy150Open cjlee112/pygr/pygr/coordinator.py/ResourceController.assign_processors
603
def set_hostinfo(self, host, attr, val):
    "increase or decrease the maximum load allowed on a given host"
    try:
        setattr(self.hosts[host], attr, val)
    except __HOLE__:
        self.hosts[host] = HostInfo('%s=%s' % (attr, str(val)))
    return True  # USE THIS AS DEFAULT XMLRPC RETURN VALUE
KeyError
dataset/ETHPy150Open cjlee112/pygr/pygr/coordinator.py/ResourceController.set_hostinfo
604
def delrule(self, rsrc):
    "delete a resource generation rule from our database"
    try:
        del self.rules[rsrc]
    except __HOLE__:
        print >>sys.stderr, "Attempt to delete unknown resource rule %s" \
              % rsrc
    else:
        self.rules.close()  # THIS IS THE ONLY WAY I KNOW TO FLUSH...
        self.getrules()
    return True  # USE THIS AS DEFAULT XMLRPC RETURN VALUE
KeyError
dataset/ETHPy150Open cjlee112/pygr/pygr/coordinator.py/ResourceController.delrule
605
def register_coordinator(self, name, url, user, priority, resources,
                         immediate, demand_ncpu):
    "save a coordinator's registration info"
    try:
        print >>sys.stderr, 'change_priority: %s (%s,%s): %f -> %f' \
              % (name, user, url, self.coordinators[url].priority, priority)
        self.coordinators[url].priority = priority
        self.coordinators[url].immediate = immediate
        self.coordinators[url].demand_ncpu = demand_ncpu
    except __HOLE__:
        print >>sys.stderr, 'register_coordinator: %s (%s,%s): %f' \
              % (name, user, url, priority)
        self.coordinators[url] = CoordinatorInfo(name, url, user, priority,
                                                 resources, self.njob,
                                                 immediate, demand_ncpu)
        self.njob += 1  # INCREMENT COUNT OF JOBS WE'VE REGISTERED
        self.must_rebalance = True  # FORCE REBALANCING ON NEXT OPPORTUNITY
    return True  # USE THIS AS DEFAULT XMLRPC RETURN VALUE
KeyError
dataset/ETHPy150Open cjlee112/pygr/pygr/coordinator.py/ResourceController.register_coordinator
606
def unregister_coordinator(self, name, url, message):
    "remove a coordinator from our list"
    try:
        del self.coordinators[url]
        print >>sys.stderr, 'unregister_coordinator: %s (%s): %s' \
              % (name, url, message)
        self.load_balance()  # FORCE IT TO REBALANCE THE LOAD TO NEW JOBS...
    except __HOLE__:
        print >>sys.stderr, 'unregister_coordinator: %s unknown:%s (%s)' \
              % (name, url, message)
    return True  # USE THIS AS DEFAULT XMLRPC RETURN VALUE
KeyError
dataset/ETHPy150Open cjlee112/pygr/pygr/coordinator.py/ResourceController.unregister_coordinator
607
def request_cpus(self, name, url):
    "return a list of hosts for this coordinator to run processors on"
    try:
        c = self.coordinators[url]
    except __HOLE__:
        print >>sys.stderr, 'request_cpus: unknown coordinator %s @ %s' \
              % (name, url)
        return []  # HAND BACK AN EMPTY LIST
    # Calculate how many CPUs each coordinator should get.
    self.assign_load()
    # Assign free CPUs to coordinators which need them.
    self.assign_processors()
    new_cpus = tuple(c.new_cpus)  # MAKE A NEW COPY OF THE LIST OF HOSTS
    del c.new_cpus[:]  # EMPTY OUR LIST
    return new_cpus
KeyError
dataset/ETHPy150Open cjlee112/pygr/pygr/coordinator.py/ResourceController.request_cpus
608
def register_processor(self, host, pid, url):
    "record a new processor starting up"
    try:
        self.coordinators[url] += (host, pid)
        self.systemLoad[host] += 1.0  # THIS PROBABLY INCREASES LOAD BY 1
    except __HOLE__:
        pass
    return True  # USE THIS AS DEFAULT XMLRPC RETURN VALUE
KeyError
dataset/ETHPy150Open cjlee112/pygr/pygr/coordinator.py/ResourceController.register_processor
609
def unregister_processor(self, host, pid, url):
    "processor shutting down, so remove it from the list"
    try:
        self.coordinators[url] -= (host, pid)
        self.systemLoad[host] -= 1.0  # THIS PROBABLY DECREASES LOAD BY 1
        if self.systemLoad[host] < 0.0:
            self.systemLoad[host] = 0.0
        for k, v in self.locks.items():  # MAKE SURE THIS PROC HAS NO LOCKS
            h = k.split(':')[0]
            if h == host and v == pid:
                del self.locks[k]  # REMOVE ALL ITS PENDING LOCKS
    except __HOLE__:
        pass
    self.load_balance()  # FREEING A PROCESSOR, SO REBALANCE TO USE THIS
    return True  # USE THIS AS DEFAULT XMLRPC RETURN VALUE
KeyError
dataset/ETHPy150Open cjlee112/pygr/pygr/coordinator.py/ResourceController.unregister_processor
610
def get_resource(self, host, pid, rsrc):
    """return a filename for the resource, or False if rule must be applied,
    or True if client must wait to get the resource"""
    key = host + ':' + rsrc
    try:  # JUST HAND BACK THE RESOURCE
        return self.resources[key]
    except __HOLE__:
        if key in self.locks:
            return True  # TELL CLIENT TO WAIT
        else:
            return False  # TELL CLIENT TO ACQUIRE IT VIA RULE
KeyError
dataset/ETHPy150Open cjlee112/pygr/pygr/coordinator.py/ResourceController.get_resource
611
def del_lock(self, host, rsrc):
    "delete a lock on a pending resource construction process"
    key = host + ':' + rsrc
    try:
        del self.locks[key]  # REMOVE THE LOCK
    except __HOLE__:
        print >>sys.stderr, "attempt to release non-existent lock %s:%s" \
              % (host, rsrc)
    return True  # USE THIS AS DEFAULT XMLRPC RETURN VALUE
KeyError
dataset/ETHPy150Open cjlee112/pygr/pygr/coordinator.py/ResourceController.del_lock
612
def __getitem__(self, k):
    try:
        return dict.__getitem__(self, k)
    except __HOLE__:
        val = AttrProxy(self.getattr_proxy, k)
        self[k] = val
        return val
KeyError
dataset/ETHPy150Open cjlee112/pygr/pygr/coordinator.py/DictAttrProxy.__getitem__
613
def __init__(self, name, script, it, resources, port=8888, priority=1.0,
             rc_url=None, errlog=False, immediate=False, ncpu_limit=999999,
             demand_ncpu=0, max_initialization_errors=3, **kwargs):
    self.name = name
    self.script = script
    self.it = iter(it)  # Make sure self.it is an iterator.
    self.resources = resources
    self.priority = priority
    self.errlog = errlog
    self.immediate = immediate
    self.ncpu_limit = ncpu_limit
    self.demand_ncpu = demand_ncpu
    self.max_initialization_errors = max_initialization_errors
    self.kwargs = kwargs
    self.host = get_hostname()
    self.user = os.environ['USER']
    try:
        # Make sure ssh-agent is available before we launch
        # a lot of processes.
        a = os.environ['SSH_AGENT_PID']
    except KeyError:
        raise OSError(1, 'SSH_AGENT_PID not found. No ssh-agent running?')
    self.dir = os.getcwd()
    self.n = 0
    self.nsuccess = 0
    self.nerrors = 0
    self.nssh_errors = 0
    self.iclient = 0
    self.max_clients = 40
    if rc_url is None:
        # Try the default resource-controller address on the same host.
        rc_url = 'http://%s:5000' % self.host
    self.rc_url = rc_url
    # Connect to the resource controller...
    self.rc_server = xmlrpclib.ServerProxy(rc_url)
    # ...create an XMLRPC server.
    self.server, self.port = get_server(self.host, port)
    # ...and provide it with all the methods.
    self.server.register_instance(self)
    self.clients = {}
    self.pending = {}
    self.already_done = {}
    self.stop_clients = {}
    self.logfile = {}
    self.clients_starting = {}
    self.clients_initializing = {}
    self.initialization_errors = {}
    try:  # LOAD LIST OF IDs ALREADY SUCCESSFULLY PROCESSED, IF ANY
        f = file(name + '.success', 'rU')  # text file
        for line in f:
            self.already_done[line.strip()] = None
        f.close()
    except __HOLE__:  # OK IF NO SUCCESS FILE YET, WE'LL CREATE ONE.
        pass
    # Success file is to be cumulative but overwrite the error file.
    self.successfile = file(name + '.success', 'a')
    self.errorfile = file(name + '.error', 'w')
    self.done = False
    self.hosts = DictAttrProxy(self.rc_server.get_hostinfo)
    self.register()
IOError
dataset/ETHPy150Open cjlee112/pygr/pygr/coordinator.py/Coordinator.__init__
614
def start_client(self, host):
    "start a processor on a client node"
    import tempfile
    if len(self.clients) >= self.ncpu_limit:
        print >>sys.stderr, 'start_client: blocked, CPU limit', \
              len(self.clients), self.ncpu_limit
        return  # DON'T START ANOTHER PROCESS, TOO MANY ALREADY
    if len(self.clients) >= self.max_clients:
        print >>sys.stderr, 'start_client: blocked, too many already', \
              len(self.clients), self.max_clients
        return  # DON'T START ANOTHER PROCESS, TOO MANY ALREADY
    try:
        if len(self.clients_starting[host]) >= self.max_ssh_errors:
            print >>sys.stderr, \
                  'start_client: blocked, too many unstarted jobs:', \
                  host, self.clients_starting[host]
            return  # DON'T START ANOTHER PROCESS, host MAY BE DEAD...
    except KeyError:  # NO clients_starting ON host, GOOD!
        pass
    try:
        if len(self.initialization_errors[host]) >= \
           self.max_initialization_errors:
            print >>sys.stderr, \
                  'start_client: blocked, too many initialization errors:', \
                  host, self.initialization_errors[host]
            return  # DON'T START ANOTHER PROCESS, host HAS A PROBLEM
    except KeyError:  # NO initialization_errors ON host, GOOD!
        pass
    try:
        sshopts = self.hosts[host].sshopts  # GET sshopts VIA XMLRPC
    except __HOLE__:
        sshopts = ''
    logfile = os.path.join(tempfile.gettempdir(),
                           '%s_%d.log' % (self.name, self.iclient))
    # PASS OUR KWARGS ON TO THE CLIENT PROCESSOR
    kwargs = ' '.join(['--%s=%s' % (k, v)
                       for k, v in self.kwargs.items()])
    cmd = 'cd %s;%s %s --url=http://%s:%d --rc_url=%s --logfile=%s %s %s' \
          % (self.dir, self.python, self.script, self.host, self.port,
             self.rc_url, logfile, self.name, kwargs)
    # UGH, HAVE TO MIX CSH REDIRECTION (REMOTE) WITH SH REDIRECTION (LOCAL)
    ssh_cmd = "ssh %s %s '(%s) </dev/null >&%s &' </dev/null >>%s 2>&1 &" \
              % (sshopts, host, cmd, logfile, self.errlog)
    print >>sys.stderr, 'SSH: ' + ssh_cmd
    self.logfile[logfile] = [host, False, self.iclient]  # NO PID YET
    try:  # RECORD THIS CLIENT AS STARTING UP
        self.clients_starting[host][self.iclient] = time.time()
    except KeyError:  # CREATE A NEW HOST ENTRY
        self.clients_starting[host] = {self.iclient: time.time()}
    # RUN SSH IN BACKGROUND TO AVOID WAITING FOR IT TO TIMEOUT!!!
    os.system(ssh_cmd)  # LAUNCH THE SSH PROCESS, SHOULD RETURN IMMEDIATELY
    self.iclient += 1  # ADVANCE OUR CLIENT COUNTER
AttributeError
dataset/ETHPy150Open cjlee112/pygr/pygr/coordinator.py/Coordinator.start_client
615
def register_client(self, host, pid, logfile):
    'XMLRPC call to register client hostname and PID as starting_up'
    print >>sys.stderr, 'register_client: %s:%d' % (host, pid)
    self.clients[(host, pid)] = 0
    try:
        self.logfile[logfile][1] = pid  # SAVE OUR PID
        iclient = self.logfile[logfile][2]  # GET ITS CLIENT ID
        del self.clients_starting[host][iclient]  # REMOVE FROM STARTUP LIST
    except __HOLE__:
        print >>sys.stderr, 'no client logfile?', host, pid, logfile
    self.clients_initializing[(host, pid)] = logfile
    return True  # USE THIS AS DEFAULT XMLRPC RETURN VALUE
KeyError
dataset/ETHPy150Open cjlee112/pygr/pygr/coordinator.py/Coordinator.register_client
616
def unregister_client(self, host, pid, message):
    'XMLRPC call to remove client from register as exiting'
    print >>sys.stderr, 'unregister_client: %s:%d %s' \
          % (host, pid, message)
    try:
        del self.clients[(host, pid)]
    except KeyError:
        print >>sys.stderr, 'unregister_client: unknown client %s:%d' \
              % (host, pid)
    try:  # REMOVE IT FROM THE LIST OF CLIENTS TO SHUTDOWN, IF PRESENT
        del self.stop_clients[(host, pid)]
    except KeyError:
        pass
    try:  # REMOVE FROM INITIALIZATION LIST
        del self.clients_initializing[(host, pid)]
    except __HOLE__:
        pass
    if len(self.clients) == 0 and self.done:
        # No more tasks or clients, the server can exit.
        self.exit("Done")
    return True  # USE THIS AS DEFAULT XMLRPC RETURN VALUE
KeyError
dataset/ETHPy150Open cjlee112/pygr/pygr/coordinator.py/Coordinator.unregister_client
617
def report_success(self, host, pid, success_id):
    'mark task as successfully completed'
    # Keep permanent record of success ID.
    print >>self.successfile, success_id
    self.successfile.flush()
    self.nsuccess += 1
    try:
        self.clients[(host, pid)] += 1
    except __HOLE__:
        print >>sys.stderr, 'report_success: unknown client %s:%d' \
              % (host, pid)
    try:
        del self.pending[success_id]
    except KeyError:
        print >>sys.stderr, 'report_success: unknown ID %s' \
              % str(success_id)
    return True  # USE THIS AS DEFAULT XMLRPC RETURN VALUE
KeyError
dataset/ETHPy150Open cjlee112/pygr/pygr/coordinator.py/Coordinator.report_success
618
def report_error(self, host, pid, id, tb_report):
    "get traceback report from client as text"
    print >>sys.stderr, "TRACEBACK: %s:%s ID %s\n%s" % \
          (host, str(pid), str(id), tb_report)
    if (host, pid) in self.clients_initializing:
        logfile = self.clients_initializing[(host, pid)]
        try:
            self.initialization_errors[host].append(logfile)
        except KeyError:
            self.initialization_errors[host] = [logfile]
    try:
        del self.pending[id]
    except __HOLE__:
        # Not associated with an actual task ID, do not record.
        if id is not None and id is not False:
            print >>sys.stderr, 'report_error: unknown ID %s' % str(id)
    else:
        print >>self.errorfile, id  # KEEP PERMANENT RECORD OF FAILURE ID
        self.nerrors += 1
        self.errorfile.flush()
    return True  # USE THIS AS DEFAULT XMLRPC RETURN VALUE
KeyError
dataset/ETHPy150Open cjlee112/pygr/pygr/coordinator.py/Coordinator.report_error
619
def next(self, host, pid, success_id):
    'return next ID from iterator to the XMLRPC caller'
    if (host, pid) not in self.clients:
        print >>sys.stderr, 'next: unknown client %s:%d' % (host, pid)
        return False  # HAND BACK "NO MORE FOR YOU TO DO" SIGNAL
    try:  # INITIALIZATION DONE, SO REMOVE FROM INITIALIZATION LIST
        del self.clients_initializing[(host, pid)]
    except __HOLE__:
        pass
    if success_id is not False:
        self.report_success(host, pid, success_id)
    if self.done:  # EXHAUSTED OUR ITERATOR, SO SHUT DOWN THIS CLIENT
        return False  # HAND BACK "NO MORE FOR YOU TO DO" SIGNAL
    try:  # CHECK LIST FOR COMMAND TO SHUT DOWN THIS CLIENT
        del self.stop_clients[(host, pid)]  # IS IT IN stop_clients?
        return False  # IF SO, HAND BACK "NO MORE FOR YOU TO DO" SIGNAL
    except KeyError:
        # DO ONE MORE CHECK: ARE WE OVER OUR MAX ALLOWED LOAD?
        if len(self.clients) > self.max_clients:
            # Yes, better throttle down.
            print >>sys.stderr, 'next: halting %s: too many processors' \
                  ' (%d>%d)' % (host, len(self.clients), self.max_clients)
            return False  # HAND BACK "NO MORE FOR YOU TO DO" SIGNAL
    for id in self.it:  # GET AN ID WE CAN USE
        if str(id) not in self.already_done:
            self.n += 1  # GREAT, WE CAN USE THIS ID
            self.lastID = id
            self.pending[id] = (host, pid, time.time())
            print >>sys.stderr, 'giving id %s to %s:%d' % (str(id),
                                                           host, pid)
            return id
    print >>sys.stderr, 'exhausted all items from iterator!'
    self.done = True  # EXHAUSTED OUR ITERATOR
    # Release our claims on any further processor allocation
    # and inform the resource controller about it.
    self.priority = 0.0
    self.register()
    return False  # False IS CONFORMABLE BY XMLRPC...
KeyError
dataset/ETHPy150Open cjlee112/pygr/pygr/coordinator.py/Coordinator.next
620
def run_all(self, resultGenerator, **kwargs):
    "run until all task IDs completed, trap & report all errors"
    errors_in_a_row = 0
    it = resultGenerator(self, **kwargs)  # GET ITERATOR FROM GENERATOR
    report_time = time.time()
    self.register()  # REGISTER WITH RESOURCE CONTROLLER & COORDINATOR
    initializationError = None
    try:  # TRAP ERRORS BOTH IN USER CODE AND coordinator CODE
        while 1:
            try:  # TRAP AND REPORT ALL ERRORS IN USER CODE
                id = it.next()  # THIS RUNS USER CODE FOR ONE ITERATION
                self.success_id = id  # MARK THIS AS A SUCCESS...
                errors_in_a_row = 0
                initializationError = False
            except StopIteration:  # NO MORE TASKS FOR US...
                if not hasattr(self, 'serverStopIteration'):  # Weird!
                    # USER CODE RAISED StopIteration?!?
                    self.report_error(self.pending_id)  # REPORT THE PROBLEM
                    self.exit_message = 'user StopIteration error'
                elif initializationError:
                    self.exit_message = 'initialization error'
                else:
                    self.exit_message = 'done'
                break
            except SystemExit:  # sys.exit() CALLED
                raise  # WE REALLY DO WANT TO EXIT.
            except:  # MUST HAVE BEEN AN ERROR IN THE USER CODE
                if initializationError is None:  # STILL IN INITIALIZATION
                    initializationError = True
                self.report_error(self.pending_id)  # REPORT THE PROBLEM
                errors_in_a_row += 1
                if errors_in_a_row >= self.max_errors_in_a_row:
                    self.exit_message = 'too many errors'
                    break
            if time.time() - report_time > self.report_frequency:
                self.report_load()  # SEND A ROUTINE LOAD REPORT
                report_time = time.time()
    except __HOLE__:  # sys.exit() CALLED
        pass  # WE REALLY DO WANT TO EXIT.
    except:  # IMPORTANT TO TRAP ALL ERRORS SO THAT WE UNREGISTER!!
        traceback.print_exc(self.max_tb, sys.stderr)  # REPORT TB TO OUR LOG
        self.exit_message = 'error trap'
    self.unregister('run_all ' + self.exit_message)  # MUST UNREGISTER!!
SystemExit
dataset/ETHPy150Open cjlee112/pygr/pygr/coordinator.py/Processor.run_all
621
def parse_argv():
    """parse sys.argv into a dictionary of GNU-style args (--foo=bar)
    and a list of other args"""
    d = {}
    l = []
    for v in sys.argv[1:]:
        if v[:2] == '--':
            try:
                k, v = v[2:].split('=')
                d[k] = v
            except __HOLE__:
                d[v[2:]] = None
        else:
            l.append(v)
    return d, l
ValueError
dataset/ETHPy150Open cjlee112/pygr/pygr/coordinator.py/parse_argv
622
def start_client_or_server(clientGenerator, serverGenerator, resources,
                           script):
    """start controller, client or server depending on whether we get the
    coordinator argument from the command-line args.

    Client must be a generator function that takes a Processor as argument
    and uses it as an iterator. Also, clientGenerator must yield the IDs
    that the Processor provides (this structure allows us to trap all
    exceptions from clientGenerator, while allowing it to do resource
    initializations that would be much less elegant in a callback
    function). Server must be a function that returns an iterator
    (e.g. a generator). Resources is a list of strings naming the resources
    we need copied to the local host for the client to be able to do its
    work. Both client and server constructors use **kwargs to get command
    line arguments (passed as GNU-style --foo=bar; see the constructor
    arguments for the list of options that each can be passed).

    Call like this from yourscript.py:

    import coordinator
    if __name__ == '__main__':
        coordinator.start_client_or_server(clientGen, serverGen,
                                           resources, __file__)

    To start the resource controller:
    python coordinator.py --rc=NAME [options]

    To start a job coordinator:
    python yourscript.py NAME [--rc_url=URL] [options]

    To start a job processor:
    python yourscript.py --url=URL --rc_url=URL [options]"""
    d, l = parse_argv()
    if 'url' in d:  # WE ARE A CLIENT!
        client = Processor(**d)
        time.sleep(5)  # GIVE THE SERVER SOME BREATHING SPACE
        client.run_all(clientGenerator, **d)
    elif 'rc' in d:  # WE ARE THE RESOURCE CONTROLLER
        rc_server = ResourceController(**d)  # NAME FOR THIS CONTROLLER...
        detach_as_demon_process(rc_server)
        rc_server()  # START THE SERVER
    else:  # WE ARE A SERVER
        try:  # PASS OUR KWARGS TO THE SERVER FUNCTION
            it = serverGenerator(**d)
        except __HOLE__:  # DOESN'T WANT ANY ARGS?
            it = serverGenerator()
        server = Coordinator(l[0], script, it, resources, **d)
        detach_as_demon_process(server)
        server()  # START THE SERVER
TypeError
dataset/ETHPy150Open cjlee112/pygr/pygr/coordinator.py/start_client_or_server
623
def _run_pending_tasks():
    now = time.time()
    old_pending = list(_pending_delayed_tasks)
    del _pending_delayed_tasks[:]
    for t in old_pending:
        if not _main_loop_running:
            return
        if now >= t.run_at_or_after:
            try:
                t.cb()
            except __HOLE__:
                raise
            except:
                _on_exception()
        else:
            _pending_delayed_tasks.append(t)
    _pending_delayed_tasks.sort(lambda x, y: cmp(x, y))
KeyboardInterrupt
dataset/ETHPy150Open natduca/quickopen/src/message_loop_curses.py/_run_pending_tasks
624
def run_main_loop():
    global _old_std
    _old_std = [sys.stdout, sys.stderr]
    if DEBUG:
        tempStdout = open('/tmp/quickopen.stdout', 'w', 0)
        sys.stdout = tempStdout
        sys.stderr = sys.stdout
    else:
        tempStdout = cStringIO.StringIO()
        sys.stdout = tempStdout
        sys.stderr = sys.stdout

    assert not is_main_loop_running()
    if _unittests_running and not _active_test:
        del _pending_delayed_tasks[:]
        del _quit_handlers[:]
        raise Exception(
            "UITestCase must be used for tests that use the message_loop.")

    global _main_loop_running
    global _stdscr
    global _quitting

    def main(stdscr):
        global _stdscr
        _stdscr = stdscr
        while _main_loop_running:
            now = time.time()
            if len(_pending_delayed_tasks) > 0:
                delay = max(0, _pending_delayed_tasks[0].run_at_or_after - now)
            else:
                delay = 0.1
            try:
                r, w, e = select.select([sys.stdin], [], [], delay)
            except __HOLE__:
                raise
            except:
                continue
            if not _main_loop_running:
                break
            if r:
                if on_terminal_readable.has_listeners:
                    on_terminal_readable.fire()
                else:
                    print "unhandled character:", _stdscr.getch()
            _run_pending_tasks()

    try:
        _main_loop_running = True
        curses.wrapper(main)
    except KeyboardInterrupt:
        traceback.print_exc()
        raise
    except:
        traceback.print_exc()
    finally:
        _stdscr = None
        _quitting = False
        _main_loop_running = False
        del _pending_delayed_tasks[:]
        sys.stdout = _old_std[0]
        sys.stderr = _old_std[1]
        if DEBUG:
            tempStdout.close()
            res = open('/tmp/quickopen.stdout')
            sys.stdout.write(res.read())
            res.close()
        else:
            sys.stdout.write(tempStdout.getvalue())
KeyboardInterrupt
dataset/ETHPy150Open natduca/quickopen/src/message_loop_curses.py/run_main_loop
625
def format(self, record):
    """Returns a JSON string based on a LogRecord instance.

    Args:
      record: A LogRecord instance. See below for details.

    Returns:
      A JSON string representing the record.

    A LogRecord instance has the following attributes and is used for
    formatting the final message.

    Attributes:
      created: A double representing the timestamp for record creation
        (e.g., 1438365207.624597). Note that the number contains also msecs
        and microsecs information. Part of this is also available in the
        'msecs' attribute.
      msecs: A double representing the msecs part of the record creation
        (e.g., 624.5970726013184).
      msg: Logging message containing formatting instructions or an
        arbitrary object. This is the first argument of a log call.
      args: A tuple containing the positional arguments for the logging
        call.
      levelname: A string. Possible values are: INFO, WARNING, ERROR, etc.
      exc_info: None or a 3-tuple with exception information as it is
        returned by a call to sys.exc_info().
      name: Logger's name. Most logging is done using the default root
        logger and therefore the name will be 'root'.
      filename: Basename of the file where logging occurred.
      funcName: Name of the function where logging occurred.
      process: The PID of the process running the worker.
      thread: An id for the thread where the record was logged. This is not
        a real TID (the one provided by OS) but rather the id (address) of a
        Python thread object. Nevertheless having this value can allow to
        filter log statements from only one specific thread.
    """
    output = {}
    output['timestamp'] = {
        'seconds': int(record.created),
        'nanos': int(record.msecs * 1000000)}
    # ERROR, INFO, DEBUG log levels translate into the same value for the
    # severity property. WARNING becomes WARN.
    output['severity'] = (
        record.levelname if record.levelname != 'WARNING' else 'WARN')
    # msg could be an arbitrary object, convert it to a string first.
    record_msg = str(record.msg)
    # Prepare the actual message using the message formatting string and the
    # positional arguments as they have been used in the log call.
    if record.args:
        try:
            output['message'] = record_msg % record.args
        except (__HOLE__, ValueError):
            output['message'] = '%s with args (%s)' % (record_msg,
                                                       record.args)
    else:
        output['message'] = record_msg
    # The thread ID is logged as a combination of the process ID and thread
    # ID since workers can run in multiple processes.
    output['thread'] = '%s:%s' % (record.process, record.thread)
    # Job ID and worker ID. These do not change during the lifetime of a
    # worker.
    output['job'] = self.job_id
    output['worker'] = self.worker_id
    # Stage, step and work item ID come from thread local storage since they
    # change with every new work item leased for execution. If there is no
    # work item ID then we make sure the step is undefined too.
    data = per_thread_worker_data.get_data()
    if 'work_item_id' in data:
        output['work'] = data['work_item_id']
    if 'stage_name' in data:
        output['stage'] = data['stage_name']
    if 'step_name' in data:
        output['step'] = data['step_name']
    # All logging happens using the root logger. We will add the basename of
    # the file and the function name where the logging happened to make it
    # easier to identify who generated the record.
    output['logger'] = '%s:%s:%s' % (
        record.name, record.filename, record.funcName)
    # Add exception information if any is available.
    if record.exc_info:
        output['exception'] = ''.join(
            traceback.format_exception(*record.exc_info))
    return json.dumps(output)
TypeError
dataset/ETHPy150Open GoogleCloudPlatform/DataflowPythonSDK/google/cloud/dataflow/worker/logger.py/JsonLogFormatter.format
626
def test_argument_checking(self):
    self.assertRaises(TypeError, self.thetype)  # need at least a func arg
    try:
        self.thetype(2)()
    except __HOLE__:
        pass
    else:
        self.fail('First arg not checked for callability')
TypeError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_functools.py/TestPartial.test_argument_checking
627
@unittest.skip("FIXME: Not working in Jython.")
def test_attributes(self):
    p = self.thetype(hex)
    try:
        del p.__dict__
    except __HOLE__:
        pass
    else:
        self.fail('partial object allowed __dict__ to be deleted')
TypeError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_functools.py/TestPartial.test_attributes
628
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--drop-data', default=False,
                        action="store_true")
    parser.add_argument('-k', '--keep-torrent', default=False,
                        action="store_true")
    args = parser.parse_args()

    if sys.stdin.isatty():
        parser.error('no input, pipe another btc command output'
                     ' into this command')
    torrents = sys.stdin.read()
    if len(torrents.strip()) == 0:
        exit(1)

    try:
        torrents = decoder.decode(torrents)
    except __HOLE__:
        error('unexpected input: %s' % torrents)

    hashes = [t['hash'] for t in torrents]
    for h in hashes:
        client.remove_torrent(h, keep_data=not args.drop_data,
                              keep_torrent=args.keep_torrent)

    while True:
        l = client.list_torrents()
        all_removed = True
        for t in l:
            if t['hash'] in hashes:
                all_removed = False
                break
        if all_removed:
            break
        time.sleep(1)
ValueError
dataset/ETHPy150Open bittorrent/btc/btc/btc_remove.py/main
629
def __repr__(self):
    try:
        u = six.text_type(self)
    except (UnicodeEncodeError, __HOLE__):
        u = '[Bad Unicode data]'
    return force_str('<%s: %s>' % (self.__class__.__name__, u))
UnicodeDecodeError
dataset/ETHPy150Open thoas/django-sequere/sequere/contrib/timeline/action.py/Action.__repr__
630
def is_int(self, text):
    try:
        int(text)
        return True
    except __HOLE__:
        return False
ValueError
dataset/ETHPy150Open cloudera/hue/apps/oozie/src/oozie/models2.py/Dataset.is_int
631
def execute(self, item, context):
    try:
        obj, action = item
    except __HOLE__:
        self.logger.exception('Error executing on item: {0}'.format(item))
        return
    try:
        func = getattr(self.sqlite_manager, action)
    except AttributeError:
        self.logger.exception(
            'unable to run action, {0}, no such method'.format(action)
        )
        raise
    try:
        if isinstance(obj, tuple):
            func(*obj)
        else:
            func(obj)
    except OperationalError:
        self.logger.exception("Unsupported operation")
        return
ValueError
dataset/ETHPy150Open facebook/augmented-traffic-control/atc/atcd/atcd/AtcdDBQueueTask.py/AtcdDBQueueTask.execute
632
def validate_document(self, document):
    self.schema = document.schema
    collection_name, errors = type(document).__name__, {}

    self.validate(document.document)

    for key, _errors in self.flattened_errors.items():
        field = key
        field_schema = self.get_field_schema(field)
        _errors = [_errors] if not isinstance(_errors, list) else _errors

        for _error in _errors:
            error = None

            if _error == 'unknown field':
                error = UnknownFieldError(collection_name, field)
            elif _error == 'required field':
                error = RequiredFieldError(collection_name, field)
            elif _error == 'field is read-only':
                error = ReadOnlyFieldError(collection_name, field)
            elif _error.startswith('must be ') and _error.endswith('type'):
                type_or_types = field_schema['type']
                if isinstance(type_or_types, list):
                    type_or_types = ' or '.join(type_or_types)
                error = FieldTypeError(
                    collection_name, field, type_or_types
                )
            elif 'does not match regex' in _error:
                error = RegexMatchError(
                    collection_name, field,
                    _error.split('match regex \'')[1][:-1]
                )
            elif _error.startswith('min length is'):
                error = MinLengthError(
                    collection_name, field, field_schema['minlength']
                )
            elif _error.startswith('max length is'):
                error = MaxLengthError(
                    collection_name, field, field_schema['maxlength']
                )
            elif _error.startswith('length of '):
                error = LengthError(
                    collection_name, field, len(field_schema['items'])
                )
            elif _error.startswith('unallowed value '):
                error = ValueNotAllowedError(
                    collection_name, field,
                    _error.split('unallowed value ')[1]
                )
            elif _error.startswith('unallowed values '):
                error = ValuesNotAllowedError(
                    collection_name, field,
                    _error.split('unallowed values ')[1]
                )
            elif _error.startswith('min value is '):
                error = MinValueError(
                    collection_name, field, field_schema['min']
                )
            elif _error.startswith('max value is '):
                error = MaxValueError(
                    collection_name, field, field_schema['max']
                )

            if error and isinstance(error, SchemaValidationError):
                try:
                    errors['errors'].append(error)
                except __HOLE__:
                    errors = DocumentValidationError(
                        collection_name, self.schema, document.document,
                        [error]
                    )

    if errors:
        if 'errors' in errors:
            errors['errors'] = sorted(
                errors['errors'], key=lambda e: e['field']
            )
        document._errors = errors
        return False

    return True
KeyError
dataset/ETHPy150Open lvieirajr/mongorest/mongorest/validator.py/Validator.validate_document
633
def patch_json():
    try:
        import ujson  # noqa
    except __HOLE__:
        # ujson is not available, we won't patch anything
        return
    patch_module('json', ['dumps', 'loads'])
ImportError
dataset/ETHPy150Open ziirish/burp-ui/burpui/_compat.py/patch_json
634
def _transform_object(self, obj):
    wrapped = Transformable(obj)
    error_component = wrapped['error_component'].resolve()
    if error_component is not None and error_component == 'connect':
        raise errors.IgnoreObject("Error connecting")
    banner = wrapped['data']['banner'].resolve()
    starttls = wrapped['data']['starttls'].resolve()
    zout = ZMapTransformOutput()
    try:
        tls_handshake = obj['data']['tls']
        out, certificates = https.HTTPSTransform.make_tls_obj(tls_handshake)
        zout.transformed['tls'] = out
        zout.certificates = certificates
    except (TypeError, __HOLE__, IndexError):
        pass
    if banner is not None:
        zout.transformed['banner'] = self.clean_banner(banner)
    if starttls is not None:
        zout.transformed['starttls'] = self.clean_banner(starttls)
    if len(zout.transformed) == 0:
        raise errors.IgnoreObject("Empty output dict")
    return zout
KeyError
dataset/ETHPy150Open zmap/ztag/ztag/transforms/pop3.py/POP3StartTLSTransform._transform_object
635
def _transform_object(self, obj):
    wrapped = Transformable(obj)
    error_component = wrapped['error_component'].resolve()
    if error_component is not None and error_component == 'connect':
        raise errors.IgnoreObject("Error connecting")
    banner = wrapped['data']['banner'].resolve()
    zout = ZMapTransformOutput()
    try:
        tls_handshake = obj['data']['tls']
        out, certificates = https.HTTPSTransform.make_tls_obj(tls_handshake)
        zout.transformed['tls'] = out
        zout.certificates = certificates
    except (TypeError, KeyError, __HOLE__):
        pass
    if banner is not None:
        zout.transformed['banner'] = self.clean_banner(banner)
    if len(zout.transformed) == 0:
        raise errors.IgnoreObject("Empty output dict")
    return zout
IndexError
dataset/ETHPy150Open zmap/ztag/ztag/transforms/pop3.py/POP3STransform._transform_object
636
def _UnlockedVerifyConfig(self):
    """Verify function.

    @rtype: list
    @return: a list of error messages; a non-empty list signifies
        configuration errors

    """
    # pylint: disable=R0914
    result = []
    seen_macs = []
    ports = {}
    data = self._ConfigData()
    cluster = data.cluster

    # First call WConfd to perform its checks, if we're not offline
    if not self._offline:
        try:
            self._wconfd.VerifyConfig()
        except errors.ConfigVerifyError, err:
            try:
                for msg in err.args[1]:
                    result.append(msg)
            except __HOLE__:
                pass

    # check cluster parameters
    VerifyType("cluster", "beparams", cluster.SimpleFillBE({}),
               constants.BES_PARAMETER_TYPES, result.append)
    VerifyType("cluster", "nicparams", cluster.SimpleFillNIC({}),
               constants.NICS_PARAMETER_TYPES, result.append)
    VerifyNic("cluster", cluster.SimpleFillNIC({}), result.append)
    VerifyType("cluster", "ndparams", cluster.SimpleFillND({}),
               constants.NDS_PARAMETER_TYPES, result.append)
    VerifyIpolicy("cluster", cluster.ipolicy, True, result.append)

    for disk_template in cluster.diskparams:
        if disk_template not in constants.DTS_HAVE_ACCESS:
            continue

        access = cluster.diskparams[disk_template].get(
            constants.LDP_ACCESS, constants.DISK_KERNELSPACE)
        if access not in constants.DISK_VALID_ACCESS_MODES:
            result.append(
                "Invalid value of '%s:%s': '%s' (expected one of %s)" % (
                    disk_template, constants.LDP_ACCESS, access,
                    utils.CommaJoin(constants.DISK_VALID_ACCESS_MODES)
                )
            )

    self._VerifyDisks(data, result)

    # per-instance checks
    for instance_uuid in data.instances:
        instance = data.instances[instance_uuid]
        if instance.uuid != instance_uuid:
            result.append("instance '%s' is indexed by wrong UUID '%s'" %
                          (instance.name, instance_uuid))
        if instance.primary_node not in data.nodes:
            result.append("instance '%s' has invalid primary node '%s'" %
                          (instance.name, instance.primary_node))
        for snode in self._UnlockedGetInstanceSecondaryNodes(instance.uuid):
            if snode not in data.nodes:
                result.append("instance '%s' has invalid secondary node"
                              " '%s'" % (instance.name, snode))
        for idx, nic in enumerate(instance.nics):
            if nic.mac in seen_macs:
                result.append("instance '%s' has NIC %d mac %s duplicate" %
                              (instance.name, idx, nic.mac))
            else:
                seen_macs.append(nic.mac)
            if nic.nicparams:
                filled = cluster.SimpleFillNIC(nic.nicparams)
                owner = "instance %s nic %d" % (instance.name, idx)
                VerifyType(owner, "nicparams", filled,
                           constants.NICS_PARAMETER_TYPES, result.append)
                VerifyNic(owner, filled, result.append)

        # parameter checks
        if instance.beparams:
            VerifyType("instance %s" % instance.name, "beparams",
                       cluster.FillBE(instance),
                       constants.BES_PARAMETER_TYPES, result.append)

        # check that disks exists
        for disk_uuid in instance.disks:
            if disk_uuid not in data.disks:
                result.append("Instance '%s' has invalid disk '%s'" %
                              (instance.name, disk_uuid))

        instance_disks = self._UnlockedGetInstanceDisks(instance.uuid)
        # gather the drbd ports for duplicate checks
        for (idx, dsk) in enumerate(instance_disks):
            if dsk.dev_type in constants.DTS_DRBD:
                tcp_port = dsk.logical_id[2]
                if tcp_port not in ports:
                    ports[tcp_port] = []
                ports[tcp_port].append((instance.name, "drbd disk %s" % idx))
        # gather network port reservation
        net_port = getattr(instance, "network_port", None)
        if net_port is not None:
            if net_port not in ports:
                ports[net_port] = []
            ports[net_port].append((instance.name, "network port"))

        wrong_names = _CheckInstanceDiskIvNames(instance_disks)
        if wrong_names:
            tmp = "; ".join(("name of disk %s should be '%s', but is '%s'" %
                             (idx, exp_name, actual_name))
                            for (idx, exp_name, actual_name) in wrong_names)

            result.append("Instance '%s' has wrongly named disks: %s" %
                          (instance.name, tmp))

    # cluster-wide pool of free ports
    for free_port in cluster.tcpudp_port_pool:
        if free_port not in ports:
            ports[free_port] = []
        ports[free_port].append(("cluster", "port marked as free"))

    # compute tcp/udp duplicate ports
    keys = ports.keys()
    keys.sort()
    for pnum in keys:
        pdata = ports[pnum]
        if len(pdata) > 1:
            txt = utils.CommaJoin(["%s/%s" % val for val in pdata])
            result.append("tcp/udp port %s has duplicates: %s" % (pnum, txt))

    # highest used tcp port check
    if keys:
        if keys[-1] > cluster.highest_used_port:
            result.append("Highest used port mismatch, saved %s,"
                          " computed %s" %
                          (cluster.highest_used_port, keys[-1]))

    if not data.nodes[cluster.master_node].master_candidate:
        result.append("Master node is not a master candidate")

    # master candidate checks
    mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats()
    if mc_now < mc_max:
        result.append("Not enough master candidates: actual %d, target %d" %
                      (mc_now, mc_max))

    # node checks
    for node_uuid, node in data.nodes.items():
        if node.uuid != node_uuid:
            result.append("Node '%s' is indexed by wrong UUID '%s'" %
                          (node.name, node_uuid))
        if [node.master_candidate, node.drained,
            node.offline].count(True) > 1:
            result.append("Node %s state is invalid: master_candidate=%s,"
                          " drain=%s, offline=%s" %
                          (node.name, node.master_candidate,
                           node.drained, node.offline))
        if node.group not in data.nodegroups:
            result.append("Node '%s' has invalid group '%s'" %
                          (node.name, node.group))
        else:
            VerifyType("node %s" % node.name, "ndparams",
                       cluster.FillND(node, data.nodegroups[node.group]),
                       constants.NDS_PARAMETER_TYPES, result.append)
        used_globals = constants.NDC_GLOBALS.intersection(node.ndparams)
        if used_globals:
            result.append("Node '%s' has some global parameters set: %s" %
                          (node.name, utils.CommaJoin(used_globals)))

    # nodegroups checks
    nodegroups_names = set()
    for nodegroup_uuid in data.nodegroups:
        nodegroup = data.nodegroups[nodegroup_uuid]
        if nodegroup.uuid != nodegroup_uuid:
            result.append("node group '%s' (uuid: '%s') indexed by wrong"
                          " uuid '%s'" %
                          (nodegroup.name, nodegroup.uuid, nodegroup_uuid))
        if utils.UUID_RE.match(nodegroup.name.lower()):
            result.append("node group '%s' (uuid: '%s') has uuid-like name" %
                          (nodegroup.name, nodegroup.uuid))
        if nodegroup.name in nodegroups_names:
            result.append("duplicate node group name '%s'" % nodegroup.name)
        else:
            nodegroups_names.add(nodegroup.name)
        group_name = "group %s" % nodegroup.name
        VerifyIpolicy(group_name,
                      cluster.SimpleFillIPolicy(nodegroup.ipolicy),
                      False, result.append)
        if nodegroup.ndparams:
            VerifyType(group_name, "ndparams",
                       cluster.SimpleFillND(nodegroup.ndparams),
                       constants.NDS_PARAMETER_TYPES, result.append)

    # drbd minors check
    # FIXME: The check for DRBD map needs to be implemented in WConfd

    # IP checks
    default_nicparams = cluster.nicparams[constants.PP_DEFAULT]
    ips = {}

    def _AddIpAddress(ip, name):
        ips.setdefault(ip, []).append(name)

    _AddIpAddress(cluster.master_ip, "cluster_ip")

    for node in data.nodes.values():
        _AddIpAddress(node.primary_ip, "node:%s/primary" % node.name)
        if node.secondary_ip != node.primary_ip:
            _AddIpAddress(node.secondary_ip, "node:%s/secondary" % node.name)

    for instance in data.instances.values():
        for idx, nic in enumerate(instance.nics):
            if nic.ip is None:
                continue

            nicparams = objects.FillDict(default_nicparams, nic.nicparams)
            nic_mode = nicparams[constants.NIC_MODE]
            nic_link = nicparams[constants.NIC_LINK]

            if nic_mode == constants.NIC_MODE_BRIDGED:
                link = "bridge:%s" % nic_link
            elif nic_mode == constants.NIC_MODE_ROUTED:
                link = "route:%s" % nic_link
            elif nic_mode == constants.NIC_MODE_OVS:
                link = "ovs:%s" % nic_link
            else:
                raise errors.ProgrammerError("NIC mode '%s' not handled" %
                                             nic_mode)

            _AddIpAddress("%s/%s/%s" % (link, nic.ip, nic.network),
                          "instance:%s/nic:%d" % (instance.name, idx))

    for ip, owners in ips.items():
        if len(owners) > 1:
            result.append("IP address %s is used by multiple owners: %s" %
                          (ip, utils.CommaJoin(owners)))

    return result
IndexError
dataset/ETHPy150Open ganeti/ganeti/lib/config/__init__.py/ConfigWriter._UnlockedVerifyConfig
637
@ConfigSync()
def AssignGroupNodes(self, mods):
    """Changes the group of a number of nodes.

    @type mods: list of tuples; (node name, new group UUID)
    @param mods: Node membership modifications

    """
    groups = self._ConfigData().nodegroups
    nodes = self._ConfigData().nodes

    resmod = []

    # Try to resolve UUIDs first
    for (node_uuid, new_group_uuid) in mods:
        try:
            node = nodes[node_uuid]
        except __HOLE__:
            raise errors.ConfigurationError("Unable to find node '%s'" %
                                            node_uuid)

        if node.group == new_group_uuid:
            # Node is being assigned to its current group
            logging.debug("Node '%s' was assigned to its current group (%s)",
                          node_uuid, node.group)
            continue

        # Try to find current group of node
        try:
            old_group = groups[node.group]
        except KeyError:
            raise errors.ConfigurationError("Unable to find old group '%s'" %
                                            node.group)

        # Try to find new group for node
        try:
            new_group = groups[new_group_uuid]
        except KeyError:
            raise errors.ConfigurationError("Unable to find new group '%s'" %
                                            new_group_uuid)

        assert node.uuid in old_group.members, \
            ("Inconsistent configuration: node '%s' not listed in members"
             " for its old group '%s'" % (node.uuid, old_group.uuid))
        assert node.uuid not in new_group.members, \
            ("Inconsistent configuration: node '%s' already listed in"
             " members for its new group '%s'" % (node.uuid, new_group.uuid))

        resmod.append((node, old_group, new_group))

    # Apply changes
    for (node, old_group, new_group) in resmod:
        assert node.uuid != new_group.uuid and \
               old_group.uuid != new_group.uuid, \
            "Assigning to current group is not possible"

        node.group = new_group.uuid

        # Update members of involved groups
        if node.uuid in old_group.members:
            old_group.members.remove(node.uuid)
        if node.uuid not in new_group.members:
            new_group.members.append(node.uuid)

    # Update timestamps and serials (only once per node/group object)
    now = time.time()
    for obj in frozenset(itertools.chain(*resmod)):  # pylint: disable=W0142
        obj.serial_no += 1
        obj.mtime = now

    # Force ssconf update
    self._ConfigData().cluster.serial_no += 1
KeyError
dataset/ETHPy150Open ganeti/ganeti/lib/config/__init__.py/ConfigWriter.AssignGroupNodes
638
def publish(self, argv=None, usage=None, description=None,
            settings_spec=None, settings_overrides=None,
            config_section=None, enable_exit_status=None):
    """
    Process command line options and arguments (if `self.settings` not
    already set), run `self.reader` and then `self.writer`.  Return
    `self.writer`'s output.
    """
    exit = None
    try:
        if self.settings is None:
            self.process_command_line(
                argv, usage, description, settings_spec, config_section,
                **(settings_overrides or {}))
        self.set_io()
        self.document = self.reader.read(self.source, self.parser,
                                         self.settings)
        self.apply_transforms()
        output = self.writer.write(self.document, self.destination)
        self.writer.assemble_parts()
    except __HOLE__, error:
        exit = 1
        exit_status = error.code
    except Exception, error:
        if not self.settings:  # exception too early to report nicely
            raise
        if self.settings.traceback:  # Propagate exceptions?
            self.debugging_dumps()
            raise
        self.report_Exception(error)
        exit = 1
        exit_status = 1
    self.debugging_dumps()
    if (enable_exit_status and self.document
        and (self.document.reporter.max_level
             >= self.settings.exit_status_level)):
        sys.exit(self.document.reporter.max_level + 10)
    elif exit:
        sys.exit(exit_status)
    return output
SystemExit
dataset/ETHPy150Open adieu/allbuttonspressed/docutils/core.py/Publisher.publish
639
def __iter__(self):
    for line in self.fp:
        m = log_re.match(line.strip())
        d = m.groupdict()
        d['remote_addr'] = d['remote_addr'].replace('"', '')
        try:
            request = d.pop('request')
            method, path, httpver = request.split(' ')
        except ValueError:
            method, path, httpver = None, None, None
        try:
            d['bytes'] = int(d['bytes'])
        except __HOLE__:
            d['bytes'] = 0
        d['status'] = int(d['status'])
        yield d
ValueError
dataset/ETHPy150Open samuel/squawk/squawk/parsers/access_log.py/AccessLogParser.__iter__
640
def __getattr__(self, name):
    try:
        return self[name]
    except __HOLE__:
        raise AttributeError(name)
KeyError
dataset/ETHPy150Open QuantEcon/QuantEcon.py/quantecon/markov/ddp.py/DPSolveResult.__getattr__
641
@property
def OptionParser(self):
    if self._optparse is None:
        try:
            me = 'repo %s' % self.NAME
            usage = self.helpUsage.strip().replace('%prog', me)
        except __HOLE__:
            usage = 'repo %s' % self.NAME
        self._optparse = optparse.OptionParser(usage=usage)
        self._Options(self._optparse)
    return self._optparse
AttributeError
dataset/ETHPy150Open esrlabs/git-repo/command.py/Command.OptionParser
642
def _GetProjectByPath(self, manifest, path):
    project = None
    if os.path.exists(path):
        oldpath = None
        while path \
              and path != oldpath \
              and path != manifest.topdir:
            try:
                project = self._by_path[path]
                break
            except KeyError:
                oldpath = path
                path = os.path.dirname(path)
    else:
        try:
            project = self._by_path[path]
        except __HOLE__:
            pass
    return project
KeyError
dataset/ETHPy150Open esrlabs/git-repo/command.py/Command._GetProjectByPath
643
def __init__(self, event_type, timestamp, **kwargs):
    self.event_type = event_type  # All events have these two attributes
    self.timestamp = timestamp
    for attribute in self.KNOWN_ATTRIBUTES:
        try:
            self._set(attribute, kwargs[attribute])
        except __HOLE__:
            marathon.log.warn(
                'Unknown event attribute processing event {}: {}'.format(
                    event_type, attribute))
KeyError
dataset/ETHPy150Open thefactory/marathon-python/marathon/models/events.py/MarathonEvent.__init__
644
def send(self, msg):
    messagestr = pickle.dumps(msg)
    message = struct.pack("I", len(messagestr)) + messagestr
    try:
        while len(message) > 0:
            try:
                bytesent = self.thesocket.send(message)
                message = message[bytesent:]
            except IOError, e:
                if isinstance(e.args, tuple):
                    if e[0] == errno.EAGAIN:
                        continue
                    else:
                        raise e
            except AttributeError, e:
                raise e
        return True
    except socket.error, e:
        if isinstance(e.args, tuple):
            if e[0] == errno.EPIPE:
                print "Remote disconnect"
                return False
    except IOError, e:
        print "Send Error: ", e
    except __HOLE__, e:
        print "Socket deleted."
    return False
AttributeError
dataset/ETHPy150Open denizalti/concoord/concoord/object/pinger.py/Pinger.send
645
@magic.line_magic
@magic_arguments()
@argument('address', type=hexint_tuple, nargs='+',
          help='Single hex address, or a range start:end'
               ' including both endpoints')
def watch(self, line):
    """Watch memory for changes, shows the results in an ASCII data table.

    To use the results programmatically, see the watch_scanner() and
    watch_tabulator() functions.

    Keeps running until you kill it with a KeyboardInterrupt.
    """
    args = parse_argstring(self.watch, line)
    d = self.shell.user_ns['d']
    changes = watch_scanner(d, args.address)
    try:
        for line in watch_tabulator(changes):
            self.shell.write(line + '\n')
    except __HOLE__:
        pass
KeyboardInterrupt
dataset/ETHPy150Open scanlime/coastermelt/backdoor/shell_magics.py/ShellMagics.watch
646
@magic.line_magic
@magic_arguments()
@argument('address', type=hexint_aligned, nargs='?')
@argument('wordcount', type=hexint, nargs='?', default=1,
          help='Number of words to remap')
@argument('-d', '--delay', type=float, default=0.05, metavar='SEC',
          help='Add a delay between rounds')
@argument('-p', '--period', type=int, default=8, metavar='N',
          help='Number of rounds per cycle repeat')
def bitfuzz(self, line):
    """Scan a small number of words in binary while writing
    00000000/ffffffff patterns.

    This can help determine the implementation of bits in an MMIO register.
    """
    args = parse_argstring(self.bitfuzz, line)
    d = self.shell.user_ns['d']
    try:
        for line in bitfuzz_rounds(d, args.address, args.wordcount,
                                   args.period, args.delay):
            print line
    except __HOLE__:
        return
KeyboardInterrupt
dataset/ETHPy150Open scanlime/coastermelt/backdoor/shell_magics.py/ShellMagics.bitfuzz
647
def download_appstats(servername, appid, path, secure, rpc_server_factory,
                      filename, appdir, merge, java_application):
    """Invoke remote_api to download appstats data."""
    if os.path.isdir(appdir):
        sys.path.insert(0, appdir)
        try:
            logging.info('Importing appengine_config from %s', appdir)
            import appengine_config
        except __HOLE__, err:
            logging.warn('Failed to load appengine_config: %s', err)

    remote_api_stub.ConfigureRemoteApi(appid, path, auth_func,
                                       servername=servername,
                                       save_cookies=True, secure=secure,
                                       rpc_server_factory=rpc_server_factory)
    remote_api_stub.MaybeInvokeAuthentication()

    os.environ['SERVER_SOFTWARE'] = 'Development (remote_api_shell)/1.0'

    if not appid:
        appid = os.environ['APPLICATION_ID']
    download_data(filename, merge, java_application)
ImportError
dataset/ETHPy150Open AppScale/appscale/AppServer/google/appengine/tools/download_appstats.py/download_appstats
648
def download_data(filename, merge, java_application):
    """Download appstats data from memcache."""
    oldrecords = []
    oldfile = None
    if merge:
        try:
            oldfile = open(filename, 'rb')
        except __HOLE__:
            logging.info('No file to merge. Creating new file %s', filename)
    if oldfile:
        logging.info('Merging with existing file %s', filename)
        oldrecords = loader.UnpickleFromFile(oldfile)
        oldfile.close()
    if oldrecords:
        last_timestamp = oldrecords[0].start_timestamp_milliseconds()
        records = loader.FromMemcache(filter_timestamp=last_timestamp,
                                      java_application=java_application)
    else:
        records = loader.FromMemcache(java_application=java_application)
    merged_records = records + oldrecords
    try:
        outfile = open(filename, 'wb')
    except IOError:
        logging.error('Cannot open %s', filename)
        return
    loader.PickleToFile(merged_records, outfile)
    outfile.close()
IOError
dataset/ETHPy150Open AppScale/appscale/AppServer/google/appengine/tools/download_appstats.py/download_data
649
def run_from_argv(self, argv):
    """
    Changes the option_list to use the options from the wrapped command.
    Adds schema parameter to specify which schema will be used when
    executing the wrapped command.
    """
    # load the command object.
    try:
        app_name = get_commands()[argv[2]]
    except __HOLE__:
        raise CommandError("Unknown command: %r" % argv[2])

    if isinstance(app_name, BaseCommand):
        # if the command is already loaded, use it directly.
        klass = app_name
    else:
        klass = load_command_class(app_name, argv[2])

    # Ugly, but works. Delete tenant_command from the argv, parse the schema
    # manually and forward the rest of the arguments to the actual command
    # being wrapped.
    del argv[1]
    schema_parser = argparse.ArgumentParser()
    schema_parser.add_argument("-s", "--schema", dest="schema_name",
                               help="specify tenant schema")
    schema_namespace, args = schema_parser.parse_known_args(argv)

    tenant = self.get_tenant_from_options_or_interactive(
        schema_name=schema_namespace.schema_name)
    connection.set_tenant(tenant)
    klass.run_from_argv(args)
KeyError
dataset/ETHPy150Open bernardopires/django-tenant-schemas/tenant_schemas/management/commands/tenant_command.py/Command.run_from_argv
650
def __init__(self, *args, **kwds):
    """ A dictionary which maintains the insertion order of keys. """
    if len(args) > 1:
        raise TypeError('expected at most 1 arguments, got %d' % len(args))
    try:
        self.__end
    except __HOLE__:
        self.clear()
    self.update(*args, **kwds)
AttributeError
dataset/ETHPy150Open datastax/python-driver/cassandra/util.py/OrderedDict.__init__
651
def pop(self):
    if self._pending_removals:
        self._commit_removals()
    while True:
        try:
            itemref = self.data.pop()
        except __HOLE__:
            raise KeyError('pop from empty WeakSet')
        item = itemref()
        if item is not None:
            return item
KeyError
dataset/ETHPy150Open datastax/python-driver/cassandra/util.py/WeakSet.pop
652
def __eq__(self, other):
    if isinstance(other, self.__class__):
        return self._items == other._items
    else:
        try:
            return len(other) == len(self._items) and \
                   all(item in self for item in other)
        except __HOLE__:
            return NotImplemented
TypeError
dataset/ETHPy150Open datastax/python-driver/cassandra/util.py/SortedSet.__eq__
653
def __ne__(self, other):
    if isinstance(other, self.__class__):
        return self._items != other._items
    else:
        try:
            return len(other) != len(self._items) or \
                   any(item not in self for item in other)
        except __HOLE__:
            return NotImplemented
TypeError
dataset/ETHPy150Open datastax/python-driver/cassandra/util.py/SortedSet.__ne__
654
def __getitem__(self, key):
    try:
        index = self._index[self._serialize_key(key)]
        return self._items[index][1]
    except __HOLE__:
        raise KeyError(str(key))
KeyError
dataset/ETHPy150Open datastax/python-driver/cassandra/util.py/OrderedMap.__getitem__
655
def __delitem__(self, key):
    # not efficient -- for convenience only
    try:
        index = self._index.pop(self._serialize_key(key))
        self._index = dict((k, i if i < index else i - 1)
                           for k, i in self._index.items())
        self._items.pop(index)
    except __HOLE__:
        raise KeyError(str(key))
KeyError
dataset/ETHPy150Open datastax/python-driver/cassandra/util.py/OrderedMap.__delitem__
656
def __eq__(self, other):
    if isinstance(other, OrderedMap):
        return self._items == other._items
    try:
        d = dict(other)
        return len(d) == len(self._items) and \
               all(i[1] == d[i[0]] for i in self._items)
    except KeyError:
        return False
    except __HOLE__:
        pass
    return NotImplemented
TypeError
dataset/ETHPy150Open datastax/python-driver/cassandra/util.py/OrderedMap.__eq__
657
def popitem(self):
    try:
        kv = self._items.pop()
        del self._index[self._serialize_key(kv[0])]
        return kv
    except __HOLE__:
        raise KeyError()
IndexError
dataset/ETHPy150Open datastax/python-driver/cassandra/util.py/OrderedMap.popitem
658
def _from_timestring(self, s):
    try:
        parts = s.split('.')
        base_time = time.strptime(parts[0], "%H:%M:%S")
        self.nanosecond_time = (base_time.tm_hour * Time.HOUR +
                                base_time.tm_min * Time.MINUTE +
                                base_time.tm_sec * Time.SECOND)
        if len(parts) > 1:
            # right pad to 9 digits
            nano_time_str = parts[1] + "0" * (9 - len(parts[1]))
            self.nanosecond_time += int(nano_time_str)
    except __HOLE__:
        raise ValueError("can't interpret %r as a time" % (s,))
ValueError
dataset/ETHPy150Open datastax/python-driver/cassandra/util.py/Time._from_timestring
659
def read(self):
    """ Validate pidfile and make it stale if needed"""
    if not self.fname:
        return
    try:
        with open(self.fname, "r") as f:
            wpid = int(f.read() or 0)
            if wpid <= 0:
                return
            return wpid
    except __HOLE__:
        return
IOError
dataset/ETHPy150Open quantmind/pulsar/pulsar/utils/tools/pidfile.py/Pidfile.read
660
def to_python(self, value):
    if value and isinstance(value, basestring):
        try:
            return json.loads(value)
        except __HOLE__:
            # TODO: more descriptive error?
            raise ValidationError("Invalid JSON")
    return None
ValueError
dataset/ETHPy150Open zbyte64/django-dockit/dockit/forms/fields.py/HiddenJSONField.to_python
661
def clean(self, data, initial=None):
    ret = list()
    for i, data_item in enumerate(data):
        if data_item.get('DELETE', False):
            continue
        if initial and len(initial) > i:
            initial_item = initial[i]
        else:
            initial_item = None
        val = self.subfield.bound_data(data_item['value'], initial_item)
        arg_spec = inspect.getargspec(self.subfield.clean)
        try:
            if len(arg_spec.args) > 2:
                val = self.subfield.clean(val, initial_item)
            else:
                val = self.subfield.clean(val)
        except __HOLE__:
            if val:
                raise
        else:
            if val:
                ret.append((val, data_item.get('ORDER', None)))

    def compare_ordering_key(k):
        if k[1] is None:
            return (1, 0)  # +infinity, larger than any number
        return (0, k[1])

    ret.sort(key=compare_ordering_key)
    return [item[0] for item in ret]
ValidationError
dataset/ETHPy150Open zbyte64/django-dockit/dockit/forms/fields.py/PrimitiveListField.clean
662
def _shift(self):
    try:
        self._next = self._iterator.next()
    except __HOLE__:
        self._has_next = False
    else:
        self._has_next = True
StopIteration
dataset/ETHPy150Open riffm/mint/mint.py/Looper._shift
663
def iter_changed(interval=1):
    mtimes = {}
    while 1:
        for filename in all_files_by_mask('*.mint'):
            try:
                mtime = os.stat(filename).st_mtime
            except __HOLE__:
                continue
            old_time = mtimes.get(filename)
            if old_time is None:
                mtimes[filename] = mtime
                continue
            elif mtime > old_time:
                mtimes[filename] = mtime
                yield filename
        time.sleep(interval)
OSError
dataset/ETHPy150Open riffm/mint/mint.py/iter_changed
664
def on_selection_modified_async(self, view):
    """Called when the selection changes (cursor moves or text selected)."""
    if self.is_scratch(view):
        return

    view = self.get_focused_view_id(view)

    if view is None:
        return

    vid = view.id()

    # Get the line number of the first line of the first selection.
    try:
        lineno = view.rowcol(view.sel()[0].begin())[0]
    except __HOLE__:
        lineno = -1

    if vid in persist.errors:
        errors = persist.errors[vid]

        if errors:
            lines = sorted(list(errors))
            counts = [len(errors[line]) for line in lines]
            count = sum(counts)
            plural = 's' if count > 1 else ''

            if lineno in errors:
                # Sort the errors by column
                line_errors = sorted(errors[lineno],
                                     key=lambda error: error[0])
                line_errors = [error[1] for error in line_errors]

                if plural:
                    # Sum the errors before the first error on this line
                    index = lines.index(lineno)
                    first = sum(counts[0:index]) + 1

                    if len(line_errors) > 1:
                        last = first + len(line_errors) - 1
                        status = '{}-{} of {} errors: '.format(first, last,
                                                               count)
                    else:
                        status = '{} of {} errors: '.format(first, count)
                else:
                    status = 'Error: '

                status += '; '.join(line_errors)
            else:
                status = '%i error%s' % (count, plural)

            view.set_status('sublimelinter', status)
        else:
            view.erase_status('sublimelinter')
IndexError
dataset/ETHPy150Open SublimeLinter/SublimeLinter3/sublimelinter.py/SublimeLinter.on_selection_modified_async
665
def emit(self, record):
    """
    Emit a record.

    If a formatter is specified, it is used to format the record.
    The record is then written to the stream with a trailing newline
    [N.B. this may be removed depending on feedback]. If exception
    information is present, it is formatted using
    traceback.print_exception and appended to the stream.
    """
    try:
        msg = self.format(record)
        fs = "%s\n"
        if not hasattr(types, "UnicodeType"):  # if no unicode support...
            self.stream.write(fs % msg)
        else:
            try:
                self.stream.write(fs % msg)
            except UnicodeError:
                self.stream.write(fs % msg.encode("UTF-8"))
        self.flush()
    except (__HOLE__, SystemExit):
        raise
    except:
        self.handleError(record)
KeyboardInterrupt
dataset/ETHPy150Open babble/babble/include/jython/Lib/logging/__init__.py/StreamHandler.emit
666
def run(self, edit):
    try:
        repo_root = utils.find_hg_root(self.view.file_name())
    # XXX: Will swallow the same error for the utils. call.
    except __HOLE__:
        msg = "SublimeHg: No server found for this file."
        sublime.status_message(msg)
        return

    running_servers.shut_down(repo_root)
    sublime.status_message("SublimeHg: Killed server for '%s'" % repo_root)
AttributeError
dataset/ETHPy150Open SublimeText/SublimeHg/sublime_hg.py/KillHgServerCommand.run
667
def run(self):
    # The requested command interacts with remote repository or is potentially
    # long-running. We run it in its own console so it can be killed easily
    # by the user. Also, they have a chance to enter credentials if necessary.
    if utils.is_flag_set(self.command_data.flags, RUN_IN_OWN_CONSOLE):
        # FIXME: what if self.fname is None?
        target_dir = (self.fname if os.path.isdir(self.fname)
                      else os.path.dirname(self.fname))
        with utils.pushd(target_dir):
            try:
                run_in_console(self.command_server.hg_bin, self.command,
                               self.command_server.encoding)
            except EnvironmentError, e:
                sublime.status_message("SublimeHg: " + e.message)
                print "SublimeHg: " + e.message
            except __HOLE__, e:
                sublime.status_message("SublimeHg: " + e.message)
                print "SublimeHg: " + e.message
        return

    # Run the requested command through the command server.
    try:
        data, exit_code = run_hg_cmd(self.command_server, self.command)
        sublime.set_timeout(functools.partial(self.show_output, data,
                                              exit_code), 0)
    except UnicodeDecodeError, e:
        print "SublimeHg: Can't handle command string characters."
        print e
    except Exception, e:
        print "SublimeHg: Error while trying to run the command server."
        print "*" * 80
        print e
        print "*" * 80
NotImplementedError
dataset/ETHPy150Open SublimeText/SublimeHg/sublime_hg.py/CommandRunnerWorker.run
668
def _fixMayaOutput():
    if not hasattr(sys.stdout, "flush"):
        def flush(*args, **kwargs):
            pass
        try:
            sys.stdout.flush = flush
        except __HOLE__:
            # second try
            # if hasattr(maya, "Output") and not hasattr(maya.Output, "flush"):
            class MayaOutput(maya.Output):
                def flush(*args, **kwargs):
                    pass
            maya.Output = MayaOutput()
            sys.stdout = maya.Output
AttributeError
dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/maya/site-packages/pymel-1.0.5/pymel/internal/plogging.py/_fixMayaOutput
669
def nameToLevel(name):
    try:
        return int(name)
    except __HOLE__:
        return logLevels.getIndex(name)
ValueError
dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/maya/site-packages/pymel-1.0.5/pymel/internal/plogging.py/nameToLevel
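The nameToLevel/levelToName pair above branches on "is it a number?" by letting int() raise. A self-contained sketch of the same idiom, with an assumed plain list standing in for pymel's logLevels structure:

LEVELS = ['debug', 'info', 'warning', 'error']   # assumed stand-in

def name_to_level(name):
    # Accept either a numeric string ("2") or a symbolic name ("error");
    # int() raising ValueError is the cheap way to detect the second case.
    try:
        return int(name)
    except ValueError:
        return LEVELS.index(name)

assert name_to_level("2") == 2
assert name_to_level("error") == 3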
670
def levelToName(level):
    if not isinstance(level, int):
        raise TypeError(level)
    try:
        return logLevels.getKey(level)
    except __HOLE__:
        return str(level)
ValueError
dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/maya/site-packages/pymel-1.0.5/pymel/internal/plogging.py/levelToName
671
def fix_winpython_pathenv():
    """ Add Python & Python Scripts to the search path on Windows """
    import ctypes
    from ctypes.wintypes import HWND, UINT, WPARAM, LPARAM, LPVOID
    try:
        import _winreg as winreg
    except __HOLE__:
        import winreg

    # took these lines from the native "win_add2path.py"
    pythonpath = os.path.dirname(CURINTERPRETER_PATH)
    scripts = os.path.join(pythonpath, "Scripts")
    if not os.path.isdir(scripts):
        os.makedirs(scripts)

    with winreg.CreateKey(winreg.HKEY_CURRENT_USER, u"Environment") as key:
        try:
            envpath = winreg.QueryValueEx(key, u"PATH")[0]
        except WindowsError:
            envpath = u"%PATH%"

        paths = [envpath]
        for path in (pythonpath, scripts):
            if path and path not in envpath and os.path.isdir(path):
                paths.append(path)

        envpath = os.pathsep.join(paths)
        winreg.SetValueEx(key, u"PATH", 0, winreg.REG_EXPAND_SZ, envpath)
        winreg.ExpandEnvironmentStrings(envpath)

    # notify the system about the changes
    SendMessage = ctypes.windll.user32.SendMessageW
    SendMessage.argtypes = HWND, UINT, WPARAM, LPVOID
    SendMessage.restype = LPARAM
    SendMessage(0xFFFF, 0x1A, 0, u"Environment")
    return True
ImportError
dataset/ETHPy150Open smartanthill/smartanthill1_0/get-smartanthill.py/fix_winpython_pathenv
672
def install_pip():
    try:
        from urllib2 import urlopen
    except __HOLE__:
        from urllib.request import urlopen

    f = NamedTemporaryFile(delete=False)
    response = urlopen("https://bootstrap.pypa.io/get-pip.py")
    f.write(response.read())
    f.close()

    try:
        print(exec_python_cmd([f.name]))
    finally:
        os.unlink(f.name)
ImportError
dataset/ETHPy150Open smartanthill/smartanthill1_0/get-smartanthill.py/install_pip
673
def command_import_teamocil(args):
    """Import teamocil config to tmuxp format."""
    if args.list:
        try:
            configs_in_user = config.in_dir(
                teamocil_config_dir, extensions='yml')
        except __HOLE__:
            configs_in_user = []
        configs_in_cwd = config.in_dir(
            config_dir=cwd_dir, extensions='yml')

        output = ''

        if not os.path.exists(teamocil_config_dir):
            output += '# %s: \n\tDirectory doesn\'t exist.\n' % \
                teamocil_config_dir
        elif not configs_in_user:
            output += '# %s: \n\tNone found.\n' % teamocil_config_dir
        else:
            output += '# %s: \n\t%s\n' % (
                config_dir, ', '.join(configs_in_user)
            )

        if configs_in_cwd:
            output += '# current directory:\n\t%s' % (
                ', '.join(configs_in_cwd)
            )

        print(output)
    elif args.config:
        configfile = os.path.abspath(os.path.relpath(
            os.path.expanduser(args.config)))
        configparser = kaptan.Kaptan(handler='yaml')

        if os.path.exists(configfile):
            print(configfile)
            configparser.import_config(configfile)
            newconfig = config.import_teamocil(configparser.get())
            configparser.import_config(newconfig)
        else:
            sys.exit('File not found: %s' % configfile)

        config_format = prompt_choices('Convert to', choices=[
            'yaml', 'json'], default='yaml')

        if config_format == 'yaml':
            newconfig = configparser.export(
                'yaml', indent=2, default_flow_style=False
            )
        elif config_format == 'json':
            newconfig = configparser.export('json', indent=2)
        else:
            sys.exit('Unknown config format.')

        print(newconfig)
        print(
            '---------------------------------------------------------------')
        print(
            'Configuration import does its best to convert teamocil files.\n')

        if args.answer_yes or prompt_yes_no(
            'The new config *WILL* require adjusting afterwards. Save config?'
        ):
            dest = None
            while not dest:
                dest_prompt = prompt('Save to: ', os.path.abspath(
                    os.path.join(config_dir, 'myimport.%s' % config_format)))
                if os.path.exists(dest_prompt):
                    print('%s exists. Pick a new filename.' % dest_prompt)
                    continue
                dest = dest_prompt

            dest = os.path.abspath(os.path.relpath(os.path.expanduser(dest)))
            if args.answer_yes or prompt_yes_no('Save to %s?' % dest):
                buf = open(dest, 'w')
                buf.write(newconfig)
                buf.close()
                print('Saved to %s.' % dest)
        else:
            print(
                'tmuxp has examples in JSON and YAML format at '
                '<http://tmuxp.readthedocs.org/en/latest/examples.html>\n'
                'View tmuxp docs at <http://tmuxp.readthedocs.org/>'
            )
            sys.exit()
OSError
dataset/ETHPy150Open tony/tmuxp/tmuxp/cli.py/command_import_teamocil
674
def command_import_tmuxinator(args):
    """Import tmuxinator config to tmuxp format."""
    if args.list:
        try:
            configs_in_user = config.in_dir(
                tmuxinator_config_dir, extensions='yml')
        except __HOLE__:
            configs_in_user = []
        configs_in_cwd = config.in_dir(
            config_dir=cwd_dir, extensions='yml')

        output = ''

        if not os.path.exists(tmuxinator_config_dir):
            output += '# %s: \n\tDirectory doesn\'t exist.\n' % \
                tmuxinator_config_dir
        elif not configs_in_user:
            output += '# %s: \n\tNone found.\n' % tmuxinator_config_dir
        else:
            output += '# %s: \n\t%s\n' % (
                config_dir, ', '.join(configs_in_user)
            )

        if configs_in_cwd:
            output += '# current directory:\n\t%s' % (
                ', '.join(configs_in_cwd)
            )

        print(output)

    if args.config:
        configfile = os.path.abspath(os.path.relpath(
            os.path.expanduser(args.config)))
        configparser = kaptan.Kaptan(handler='yaml')

        if os.path.exists(configfile):
            print(configfile)
            configparser.import_config(configfile)
            newconfig = config.import_tmuxinator(configparser.get())
            configparser.import_config(newconfig)
        else:
            sys.exit('File not found: %s' % configfile)

        config_format = prompt_choices('Convert to', choices=[
            'yaml', 'json'], default='yaml')

        if config_format == 'yaml':
            newconfig = configparser.export(
                'yaml', indent=2, default_flow_style=False
            )
        elif config_format == 'json':
            newconfig = configparser.export('json', indent=2)
        else:
            sys.exit('Unknown config format.')

        print(newconfig)
        print(
            '---------------------------------------------------------------')
        print(
            'Configuration import does its best to convert tmuxinator files.\n')

        if args.answer_yes or prompt_yes_no(
            'The new config *WILL* require adjusting afterwards. Save config?'
        ):
            dest = None
            while not dest:
                dest_prompt = prompt('Save to: ', os.path.abspath(
                    os.path.join(config_dir, 'myimport.%s' % config_format)))
                if os.path.exists(dest_prompt):
                    print('%s exists. Pick a new filename.' % dest_prompt)
                    continue
                dest = dest_prompt

            dest = os.path.abspath(os.path.relpath(os.path.expanduser(dest)))
            if args.answer_yes or prompt_yes_no('Save to %s?' % dest):
                buf = open(dest, 'w')
                buf.write(newconfig)
                buf.close()
                print('Saved to %s.' % dest)
        else:
            print(
                'tmuxp has examples in JSON and YAML format at '
                '<http://tmuxp.readthedocs.org/en/latest/examples.html>\n'
                'View tmuxp docs at <http://tmuxp.readthedocs.org/>'
            )
            sys.exit()
OSError
dataset/ETHPy150Open tony/tmuxp/tmuxp/cli.py/command_import_tmuxinator
675
def main():
    """Main CLI application."""
    parser = get_parser()
    argcomplete.autocomplete(parser, always_complete_options=False)

    args = parser.parse_args()

    log_level = 'INFO'
    if 'log_level' in args and isinstance(args.log_level, string_types):
        log_level = args.log_level.upper()

    setup_logger(
        level=log_level
    )

    try:
        util.has_required_tmux_version()
    except exc.TmuxpException as e:
        logger.error(e)
        sys.exit()

    util.oh_my_zsh_auto_title()

    t = Server(
        socket_name=args.socket_name,
        socket_path=args.socket_path,
        colors=args.colors
    )

    try:
        if not hasattr(args, 'callback'):
            parser.print_help()
        elif args.callback is command_load:
            command_load(args)
        elif args.callback is command_convert:
            command_convert(args)
        elif args.callback is command_import_teamocil:
            command_import_teamocil(args)
        elif args.callback is command_import_tmuxinator:
            command_import_tmuxinator(args)
        elif args.callback is command_freeze:
            command_freeze(args)
        elif args.callback is command_attach_session:
            command_attach_session(args)
        elif args.callback is command_kill_session:
            command_kill_session(args)
    except __HOLE__:
        pass
KeyboardInterrupt
dataset/ETHPy150Open tony/tmuxp/tmuxp/cli.py/main
676
def remote(self, name):
    with fasteners.InterProcessLock(self._filename + ".lock"):
        remotes, _ = self._load()
        try:
            return Remote(name, remotes[name])
        except __HOLE__:
            raise ConanException("No remote '%s' defined in remotes in file %s"
                                 % (name, self._filename))
KeyError
dataset/ETHPy150Open conan-io/conan/conans/client/remote_registry.py/RemoteRegistry.remote
677
def _api_scrape(json_inp, ndx):
    """
    Internal method to streamline the getting of data from the json

    Args:
        json_inp (json): json input from our caller
        ndx (int): index where the data is located in the api

    Returns:
        If pandas is present:
            DataFrame (pandas.DataFrame): data set from ndx within the
            API's json
        else:
            A dictionary of both headers and values from the page
    """
    try:
        headers = json_inp['resultSets'][ndx]['headers']
        values = json_inp['resultSets'][ndx]['rowSet']
    except KeyError:
        # This is so ugly but this is what you get when your data comes out
        # in not a standard format
        try:
            headers = json_inp['resultSet'][ndx]['headers']
            values = json_inp['resultSet'][ndx]['rowSet']
        except __HOLE__:
            # Added for results that only include one set (ex. LeagueLeaders)
            headers = json_inp['resultSet']['headers']
            values = json_inp['resultSet']['rowSet']
    if HAS_PANDAS:
        return DataFrame(values, columns=headers)
    else:
        # Taken from www.github.com/bradleyfay/py-goldsberry
        return [dict(zip(headers, value)) for value in values]
KeyError
dataset/ETHPy150Open seemethere/nba_py/nba_py/__init__.py/_api_scrape
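The record above cascades through three response layouts via nested KeyError handlers. A runnable sketch of the same fallback shape, without the pandas branch; the payload structure is an assumption modeled on the record, not the live NBA API:

def scrape(payload, ndx=0):
    # Try the common layout first, then progressively older layouts;
    # each failed lookup falls through to the next shape.
    try:
        rs = payload['resultSets'][ndx]
    except KeyError:
        try:
            rs = payload['resultSet'][ndx]
        except (KeyError, TypeError):
            rs = payload['resultSet']   # single-set results (ex. LeagueLeaders)
    headers, values = rs['headers'], rs['rowSet']
    return [dict(zip(headers, row)) for row in values]

assert scrape({'resultSet': {'headers': ['a'], 'rowSet': [[1]]}}) == [{'a': 1}]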
678
def show_negative_chains(model_path):
    """
    Display negative chains.

    Parameters
    ----------
    model_path: str
        The path to the model pickle file
    """
    model = serial.load(model_path)

    try:
        control.push_load_data(False)
        dataset = yaml_parse.load(model.dataset_yaml_src)
    finally:
        control.pop_load_data()

    try:
        layer_to_chains = model.layer_to_chains
    except __HOLE__:
        print("This model doesn't have negative chains.")
        quit(-1)

    vis_chains = get_vis_chains(layer_to_chains, model, dataset)

    m = vis_chains.shape[0]
    grid_shape = get_grid_shape(m)

    return create_patch_viewer(grid_shape, vis_chains, m)
AttributeError
dataset/ETHPy150Open lisa-lab/pylearn2/pylearn2/scripts/dbm/show_negative_chains.py/show_negative_chains
679
def unicodify(self, s):
    u"""
    Make sure, whatever ``s`` is, that a valid Unicode string is answered.
    If ``s`` is not a string, then use ``str(s)`` to convert it to a string
    first. This makes database records convert to a string of their id,
    instead of showing the record ``repr`` result. <em>Note that ``str``
    may raise an error on the content of the object, such as a list.</em>
    ``None`` is replaced by an empty Unicode string.
    """
    if s is None:
        return u''
    if isinstance(s, unicode):
        return s
    if not isinstance(s, basestring):
        s = str(s)
    try:
        return unicode(s, 'utf-8')
    except __HOLE__:
        return s
    raise ValueError("can't handle string s")  # unreachable: all paths above return
UnicodeDecodeError
dataset/ETHPy150Open petrvanblokland/Xierpa3/xierpa3/toolbox/parsers/etreexmlparser.py/UnbasedPathResolver.unicodify
680
def _getFileContents(self, clientVersion, fileList, rawStreams):
    manifest = ManifestWriter(self.tmpPath)
    sizeList = []
    exception = None

    for stream, (encFileId, encVersion) in \
            itertools.izip(rawStreams, fileList):
        if stream is None:
            # return an exception if we couldn't find one of
            # the streams
            exception = errors.FileStreamNotFound
        elif not files.frozenFileHasContents(stream):
            exception = errors.FileHasNoContents
        else:
            contents = files.frozenFileContentInfo(stream)
            filePath = self.repos.contentsStore.hashToPath(
                contents.sha1())
            try:
                size = os.stat(filePath).st_size
                sizeList.append(size)
                manifest.append(filePath,
                                expandedSize=size,
                                isChangeset=False,
                                preserveFile=True,
                                offset=0,
                                )
            except __HOLE__, e:
                if e.errno != errno.ENOENT:
                    raise
                exception = errors.FileContentsNotFound

        if exception:
            raise exception(self.toFileId(encFileId),
                            self.toVersion(encVersion))

    name = manifest.close()
    url = os.path.join(self.urlBase(), "changeset?%s" % name)

    # client versions >= 44 use strings instead of ints for size
    # because xmlrpclib can't marshal ints > 2GiB
    if clientVersion >= 44:
        sizeList = [str(x) for x in sizeList]
    else:
        for size in sizeList:
            if size >= 0x80000000:
                raise errors.InvalidClientVersion(
                    'This version of Conary does not support '
                    'downloading file contents larger than 2 '
                    'GiB. Please install a new Conary '
                    'client.')
    return url, sizeList
OSError
dataset/ETHPy150Open sassoftware/conary/conary/repository/netrepos/netserver.py/NetworkRepositoryServer._getFileContents
681
def _commitChangeSet(self, authToken, cs, mirror=False, hidden=False,
                     statusPath=None):
    # walk through all of the branches this change set commits to
    # and make sure the user has enough permissions for the operation
    verList = ((x.getName(), x.getOldVersion(), x.getNewVersion())
               for x in cs.iterNewTroveList())
    self._checkCommitPermissions(authToken, verList, mirror, hidden)

    items = {}
    removedList = []
    # check removed permissions; _checkCommitPermissions can't do
    # this for us since it's based on the trove type
    for troveCs in cs.iterNewTroveList():
        if troveCs.troveType() != trove.TROVE_TYPE_REMOVED:
            continue
        removedList.append(troveCs.getNewNameVersionFlavor())
        (name, version, flavor) = troveCs.getNewNameVersionFlavor()

        if not self.auth.authCheck(authToken, mirror=(mirror or hidden)):
            raise errors.InsufficientPermission
        if not self.auth.check(authToken, remove=True,
                               label=version.branch().label(),
                               trove=name):
            raise errors.InsufficientPermission
        items.setdefault((version, flavor), []).append(name)

    self.log(2, authToken[0], 'mirror=%s' % (mirror,),
             [(x[1], x[0][0].asString(), x[0][1])
              for x in items.iteritems()])
    self.repos.commitChangeSet(cs, mirror=mirror, hidden=hidden,
                               serialize=self.serializeCommits,
                               statusPath=statusPath)

    if not self.commitAction:
        return True

    userName = authToken[0]
    if not isinstance(userName, basestring):
        if userName.username:
            # A ValidUser token with a username specified.
            userName = userName.username
        else:
            # No username available.
            userName = 'unknown'

    d = {'reppath': self.urlBase(urlName=False),
         'user': userName,
         }
    cmd = self.commitAction % d
    p = util.popen(cmd, "w")
    try:
        for troveCs in cs.iterNewTroveList():
            p.write("%s\n%s\n%s\n" % (troveCs.getName(),
                                      troveCs.getNewVersion().asString(),
                                      deps.formatFlavor(troveCs.getNewFlavor())))
        p.close()
    except (__HOLE__, RuntimeError), e:
        # util.popen raises RuntimeError on error. p.write() raises
        # IOError on error (broken pipe, etc)
        # FIXME: use a logger for this
        sys.stderr.write('commitaction failed: %s\n' % e)
        sys.stderr.flush()
    except Exception, e:
        sys.stderr.write('unexpected exception occurred when running '
                         'commitaction: %s\n' % e)
        sys.stderr.flush()

    return True
IOError
dataset/ETHPy150Open sassoftware/conary/conary/repository/netrepos/netserver.py/NetworkRepositoryServer._commitChangeSet
682
@accessReadOnly
def getCommitProgress(self, authToken, clientVersion, url):
    base = util.normurl(self.urlBase())
    url = util.normurl(url)
    if not url.startswith(base):
        raise errors.RepositoryError(
            'The changeset that is being committed was not '
            'uploaded to a URL on this server. The url is "%s", this '
            'server is "%s".' % (url, base))
    # +1 strips off the ? from the query url
    fileName = url[len(base) + 1:] + "-in-status"
    path = "%s/%s" % (self.tmpPath, fileName)
    try:
        buf = file(path).read()
        return cPickle.loads(buf)
    except __HOLE__:
        return False
IOError
dataset/ETHPy150Open sassoftware/conary/conary/repository/netrepos/netserver.py/NetworkRepositoryServer.getCommitProgress
683
@accessReadOnly
def getTroveSigs(self, authToken, clientVersion, infoList):
    self.log(2, infoList)
    # process the results of the more generic call
    ret = self.getTroveInfo(authToken, clientVersion,
                            trove._TROVEINFO_TAG_SIGS, infoList)
    try:
        midx = [x[0] for x in ret].index(-1)
    except __HOLE__:
        pass
    else:
        raise errors.TroveMissing(infoList[midx][0], infoList[midx][1])
    return [x[1] for x in ret]
ValueError
dataset/ETHPy150Open sassoftware/conary/conary/repository/netrepos/netserver.py/NetworkRepositoryServer.getTroveSigs
684
def _deferGenerator(g, deferred):
    """
    See L{deferredGenerator}.
    """
    result = None

    # This function is complicated by the need to prevent unbounded recursion
    # arising from repeatedly yielding immediately ready deferreds. This while
    # loop and the waiting variable solve that by manually unfolding the
    # recursion.

    waiting = [True,   # defgen is waiting for result?
               None]   # result

    while 1:
        try:
            result = next(g)
        except __HOLE__:
            deferred.callback(result)
            return deferred
        except:
            deferred.errback()
            return deferred

        # Deferred.callback(Deferred) raises an error; we catch this case
        # early here and give a nicer error message to the user in case
        # they yield a Deferred.
        if isinstance(result, Deferred):
            return fail(TypeError("Yield waitForDeferred(d), not d!"))

        if isinstance(result, waitForDeferred):
            # a waitForDeferred was yielded, get the result.
            # Pass result in so it don't get changed going around the loop
            # This isn't a problem for waiting, as it's only reused if
            # gotResult has already been executed.
            def gotResult(r, result=result):
                result.result = r
                if waiting[0]:
                    waiting[0] = False
                    waiting[1] = r
                else:
                    _deferGenerator(g, deferred)
            result.d.addBoth(gotResult)
            if waiting[0]:
                # Haven't called back yet, set flag so that we get reinvoked
                # and return from the loop
                waiting[0] = False
                return deferred
            # Reset waiting to initial values for next loop
            waiting[0] = True
            waiting[1] = None

        result = None
StopIteration
dataset/ETHPy150Open twisted/twisted/twisted/internet/defer.py/_deferGenerator
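The heart of the record above is the "manually unfolded recursion": instead of each completed step recursively driving the next, a loop feeds results back into the generator. A minimal sketch of that idea without Deferreds (names and the three-step generator are illustrative only):

def drive(gen):
    # Loop until the generator is exhausted, feeding each yielded value
    # back in as the result of the yield expression -- the same
    # waiting-flag trick Twisted uses, minus the asynchrony.
    result = None
    while True:
        try:
            result = gen.send(result)   # first send must be None
        except StopIteration:
            return result

def steps():
    a = yield 1
    b = yield a + 1
    yield b + 1

assert drive(steps()) == 3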
685
def _inlineCallbacks(result, g, deferred):
    """
    See L{inlineCallbacks}.
    """
    # This function is complicated by the need to prevent unbounded recursion
    # arising from repeatedly yielding immediately ready deferreds. This while
    # loop and the waiting variable solve that by manually unfolding the
    # recursion.

    waiting = [True,   # waiting for result?
               None]   # result

    while 1:
        try:
            # Send the last result back as the result of the yield expression.
            isFailure = isinstance(result, failure.Failure)
            if isFailure:
                result = result.throwExceptionIntoGenerator(g)
            else:
                result = g.send(result)
        except __HOLE__ as e:
            # fell off the end, or "return" statement
            deferred.callback(getattr(e, "value", None))
            return deferred
        except _DefGen_Return as e:
            # returnValue() was called; time to give a result to the original
            # Deferred. First though, let's try to identify the potentially
            # confusing situation which results when returnValue() is
            # accidentally invoked from a different function, one that wasn't
            # decorated with @inlineCallbacks.

            # The traceback starts in this frame (the one for
            # _inlineCallbacks); the next one down should be the application
            # code.
            appCodeTrace = exc_info()[2].tb_next
            if isFailure:
                # If we invoked this generator frame by throwing an exception
                # into it, then throwExceptionIntoGenerator will consume an
                # additional stack frame itself, so we need to skip that too.
                appCodeTrace = appCodeTrace.tb_next
            # Now that we've identified the frame being exited by the
            # exception, let's figure out if returnValue was called from it
            # directly. returnValue itself consumes a stack frame, so the
            # application code will have a tb_next, but it will *not* have a
            # second tb_next.
            if appCodeTrace.tb_next.tb_next:
                # If returnValue was invoked non-local to the frame which it is
                # exiting, identify the frame that ultimately invoked
                # returnValue so that we can warn the user, as this behavior is
                # confusing.
                ultimateTrace = appCodeTrace
                while ultimateTrace.tb_next.tb_next:
                    ultimateTrace = ultimateTrace.tb_next
                filename = ultimateTrace.tb_frame.f_code.co_filename
                lineno = ultimateTrace.tb_lineno
                warnings.warn_explicit(
                    "returnValue() in %r causing %r to exit: "
                    "returnValue should only be invoked by functions decorated "
                    "with inlineCallbacks" % (
                        ultimateTrace.tb_frame.f_code.co_name,
                        appCodeTrace.tb_frame.f_code.co_name),
                    DeprecationWarning, filename, lineno)
            deferred.callback(e.value)
            return deferred
        except:
            deferred.errback()
            return deferred

        if isinstance(result, Deferred):
            # a deferred was yielded, get the result.
            def gotResult(r):
                if waiting[0]:
                    waiting[0] = False
                    waiting[1] = r
                else:
                    _inlineCallbacks(r, g, deferred)

            result.addBoth(gotResult)
            if waiting[0]:
                # Haven't called back yet, set flag so that we get reinvoked
                # and return from the loop
                waiting[0] = False
                return deferred

            result = waiting[1]
            # Reset waiting to initial values for next loop. gotResult uses
            # waiting, but this isn't a problem because gotResult is only
            # executed once, and if it hasn't been executed yet, the return
            # branch above would have been taken.
            waiting[0] = True
            waiting[1] = None

    return deferred
StopIteration
dataset/ETHPy150Open twisted/twisted/twisted/internet/defer.py/_inlineCallbacks
686
def contents_of(f, encoding='utf-8'):
    """Helper to read the contents of the given file or path into a string with
    the given encoding. Encoding defaults to 'utf-8', other useful encodings
    are 'ascii' and 'latin-1'."""

    try:
        contents = f.read()
    except AttributeError:
        try:
            with open(f, 'r') as fp:
                contents = fp.read()
        except __HOLE__:
            raise ValueError('val must be file or path, but was type <%s>' % type(f).__name__)
        except OSError:
            if not isinstance(f, str_types):
                raise ValueError('val must be file or path, but was type <%s>' % type(f).__name__)
            raise

    if sys.version_info[0] == 3 and type(contents) is bytes:
        # in PY3 force decoding of bytes to target encoding
        return contents.decode(encoding, 'replace')
    elif sys.version_info[0] == 2 and encoding == 'ascii':
        # in PY2 force encoding back to ascii
        return contents.encode('ascii', 'replace')
    else:
        # in all other cases, try to decode to target encoding
        try:
            return contents.decode(encoding, 'replace')
        except AttributeError:
            pass

    # if all else fails, just return the contents "as is"
    return contents
TypeError
dataset/ETHPy150Open ActivisionGameScience/assertpy/assertpy/assertpy.py/contents_of
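The duck-typing core of the helper above, stripped of the Python 2/3 decoding branches: anything with a working .read() is a file, and the AttributeError fallback treats everything else as a path. This is a simplified sketch, not assertpy's full behavior.

import io

def contents_of(f, encoding='utf-8'):
    try:
        return f.read()                 # file-like object
    except AttributeError:
        with open(f, 'r', encoding=encoding) as fp:
            return fp.read()            # assume it was a path

assert contents_of(io.StringIO("hi")) == "hi"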
687
def contains_sequence(self, *items):
    """Asserts that val contains the given sequence of items in order."""
    if len(items) == 0:
        raise ValueError('one or more args must be given')
    else:
        try:
            for i in xrange(len(self.val) - len(items) + 1):
                for j in xrange(len(items)):
                    if self.val[i + j] != items[j]:
                        break
                else:
                    return self
        except __HOLE__:
            raise TypeError('val is not iterable')
    self._err('Expected <%s> to contain sequence %s, but did not.' % (self.val, items))
TypeError
dataset/ETHPy150Open ActivisionGameScience/assertpy/assertpy/assertpy.py/AssertionBuilder.contains_sequence
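The record above is a sliding-window subsequence check built on Python's for/else: the inner loop breaks on the first mismatch, and the else clause fires only when the whole window matched. A standalone Python 3 sketch:

def contains_sequence(haystack, *items):
    if not items:
        raise ValueError('one or more args must be given')
    # Slide a window of len(items) across haystack.
    for i in range(len(haystack) - len(items) + 1):
        for j in range(len(items)):
            if haystack[i + j] != items[j]:
                break
        else:
            return True   # inner loop never broke: full match
    return False

assert contains_sequence([1, 2, 3, 4], 2, 3)
assert not contains_sequence([1, 2, 3, 4], 3, 2)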
688
def contains_duplicates(self):
    """Asserts that val is iterable and contains duplicate items."""
    try:
        if len(self.val) != len(set(self.val)):
            return self
    except __HOLE__:
        raise TypeError('val is not iterable')
    self._err('Expected <%s> to contain duplicates, but did not.' % self.val)
TypeError
dataset/ETHPy150Open ActivisionGameScience/assertpy/assertpy/assertpy.py/AssertionBuilder.contains_duplicates
689
def does_not_contain_duplicates(self):
    """Asserts that val is iterable and does not contain any duplicate items."""
    try:
        if len(self.val) == len(set(self.val)):
            return self
    except __HOLE__:
        raise TypeError('val is not iterable')
    self._err('Expected <%s> to not contain duplicates, but did.' % self.val)
TypeError
dataset/ETHPy150Open ActivisionGameScience/assertpy/assertpy/assertpy.py/AssertionBuilder.does_not_contain_duplicates
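Both duplicate checks above rest on one fact: deduplicating through set() shrinks a sequence exactly when it has repeats, and set()/len() raise TypeError for unhashable or non-sized input. A tiny standalone version:

def has_duplicates(seq):
    try:
        return len(seq) != len(set(seq))
    except TypeError:
        raise TypeError('val is not iterable (or items are unhashable)')

assert has_duplicates([1, 2, 2])
assert not has_duplicates('abc')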
690
def extracting(self, *names):
    """Asserts that val is collection, then extracts the named properties or
    named zero-arg methods into a list (or list of tuples if multiple names
    are given)."""
    if not isinstance(self.val, collections.Iterable):
        raise TypeError('val is not iterable')
    if isinstance(self.val, str_types):
        raise TypeError('val must not be string')
    if len(names) == 0:
        raise ValueError('one or more name args must be given')

    extracted = []
    for i in self.val:
        items = []
        for name in names:
            if type(i) is dict:
                if name in i:
                    items.append(i[name])
                else:
                    raise ValueError('item keys %s did not contain key <%s>' % (list(i.keys()), name))
            elif hasattr(i, name):
                attr = getattr(i, name)
                if callable(attr):
                    try:
                        items.append(attr())
                    except __HOLE__:
                        raise ValueError('val method <%s()> exists, but is not zero-arg method' % name)
                else:
                    items.append(attr)
            else:
                raise ValueError('val does not have property or zero-arg method <%s>' % name)
        extracted.append(tuple(items) if len(items) > 1 else items[0])
    return AssertionBuilder(extracted, self.description)

### dynamic assertions ###
TypeError
dataset/ETHPy150Open ActivisionGameScience/assertpy/assertpy/assertpy.py/AssertionBuilder.extracting
691
def __getattr__(self, attr):
    """Asserts that val has attribute attr and that attribute's value is equal
    to other via a dynamic assertion of the form: has_<attr>()."""
    if not attr.startswith('has_'):
        raise AttributeError('assertpy has no assertion <%s()>' % attr)

    attr_name = attr[4:]
    if not hasattr(self.val, attr_name):
        raise AttributeError('val has no attribute <%s>' % attr_name)

    def _wrapper(*args, **kwargs):
        if len(args) != 1:
            raise TypeError('assertion <%s()> takes exactly 1 argument (%d given)' % (attr, len(args)))
        other = args[0]
        val_attr = getattr(self.val, attr_name)

        if callable(val_attr):
            try:
                val = val_attr()
            except __HOLE__:
                raise TypeError('val does not have zero-arg method <%s()>' % attr_name)
        else:
            val = val_attr

        if val != other:
            self._err('Expected <%s> to be equal to <%s>, but was not.' % (val, other))
        return self
    return _wrapper

### expected exceptions ###
TypeError
dataset/ETHPy150Open ActivisionGameScience/assertpy/assertpy/assertpy.py/AssertionBuilder.__getattr__
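The record above manufactures assertions at attribute-lookup time: __getattr__ only fires for names Python cannot find normally, so every undefined has_<name> becomes a checker on the fly. A compact runnable sketch of the dispatch (class name and error wording are illustrative):

class Checker:
    def __init__(self, val):
        self.val = val

    def __getattr__(self, attr):
        # Only "has_<name>" spellings become dynamic assertions;
        # anything else fails fast with AttributeError.
        if not attr.startswith('has_'):
            raise AttributeError('no assertion %r' % attr)
        name = attr[4:]

        def _wrapper(expected):
            actual = getattr(self.val, name)
            if callable(actual):
                actual = actual()   # zero-arg methods are called for you
            if actual != expected:
                raise AssertionError('%r != %r' % (actual, expected))
            return self
        return _wrapper

Checker('abc').has_upper('ABC')   # str.upper() is a zero-arg method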
692
def apply(self, dataset, can_fit=False):
    """
    .. todo::

        WRITEME
    """
    patches = dataset.get_topological_view()

    num_topological_dimensions = len(patches.shape) - 2
    if num_topological_dimensions != len(self.patch_shape):
        raise ValueError("ReassembleGridPatches with " +
                         str(len(self.patch_shape)) +
                         " topological dimensions called on dataset " +
                         " with " +
                         str(num_topological_dimensions) + ".")

    num_patches = patches.shape[0]
    num_examples = num_patches
    for im_dim, patch_dim in zip(self.orig_shape, self.patch_shape):
        if im_dim % patch_dim != 0:
            raise Exception('Trying to assemble patches of shape ' +
                            str(self.patch_shape) + ' into images of ' +
                            'shape ' + str(self.orig_shape))
        patches_this_dim = im_dim / patch_dim
        if num_examples % patches_this_dim != 0:
            raise Exception('Trying to re-assemble ' + str(num_patches) +
                            ' patches of shape ' + str(self.patch_shape) +
                            ' into images of shape ' + str(self.orig_shape))
        num_examples /= patches_this_dim

    # batch size
    reassembled_shape = [num_examples]
    # topological dimensions
    for dim in self.orig_shape:
        reassembled_shape.append(dim)
    # number of channels
    reassembled_shape.append(patches.shape[-1])

    reassembled = numpy.zeros(reassembled_shape, dtype=patches.dtype)
    channel_slice = slice(0, patches.shape[-1])
    coords = [0] * (num_topological_dimensions + 1)
    max_strides = [num_examples - 1]
    for dim, pd in zip(self.orig_shape, self.patch_shape):
        assert dim % pd == 0
        max_strides.append(dim / pd - 1)

    keep_going = True
    i = 0
    while keep_going:
        args = [coords[0]]
        for j in xrange(num_topological_dimensions):
            coord = coords[j + 1]
            args.append(slice(coord * self.patch_shape[j],
                              (coord + 1) * self.patch_shape[j]))
            next_shape_coord = reassembled.shape[j + 1]
            assert (coord + 1) * self.patch_shape[j] <= next_shape_coord
        args.append(channel_slice)

        try:
            patch = patches[i, :]
        except __HOLE__:
            reraise_as(IndexError('Gave index of ' + str(i) +
                                  ', : into thing of shape ' +
                                  str(patches.shape)))
        reassembled[args] = patch

        i += 1
        j = 0
        keep_going = False
        while not keep_going:
            if coords[-(j + 1)] < max_strides[-(j + 1)]:
                coords[-(j + 1)] += 1
                keep_going = True
            else:
                coords[-(j + 1)] = 0
                if j == num_topological_dimensions:
                    break
                j = j + 1

    dataset.set_topological_view(reassembled)

    # fix labels
    if dataset.y is not None:
        dataset.y = dataset.y[::patches.shape[0] / reassembled_shape[0]]
IndexError
dataset/ETHPy150Open lisa-lab/pylearn2/pylearn2/datasets/preprocessing.py/ReassembleGridPatches.apply
693
def apply(self, dataset, can_fit=False):
    """
    .. todo::

        WRITEME
    """
    w_rows, w_cols = self.window_shape

    arr = dataset.get_topological_view()

    try:
        axes = dataset.view_converter.axes
    except __HOLE__:
        reraise_as(NotImplementedError("I don't know how to tell what the "
                                       "axes of this kind of dataset "
                                       "are."))

    needs_transpose = not axes[1:3] == (0, 1)

    if needs_transpose:
        arr = numpy.transpose(arr,
                              (axes.index('c'),
                               axes.index(0),
                               axes.index(1),
                               axes.index('b')))

    r_off = (arr.shape[1] - w_rows) // 2
    c_off = (arr.shape[2] - w_cols) // 2
    new_arr = arr[:, r_off:r_off + w_rows, c_off:c_off + w_cols, :]

    if needs_transpose:
        index_map = tuple(('c', 0, 1, 'b').index(axis) for axis in axes)
        new_arr = numpy.transpose(new_arr, index_map)

    dataset.set_topological_view(new_arr, axes=axes)
AttributeError
dataset/ETHPy150Open lisa-lab/pylearn2/pylearn2/datasets/preprocessing.py/CentralWindow.apply
694
def isiterable(x):
    "Returns `True` if the specified object is iterable."
    try:
        iter(x)
    except __HOLE__:
        return False
    return True
TypeError
dataset/ETHPy150Open lsaffre/lino/lino/utils/__init__.py/isiterable
695
def __getitem__(self, key):
    """
    Returns the last data value for this key, or [] if it's an empty list;
    raises KeyError if not found.
    """
    try:
        list_ = dict.__getitem__(self, key)
    except KeyError:
        raise MultiValueDictKeyError("Key %r not found in %r" % (key, self))
    try:
        return list_[-1]
    except __HOLE__:
        return []
IndexError
dataset/ETHPy150Open wcong/ants/ants/utils/datatypes.py/MultiValueDict.__getitem__
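The class above stores a list per key and answers the last value on item access. A simplified, runnable sketch of those semantics (this is a toy modeled on the record, not Django's full MultiValueDict):

class MultiValueDict(dict):
    def __getitem__(self, key):
        values = dict.__getitem__(self, key)   # may raise KeyError
        try:
            return values[-1]                  # last value wins
        except IndexError:
            return []                          # key exists but has no values

d = MultiValueDict()
dict.__setitem__(d, 'name', ['a', 'b'])
assert d['name'] == 'b'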
696
def get(self, key, default=None):
    "Returns the default value if the requested data doesn't exist"
    try:
        val = self[key]
    except __HOLE__:
        return default
    if val == []:
        return default
    return val
KeyError
dataset/ETHPy150Open wcong/ants/ants/utils/datatypes.py/MultiValueDict.get
697
def getlist(self, key):
    "Returns an empty list if the requested data doesn't exist"
    try:
        return dict.__getitem__(self, key)
    except __HOLE__:
        return []
KeyError
dataset/ETHPy150Open wcong/ants/ants/utils/datatypes.py/MultiValueDict.getlist
698
def update(self, *args, **kwargs):
    "update() extends rather than replaces existing key lists. Also accepts keyword args."
    if len(args) > 1:
        raise TypeError("update expected at most 1 arguments, got %d" % len(args))
    if args:
        other_dict = args[0]
        if isinstance(other_dict, MultiValueDict):
            for key, value_list in other_dict.lists():
                self.setlistdefault(key, []).extend(value_list)
        else:
            try:
                for key, value in other_dict.items():
                    self.setlistdefault(key, []).append(value)
            except __HOLE__:
                raise ValueError("MultiValueDict.update() takes either a MultiValueDict or dictionary")
    for key, value in kwargs.iteritems():
        self.setlistdefault(key, []).append(value)
TypeError
dataset/ETHPy150Open wcong/ants/ants/utils/datatypes.py/MultiValueDict.update
699
def __getitem__(self, key):
    for dict_ in self.dicts:
        try:
            return dict_[key]
        except __HOLE__:
            pass
    raise KeyError
KeyError
dataset/ETHPy150Open wcong/ants/ants/utils/datatypes.py/MergeDict.__getitem__
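The lookup above chains dictionaries: the first one that knows the key wins, and only a miss in every dict raises. A standalone sketch; the stdlib's collections.ChainMap offers the same behavior ready-made.

class MergeDict:
    def __init__(self, *dicts):
        self.dicts = dicts

    def __getitem__(self, key):
        for d in self.dicts:
            try:
                return d[key]      # first hit wins
            except KeyError:
                pass               # try the next dict
        raise KeyError(key)

m = MergeDict({'a': 1}, {'a': 2, 'b': 3})
assert (m['a'], m['b']) == (1, 3)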