Columns:
    Unnamed: 0: int64 (0 to 10k)
    function: string (lengths 79 to 138k)
    label: string (20 classes)
    info: string (lengths 42 to 261)
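Each record below is a row index, a Python function with the handled exception type masked as __HOLE__, a label naming that exception class, and the path of the originating source file. As a minimal sketch of how such records might be consumed (the filename and JSON-lines layout are assumptions for illustration, not part of this dump):

    # Hypothetical: iterate a JSON-lines dump of rows with the columns above.
    import json

    with open("exception_prediction_rows.jsonl") as f:  # assumed filename
        for line in f:
            row = json.loads(line)
            # row["function"] holds the masked code, row["label"] the
            # exception class that fills __HOLE__, row["info"] the origin path.
            print(row["label"], "<-", row["info"])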
9,100
def has_key(self, key):
    try:
        value = self[key]
    except __HOLE__:
        return False
    return True
KeyError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/webob_0_9/webob/util/dictmixin.py/DictMixin.has_key
9,101
def setdefault(self, key, default=None):
    try:
        return self[key]
    except __HOLE__:
        self[key] = default
        return default
KeyError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/webob_0_9/webob/util/dictmixin.py/DictMixin.setdefault
9,102
def pop(self, key, *args):
    if len(args) > 1:
        raise TypeError, "pop expected at most 2 arguments, got "\
            + repr(1 + len(args))
    try:
        value = self[key]
    except __HOLE__:
        if args:
            return args[0]
        raise
    del self[key]
    return value
KeyError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/webob_0_9/webob/util/dictmixin.py/DictMixin.pop
9,103
def popitem(self):
    try:
        k, v = self.iteritems().next()
    except __HOLE__:
        raise KeyError, 'container is empty'
    del self[k]
    return (k, v)
StopIteration
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/webob_0_9/webob/util/dictmixin.py/DictMixin.popitem
9,104
def get(self, key, default=None):
    try:
        return self[key]
    except __HOLE__:
        return default
KeyError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/webob_0_9/webob/util/dictmixin.py/DictMixin.get
9,105
def changed(self, originally_changed):
    Specification.changed(self, originally_changed)
    try:
        del self._v_attrs
    except __HOLE__:
        pass
AttributeError
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/zope/zope/interface/declarations.py/Declaration.changed
9,106
def implementedByFallback(cls):
    """Return the interfaces implemented for a class' instances

      The value returned is an IDeclaration.

      for example:

        >>> from zope.interface import Interface
        >>> class I1(Interface): pass
        ...
        >>> class I2(I1): pass
        ...
        >>> class I3(Interface): pass
        ...
        >>> class I4(I3): pass
        ...
        >>> class C1(object):
        ...   implements(I2)
        >>> class C2(C1):
        ...   implements(I3)
        >>> [i.getName() for i in implementedBy(C2)]
        ['I3', 'I2']

      Really, any object should be able to receive a successful answer, even
      an instance:

        >>> class Callable(object):
        ...     def __call__(self):
        ...         return self
        >>> implementedBy(Callable())
        <implementedBy zope.interface.declarations.?>

      Note that the name of the spec ends with a '?', because the `Callable`
      instance does not have a `__name__` attribute.
      """
    # This also manages storage of implementation specifications

    try:
        spec = cls.__dict__.get('__implemented__')
    except __HOLE__:

        # we can't get the class dict. This is probably due to a
        # security proxy.  If this is the case, then probably no
        # descriptor was installed for the class.

        # We don't want to depend directly on zope.security in
        # zope.interface, but we'll try to make reasonable
        # accommodations in an indirect way.

        # We'll check to see if there's an implements:

        spec = getattr(cls, '__implemented__', None)
        if spec is None:
            # There's no spec stred in the class. Maybe its a builtin:
            spec = BuiltinImplementationSpecifications.get(cls)
            if spec is not None:
                return spec
            return _empty

        if spec.__class__ == Implements:
            # we defaulted to _empty or there was a spec. Good enough.
            # Return it.
            return spec

        # TODO: need old style __implements__ compatibility?
        # Hm, there's an __implemented__, but it's not a spec. Must be
        # an old-style declaration. Just compute a spec for it
        return Declaration(*_normalizeargs((spec, )))

    if isinstance(spec, Implements):
        return spec

    if spec is None:
        spec = BuiltinImplementationSpecifications.get(cls)
        if spec is not None:
            return spec

    # TODO: need old style __implements__ compatibility?
    if spec is not None:
        # old-style __implemented__ = foo declaration
        spec = (spec, )  # tuplefy, as it might be just an int
        spec = Implements(*_normalizeargs(spec))
        spec.inherit = None  # old-style implies no inherit

        del cls.__implemented__  # get rid of the old-style declaration
    else:
        try:
            bases = cls.__bases__
        except AttributeError:
            if not callable(cls):
                raise TypeError("ImplementedBy called for non-factory", cls)
            bases = ()

        spec = Implements(*[implementedBy(c) for c in bases])
        spec.inherit = cls

    spec.__name__ = (getattr(cls, '__module__', '?') or '?') + \
                    '.' + (getattr(cls, '__name__', '?') or '?')

    try:
        cls.__implemented__ = spec
        if not hasattr(cls, '__providedBy__'):
            cls.__providedBy__ = objectSpecificationDescriptor

        if (isinstance(cls, DescriptorAwareMetaClasses) and
                '__provides__' not in cls.__dict__):
            # Make sure we get a __provides__ descriptor
            cls.__provides__ = ClassProvides(
                cls,
                getattr(cls, '__class__', type(cls)),
            )

    except TypeError:
        if not isinstance(cls, type):
            raise TypeError("ImplementedBy called for non-type", cls)
        BuiltinImplementationSpecifications[cls] = spec

    return spec
AttributeError
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/zope/zope/interface/declarations.py/implementedByFallback
9,107
def __call__(self, ob):
    if isinstance(ob, DescriptorAwareMetaClasses):
        classImplements(ob, *self.interfaces)
        return ob

    spec = Implements(*self.interfaces)
    try:
        ob.__implemented__ = spec
    except __HOLE__:
        raise TypeError("Can't declare implements", ob)
    return ob
AttributeError
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/zope/zope/interface/declarations.py/implementer.__call__
9,108
def getObjectSpecification(ob):
    provides = getattr(ob, '__provides__', None)
    if provides is not None:
        if isinstance(provides, SpecificationBase):
            return provides

    try:
        cls = ob.__class__
    except __HOLE__:
        # We can't get the class, so just consider provides
        return _empty

    return implementedBy(cls)
AttributeError
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/zope/zope/interface/declarations.py/getObjectSpecification
9,109
def providedBy(ob):
    # Here we have either a special object, an old-style declaration
    # or a descriptor

    # Try to get __providedBy__
    try:
        r = ob.__providedBy__
    except AttributeError:
        # Not set yet. Fall back to lower-level thing that computes it
        return getObjectSpecification(ob)

    try:
        # We might have gotten a descriptor from an instance of a
        # class (like an ExtensionClass) that doesn't support
        # descriptors.  We'll make sure we got one by trying to get
        # the only attribute, which all specs have.
        r.extends
    except AttributeError:

        # The object's class doesn't understand descriptors.
        # Sigh. We need to get an object descriptor, but we have to be
        # careful.  We want to use the instance's __provides__, if
        # there is one, but only if it didn't come from the class.

        try:
            r = ob.__provides__
        except __HOLE__:
            # No __provides__, so just fall back to implementedBy
            return implementedBy(ob.__class__)

        # We need to make sure we got the __provides__ from the
        # instance. We'll do this by making sure we don't get the same
        # thing from the class:

        try:
            cp = ob.__class__.__provides__
        except AttributeError:
            # The ob doesn't have a class or the class has no
            # provides, assume we're done:
            return r

        if r is cp:
            # Oops, we got the provides from the class. This means
            # the object doesn't have it's own. We should use implementedBy
            return implementedBy(ob.__class__)

    return r
AttributeError
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/zope/zope/interface/declarations.py/providedBy
9,110
def find_reverse(self, string, region):
    new_regions = (r for r in reversed(self.view.find_all(string))
                   if r.begin() < region.end())
    try:
        if sys.version_info < (3,0,0) :
            new_region = new_regions.next()
        else :
            new_region = next(new_regions)
    except __HOLE__:
        return None
    else:
        return new_region
StopIteration
dataset/ETHPy150Open Pephers/Super-Calculator/Super Calculator.py/SuperCalculatorCommand.find_reverse
9,111
def main(logfile):
    p = hotshot.Profile(logfile)
    benchtime, stones = p.runcall(test.pystone.pystones)
    p.close()

    print "Pystone(%s) time for %d passes = %g" % \
          (test.pystone.__version__, test.pystone.LOOPS, benchtime)
    print "This machine benchmarks at %g pystones/second" % stones

    stats = hotshot.stats.load(logfile)
    stats.strip_dirs()
    stats.sort_stats('time', 'calls')
    try:
        stats.print_stats(20)
    except __HOLE__, e:
        if e.errno != errno.EPIPE:
            raise
IOError
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/hotshot/stones.py/main
9,112
def _request_while(self, option_name, options, kls, default_option, one_line_options=False):
    str_list_options = ""
    if options:
        str_list_options = "(/o list options)"
    while True:
        #logger.debug("Request to user...")
        str_ = ''
        if not default_option:
            str_ = self.request_string("Enter %s %s" % (option_name, str_list_options))
        else:
            str_ = self.request_string("Enter %s (default:%s) %s"
                                       % (option_name, default_option, str_list_options))
        if str_ == '/o':
            self.print_options(options, one_line_options)
        else:
            if str_ == '' and default_option:
                return default_option
            if str_.lower() == 'none' and ('none' in options or 'None' in options):
                return None
            ret = None
            try:
                ret = kls(str_) if kls is not None else str_
                if len(options) == 0 or ret in options:
                    return ret
            except (BiiException, __HOLE__):
                pass
            self.out.error("%s is not a valid %s \n" % (str_, option_name))
            self.print_options(options, one_line_options)
AttributeError
dataset/ETHPy150Open biicode/client/shell/userio.py/UserIO._request_while
9,113
def new_frame(self, *args):
    # args must be filename, firstlineno, funcname
    # our code objects are cached since we don't need to create
    # new ones every time
    try:
        code = self._code[args]
    except __HOLE__:
        code = FakeCode(*args)
        self._code[args] = code
    # frame objects are create fresh, since the back pointer will
    # vary considerably
    if self._stack:
        back = self._stack[-1]
    else:
        back = None
    frame = FakeFrame(code, back)
    self._stack.append(frame)
    return frame
KeyError
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/hotshot/stats.py/StatsLoader.new_frame
9,114
def daemonize(self):
    '''
    do the UNIX double-fork magic, see Stevens' "Advanced
    Programming in the UNIX Environment" for details (ISBN 0201563177)
    http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
    '''
    try:
        pid = os.fork()
        if pid > 0:
            # exit first parent
            sys.exit(0)
    except OSError, oserr:
        sys.stderr.write("fork #1 failed:{0}({1})\n".format(
            oserr.errno,
            oserr.strerror,
        ))
        sys.exit(1)

    # decouple from parent environment
    os.chdir("/")
    os.setsid()
    os.umask(0)

    # do second fork
    try:
        pid = os.fork()
        if pid > 0:
            # exit from second parent
            sys.exit(0)
    except __HOLE__, oserr:
        sys.stderr.write("fork #2 failed:{0}({1})\n".format(
            oserr.errno,
            oserr.strerror,
        ))
        sys.exit(1)

    # redirect standard file descriptors
    sys.stdout.flush()
    sys.stderr.flush()
    stdi = file(self.stdin, 'r')
    stdo = file(self.stdout, 'a+')
    stde = file(self.stderr, 'a+', 0)
    os.dup2(stdi.fileno(), sys.stdin.fileno())
    os.dup2(stdo.fileno(), sys.stdout.fileno())
    os.dup2(stde.fileno(), sys.stderr.fileno())

    # register two signal handlers to handle SIGTERM and SIGKILL properly
    signal.signal(signal.SIGINT, self.stop)
    signal.signal(signal.SIGTERM, self.stop)

    # write pidfile
    pid = str(os.getpid())
    file(self.pidfile, 'w+').write("%s\n" % pid)
OSError
dataset/ETHPy150Open felskrone/salt-eventsd/salteventsd/daemon.py/Daemon.daemonize
9,115
def start(self):
    '''
    Start the daemon
    '''
    # Check for a pidfile to see if the daemon already runs
    try:
        pidf = file(self.pidfile, 'r')
        pid = int(pidf.read().strip())
        pidf.close()
    except __HOLE__:
        pid = None

    if pid:
        sys.stderr.write("pidfile {0} already exists.\n".format(self.pidfile))
        sys.exit(1)

    # Start the daemon
    self.daemonize()
    self.run()
IOError
dataset/ETHPy150Open felskrone/salt-eventsd/salteventsd/daemon.py/Daemon.start
9,116
def stop(self, signal, frame):
    '''
    We override stop() to brake our main loop
    and have a pretty log message
    '''
    log.info("Received signal {0}".format(signal))

    # if we have running workers, run through all and join() the ones
    # that have finished. if we still have running workers after that,
    # wait 5 secs for the rest and then exit. Maybe we should improv
    # this a litte bit more
    if len(self.running_workers) > 0:
        clean_workers = []

        for count in range(0, 2):
            for worker in self.running_workers:
                if worker.isAlive():
                    clean_workers.append(worker)
                else:
                    worker.join()
                    log.debug("Joined worker #{0}".format(worker.getName()))

            if len(clean_workers) > 0:
                log.info("Waiting 5secs for remaining workers..")
                time.sleep(5)
            else:
                break

    log.info("salt-eventsd has shut down")

    # leave the cleanup to the supers stop
    try:
        super(SaltEventsDaemon, self).stop(signal, frame)
    except (IOError, __HOLE__):
        os._exit(0)
OSError
dataset/ETHPy150Open felskrone/salt-eventsd/salteventsd/daemon.py/SaltEventsDaemon.stop
9,117
def listen(self):
    '''
    The main event loop where we receive the events and
    start the workers that dump our data into the database
    '''
    # log on to saltstacks event-bus
    event = salt.utils.event.SaltEvent(
        self.node,
        self.sock_dir,
    )

    # we store our events in a list, we dont really care about an order
    # or what kind of data is put in there. all that is configured with the
    # templates configured in the configfile
    event_queue = []

    # start our timers
    self.ev_timer.start()
    self.state_timer.start()

    # this is for logline chronology so the timer-message always comes
    # _before_ the actual startup-message of the listening loop below :-)
    time.sleep(1)

    log.info("Entering main event loop")
    log.info("Listening on: {0}".format(event.puburi))

    # read everything we can get our hands on
    while True:
        # the zmq-socket does not like ^C very much, make the error
        # a little more graceful. alright, alright, ignore the damn thing,
        # we're exiting anyways...
        try:
            ret = event.get_event(full=True)
        except zmq.ZMQError:
            pass
        except __HOLE__:
            log.info('Received CTRL+C, shutting down')
            self.stop(signal.SIGTERM, None)

        # if we have not received enough events in to reach event_limit
        # and the timer has fired, dump the events collected so far
        # to the workers
        if(self.ev_timer_ev):
            if (len(self.running_workers) < self.max_workers) and \
               (len(event_queue) > 0):

                self._init_worker(event_queue)

                # reset our queue to prevent duplicate entries
                del event_queue[:]

                # we reset the timer.ev_timer_ev at the end of the loop
                # so we can update the stats that are logged

        if ret is None:
            continue

        # filter only the events we're interested in. all events have a tag
        # we can filter them by. we match with a precompiled regex
        if 'tag' in ret:

            # filter out events with an empty tag. those are special
            if ret['tag'] != '':

                # run through our configured events and try to match the
                # current events tag against the ones we're interested in
                for key in self.event_map.keys():
                    if self.event_map[key]['tag'].match(ret['tag']):
                        log.debug("Matching on {0}:{1}".format(key, ret['tag']))

                        prio = self.event_map[key].get('prio', 0)

                        # push prio1-events directly into a worker
                        if prio > 0:
                            log.debug('Prio1 event found, pushing immediately!')
                            self.events_han += 1
                            self._init_worker([ret])
                        else:
                            event_queue.append(ret)
                            self.events_han += 1

        # once we reach the event_limit, start a worker that
        # writes that data in to the database
        if len(event_queue) >= self.event_limit:

            # only start a worker if not too many workers are running
            if len(self.running_workers) < self.max_workers:
                self._init_worker(event_queue)

                # reset the timer
                self.ev_timer.reset()

                # reset our queue to prevent duplicate entries
                del event_queue[:]
            else:
                # FIXME: we need to handle this situation somehow if
                # too many workers are running. just flush the events?
                # there really is no sane way except queueing more and more
                # until some sort of limit is reached and we care more about
                # our saltmaster than about the collected events!
                log.critical("Too many workers running, loosing data!!!")

        # a list for the workers that are still running
        clean_workers = []

        # run through all the workers and join() the ones
        # that have finished dumping their data and keep
        # the running ones on our list
        for worker in self.running_workers:
            if worker.isAlive():
                clean_workers.append(worker)
            else:
                worker.join()
                log.debug("Joined worker #{0}".format(worker.getName()))
                self.threads_join += 1

        # get rid of the old reference and set a new one
        # FIXME: is this really neccessary?
        del self.running_workers
        self.running_workers = clean_workers
        self.events_rec += 1

        if(self.ev_timer_ev):
            self.ev_timer_ev = False

    log.info("Listen loop ended...")
KeyboardInterrupt
dataset/ETHPy150Open felskrone/salt-eventsd/salteventsd/daemon.py/SaltEventsDaemon.listen
9,118
def _get_pid(self):
    '''
    Get our current pid from the pidfile and fall back to os.getpid()
    if pidfile not present (in foreground mode)
    '''
    pid = None
    try:
        pidf = file(self.pidfile, 'r')
        pid = int(pidf.read().strip())
        pidf.close()
    except __HOLE__:
        pid = os.getpid()
    return pid
IOError
dataset/ETHPy150Open felskrone/salt-eventsd/salteventsd/daemon.py/SaltEventsDaemon._get_pid
9,119
def _write_state(self):
    '''
    Writes a current status to the defined status-file
    this includes the current pid, events received/handled
    and threads created/joined
    '''
    ev_hdl_per_s = float((float(self.events_han - self.stat_hdl_count)) /
                         float(self.state_timer_intrvl))
    ev_tot_per_s = float((float(self.events_rec - self.stat_rec_count)) /
                         float(self.state_timer_intrvl))

    if self.config['stat_worker']:
        stat_data = {
            'events_rec': self.events_rec,
            'events_hdl': self.events_han,
            'events_hdl_sec': round(ev_hdl_per_s, 2),
            'events_tot_sec': round(ev_tot_per_s, 2),
            'threads_created': self.threads_cre,
            'threads_joined': self.threads_join
        }

        self.threads_cre += 1

        st_worker = SaltEventsdWorker(
            stat_data,
            self.threads_cre,
            None,
            self.backends,
            **self.opts
        )
        st_worker.start()

        try:
            self.running_workers.append(st_worker)
        except __HOLE__:
            log.error('self is missing running_workers')
            try:
                log.info(self)
                log.info(dir(self))
            except Exception:
                log.error('Failed to dump dir(self)')

    try:
        # write the info to the specified log
        statf = open(self.state_file, 'w')
        statf.writelines(
            json.dumps({
                'events_rec': self.events_rec,
                'events_hdl': self.events_han,
                'events_hdl_sec': round(ev_hdl_per_s, 2),
                'events_tot_sec': round(ev_tot_per_s, 2),
                'threads_created': self.threads_cre,
                'threads_joined': self.threads_join
            })
        )

        # if we have the same pid as the pidfile, we are the running daemon
        # and also print the current counters to the logfile with 'info'
        if os.getpid() == self.pid:
            log.info("Running with pid {0}".format(self.pid))
            log.info("Events (han/recv): {0}/{1}".format(
                self.events_han,
                self.events_rec,
            ))
            log.info("Threads (cre/joi):{0}/{1}".format(
                self.threads_cre,
                self.threads_join,
            ))

        statf.write("\n")
        statf.close()
        sys.stdout.flush()
    except IOError as ioerr:
        log.critical("Failed to write state to {0}".format(self.state_file))
        log.exception(ioerr)
    except OSError as oserr:
        log.critical("Failed to write state to {0}".format(self.state_file))
        log.exception(oserr)

    self.stat_rec_count = self.events_rec
    self.stat_hdl_count = self.events_han
AttributeError
dataset/ETHPy150Open felskrone/salt-eventsd/salteventsd/daemon.py/SaltEventsDaemon._write_state
9,120
def _init_events(self, events={}):
    '''
    Creates a dict of precompiled regexes for all defined events
    from config for maximum performance.
    '''
    self.event_map = events

    # we precompile all regexes
    log.info("Initialising events...")

    for key in events.keys():
        # we compile the regex configured in the config
        self.event_map[key]['tag'] = compile(events[key]['tag'])
        log.info("Added event '{0}'".format(key))

        # if subevents are configured, also update them with
        # regex-matching object
        if 'subs' in events[key]:
            for sub_ev in events[key]['subs'].keys():
                try:
                    self.event_map[key]['subs'][sub_ev]['fun'] = compile(events[key]['subs'][sub_ev]['fun'])
                except KeyError:
                    pass

                try:
                    self.event_map[key]['subs'][sub_ev]['tag'] = compile(events[key]['subs'][sub_ev]['tag'])
                except __HOLE__:
                    pass
                log.info("Added sub-event '{0}->{1}'".format(key, sub_ev))
KeyError
dataset/ETHPy150Open felskrone/salt-eventsd/salteventsd/daemon.py/SaltEventsDaemon._init_events
9,121
def param_rischDE(fa, fd, G, DE):
    """
    Solve a Parametric Risch Differential Equation: Dy + f*y == Sum(ci*Gi, (i, 1, m)).
    """
    _, (fa, fd) = weak_normalizer(fa, fd, DE)
    a, (ba, bd), G, hn = prde_normal_denom(ga, gd, G, DE)
    A, B, G, hs = prde_special_denom(a, ba, bd, G, DE)
    g = gcd(A, B)
    A, B, G = A.quo(g), B.quo(g), [gia.cancel(gid*g, include=True) for
        gia, gid in G]
    Q, M = prde_linear_constraints(A, B, G, DE)
    M, _ = constant_system(M, zeros(M.rows, 1), DE)
    # Reduce number of constants at this point
    try:
        # Similar to rischDE(), we try oo, even though it might lead to
        # non-termination when there is no solution. At least for prde_spde,
        # it will always terminate no matter what n is.
        n = bound_degree(A, B, G, DE, parametric=True)
    except __HOLE__:
        debug("param_rischDE: Proceeding with n = oo; may cause "
              "non-termination.")
        n = oo

    A, B, Q, R, n1 = prde_spde(A, B, Q, n, DE)
NotImplementedError
dataset/ETHPy150Open sympy/sympy/sympy/integrals/prde.py/param_rischDE
9,122
def _initialize_testr(self):
    if not os.path.isdir(self.path(".testrepository")):
        LOG.debug("Initialization of 'testr'.")
        cmd = ["testr", "init"]
        if self.venv_wrapper:
            cmd.insert(0, self.venv_wrapper)
        try:
            check_output(cmd, cwd=self.path())
        except (subprocess.CalledProcessError, __HOLE__):
            if os.path.exists(self.path(".testrepository")):
                shutil.rmtree(self.path(".testrepository"))
            raise TempestSetupFailure(_("Failed to initialize 'testr'"))
OSError
dataset/ETHPy150Open openstack/rally/rally/verification/tempest/tempest.py/Tempest._initialize_testr
9,123
def setup_server_socket(self, interface='localhost', port=5050):
    """Sets up the socket listener.

    Args:
        interface: String name of which interface this socket will listen on.
        port: Integer TCP port number the socket will listen on.

    """
    self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

    self.log.info('Starting up on %s port %s', interface, port)

    try:
        self.socket.bind((interface, port))
    except __HOLE__:
        self.log.critical('Socket bind IOError')
        raise

    self.socket.listen(1)
IOError
dataset/ETHPy150Open missionpinball/mpf/mpf/media_controller/core/bcp_server.py/BCPServer.setup_server_socket
9,124
def sending_loop(self):
    """Sending loop which transmits data from the sending queue to the
    remote socket.

    This method is run as a thread.
    """
    try:
        while not self.done:
            msg = self.sending_queue.get()

            if not msg.startswith('dmd_frame'):
                self.log.debug('Sending "%s"', msg)

            try:
                self.connection.sendall(msg + '\n')
            except (__HOLE__, socket.error):
                pass
                # Do we just keep on trying, waiting until a new client
                # connects?

        self.socket.close()
        self.socket = None

        self.mc.socket_thread_stopped()

    except Exception:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        msg = ''.join(line for line in lines)
        self.mc.crash_queue.put(msg)
AttributeError
dataset/ETHPy150Open missionpinball/mpf/mpf/media_controller/core/bcp_server.py/BCPServer.sending_loop
9,125
def exists(self, name):
    # Try to retrieve file info. Return true on success, false on failure.
    remote_path = self._remote_path(name)
    try:
        self.sftp.stat(remote_path)
        return True
    except __HOLE__:
        return False
IOError
dataset/ETHPy150Open jschneier/django-storages/storages/backends/sftpstorage.py/SFTPStorage.exists
9,126
def str_encode(value, encoder='base64'):
    '''
    .. versionadded:: 2014.7.0

    value
        The value to be encoded.

    encoder : base64
        The encoder to use on the subsequent string.

    CLI Example:

    .. code-block:: bash

        salt '*' random.str_encode 'I am a new string' base64
    '''
    try:
        out = value.encode(encoder)
    except LookupError:
        raise SaltInvocationError('You must specify a valid encoder')
    except __HOLE__:
        raise SaltInvocationError('Value must be an encode-able string')
    return out
AttributeError
dataset/ETHPy150Open saltstack/salt/salt/modules/mod_random.py/str_encode
9,127
def _iter_filter(root_dir, data_dict):
    root_fragment = os.path.join(root_dir, 'pex/')
    pex_fragment = '/%s/_pex/' % PEXBuilder.BOOTSTRAP_DIR

    for filename, records in data_dict.items():
        # already acceptable coverage
        if filename.startswith(root_fragment):
            yield (filename, dict((record, None) for record in records))
            continue

        # possible it's coverage from within a pex environment
        try:
            bi = filename.index(pex_fragment)
        except __HOLE__:
            continue

        # rewrite to make up for fact that each pex environment is ephemeral.
        yield (
            os.path.join(root_dir, 'pex', filename[bi + len(pex_fragment):]),
            dict((record, None) for record in records)
        )
ValueError
dataset/ETHPy150Open pantsbuild/pex/scripts/combine_coverage.py/_iter_filter
9,128
def fix_attachlist(font):
    """Fix duplicate attachment points in GDEF table."""
    modified = False
    try:
        attach_points = font['GDEF'].table.AttachList.AttachPoint
    except (__HOLE__, AttributeError):
        attach_points = []
    for attach_point in attach_points:
        points = sorted(set(attach_point.PointIndex))
        if points != attach_point.PointIndex:
            attach_point.PointIndex = points
            attach_point.PointCount = len(points)
            modified = True

    if modified:
        print 'Fixed GDEF.AttachList'
    return modified
KeyError
dataset/ETHPy150Open googlei18n/nototools/nototools/autofix_for_release.py/fix_attachlist
9,129
def get_converter(convertor):
    """function for decoration of convert
    """

    def decorator_selector(data_type, data):
        convert = None
        if data_type in LITERAL_DATA_TYPES:
            if data_type == 'string':
                convert = convert_string
            elif data_type == 'integer':
                convert = convert_integer
            elif data_type == 'float':
                convert = convert_float
            elif data_type == 'boolean':
                convert = convert_boolean
            elif data_type == 'positiveInteger':
                convert = convert_positiveInteger
            elif data_type == 'anyURI':
                convert = convert_anyURI
            elif data_type == 'time':
                convert = convert_time
            elif data_type == 'scale':
                convert = convert_scale
            elif data_type == 'angle':
                convert = convert_angle
            elif data_type == 'nonNegativeInteger':
                convert = convert_positiveInteger
            else:
                raise InvalidParameterValue(
                    "Invalid data_type value of LiteralInput " +
                    "set to '{}'".format(data_type))
        try:
            return convert(data)
        except __HOLE__:
            raise InvalidParameterValue(
                "Could not convert value '{}' to format '{}'".format(
                    data, data_type))

    return decorator_selector
ValueError
dataset/ETHPy150Open geopython/pywps/pywps/inout/literaltypes.py/get_converter
9,130
def ServeFromZipFile(self, zipfilename, name):
    """Helper for the GET request handler.

    This serves the contents of file 'name' from zipfile 'zipfilename',
    logging a message and returning a 404 response if either the zipfile
    cannot be opened or the named file cannot be read from it.

    Args:
      zipfilename: The name of the zipfile.
      name: The name within the zipfile.
    """
    zipfile_object = self.zipfile_cache.get(zipfilename)
    if zipfile_object is None:
        try:
            zipfile_object = zipfile.ZipFile(zipfilename)
        except (IOError, RuntimeError), err:
            logging.error('Can\'t open zipfile %s: %s', zipfilename, err)
            zipfile_object = ''
        self.zipfile_cache[zipfilename] = zipfile_object
    if zipfile_object == '':
        self.error(404)
        self.response.out.write('Not found')
        return
    try:
        data = zipfile_object.read(name)
    except (KeyError, __HOLE__), err:
        self.error(404)
        self.response.out.write('Not found')
        return
    content_type, encoding = mimetypes.guess_type(name)
    if content_type:
        self.response.headers['Content-Type'] = content_type
    self.SetCachingHeaders()
    self.response.out.write(data)
RuntimeError
dataset/ETHPy150Open CollabQ/CollabQ/.google_appengine/google/appengine/ext/zipserve/__init__.py/ZipHandler.ServeFromZipFile
9,131
def read( self, line ):
    """read gff entry from line.

    <seqname> <source> <feature> <start> <end> <score> <strand> <frame> [attributes] [comments]
    """

    data = line[:-1].split("\t")

    try:
        (self.contig, self.source, self.feature,
         self.start, self.end, self.score, self.strand,
         self.frame ) = data[:8]
    except __HOLE__, msg:
        raise ValueError( "parsing error in line %s: %s" % (line[:-1], msg) )

    ## note: frame might be .
    (self.start, self.end) = map(int, (self.start, self.end))
    self.start -= 1

    self.__parseInfo( data[8], line )
ValueError
dataset/ETHPy150Open CGATOxford/cgat/obsolete/GFF.py/Entry.read
9,132
def __parseInfo( self, attributes, line ):
    """parse attributes.
    """

    # remove comments
    attributes = attributes.split( "#" )[0]

    # separate into fields
    fields = map( lambda x: x.strip(), attributes.split(";")[:-1])

    self.mAttributes = {}

    for f in fields:
        d = map( lambda x: x.strip(), f.split(" "))

        n,v = d[0], d[1]
        if len(d) > 2:
            v = d[1:]

        if v[0] == '"' and v[-1] == '"':
            v = v[1:-1]
        else:
            ## try to convert to a value
            try:
                v = float( v )
                v = int( v )
            except ValueError:
                pass
            except __HOLE__:
                pass

        self.mAttributes[n] = v
TypeError
dataset/ETHPy150Open CGATOxford/cgat/obsolete/GFF.py/Entry.__parseInfo
9,133
def do_X(self, callbackMethod, *args, **kwargs):
    if self.path == '/':
        callback = SupyIndex()
    elif self.path in ('/robots.txt',):
        callback = Static('text/plain; charset=utf-8')
    elif self.path in ('/default.css',):
        callback = Static('text/css')
    elif self.path == '/favicon.ico':
        callback = Favicon()
    else:
        subdir = self.path.split('/')[1]
        try:
            callback = self.server.callbacks[subdir]
        except __HOLE__:
            callback = Supy404()

    # Some shortcuts
    for name in ('send_response', 'send_header', 'end_headers', 'rfile',
                 'wfile', 'headers'):
        setattr(callback, name, getattr(self, name))
    # We call doX, because this is more supybotic than do_X.
    path = self.path
    if not callback.fullpath:
        path = '/' + path.split('/', 2)[-1]
    getattr(callback, callbackMethod)(self, path, *args, **kwargs)
KeyError
dataset/ETHPy150Open ProgVal/Limnoria/src/httpserver.py/SupyHTTPRequestHandler.do_X
9,134
def doGetOrHead(self, handler, path, write_content):
    response = None
    file_path = conf.supybot.servers.http.favicon()
    if file_path:
        try:
            icon = open(file_path, 'rb')
            response = icon.read()
        except __HOLE__:
            pass
        finally:
            icon.close()
    if response is not None:
        # I have no idea why, but this headers are already sent.
        # filename = file_path.rsplit(os.sep, 1)[1]
        # if '.' in filename:
        #     ext = filename.rsplit('.', 1)[1]
        # else:
        #     ext = 'ico'
        # self.send_header('Content-Length', len(response))
        # self.send_header('Content-type', 'image/' + ext)
        # self.end_headers()
        if write_content:
            self.wfile.write(response)
    else:
        response = _('No favicon set.')
        if minisix.PY3:
            response = response.encode()
        handler.send_response(404)
        self.send_header('Content-type', 'text/plain; charset=utf-8')
        self.send_header('Content-Length', len(response))
        self.end_headers()
        if write_content:
            self.wfile.write(response)
IOError
dataset/ETHPy150Open ProgVal/Limnoria/src/httpserver.py/Favicon.doGetOrHead
9,135
def _timestamp_from_dirname(self, dir_name):
    if not os.path.isdir(dir_name):
        raise Skip('its not a build directory', dir_name)

    try:
        # Parse the timestamp from the directory name
        date_str = os.path.basename(dir_name)
        time_tuple = time.strptime(date_str, self.datetime_format)
        return time.mktime(time_tuple)
    except __HOLE__:
        return None
ValueError
dataset/ETHPy150Open serverdensity/sd-agent/checks.d/jenkins.py/Jenkins._timestamp_from_dirname
9,136
def _get_build_metadata(self, dir_name, watermark):
    if os.path.exists(os.path.join(dir_name, 'jenkins_build.tar.gz')):
        raise Skip('the build has already been archived', dir_name)

    timestamp = self._timestamp_from_dirname(dir_name)

    # This is not the latest build
    if timestamp is not None and timestamp <= watermark:
        return None

    # Read the build.xml metadata file that Jenkins generates
    build_metadata = os.path.join(dir_name, 'build.xml')

    if not os.access(build_metadata, os.R_OK):
        self.log.debug("Can't read build file at %s" % (build_metadata))
        raise Exception("Can't access build.xml at %s" % (build_metadata))
    else:
        tree = ElementTree()
        tree.parse(build_metadata)

        if timestamp is None:
            try:
                timestamp = self._timestamp_from_build_file(dir_name, tree)
                # This is not the latest build
                if timestamp <= watermark:
                    return None
            except __HOLE__:
                return None

        keys = ['result', 'number', 'duration']

        kv_pairs = ((k, tree.find(k)) for k in keys)
        d = dict([(k, v.text) for k, v in kv_pairs if v is not None])
        d['timestamp'] = timestamp

        try:
            d['branch'] = tree.find('actions')\
                .find('hudson.plugins.git.util.BuildData')\
                .find('buildsByBranchName')\
                .find('entry')\
                .find('hudson.plugins.git.util.Build')\
                .find('revision')\
                .find('branches')\
                .find('hudson.plugins.git.Branch')\
                .find('name')\
                .text
        except Exception:
            pass

        return d
ValueError
dataset/ETHPy150Open serverdensity/sd-agent/checks.d/jenkins.py/Jenkins._get_build_metadata
9,137
def _get_build_results(self, instance_key, job_dir):
    job_name = os.path.basename(job_dir)

    try:
        dirs = glob(os.path.join(job_dir, 'builds', '*_*'))
        # Before Jenkins v1.597 the build folders were named with a timestamp
        # (eg: 2015-03-10_19-59-29)
        # Starting from Jenkins v1.597 they are named after the build ID (1, 2, 3...)
        # So we need try both format when trying to find the latest build and
        # parsing build.xml
        if len(dirs) == 0:
            dirs = glob(os.path.join(job_dir, 'builds', '[0-9]*'))

        if len(dirs) > 0:
            # versions of Jenkins > 1.597 need to be sorted by build number (integer)
            try:
                dirs = sorted(dirs, key=lambda x: int(x.split('/')[-1]), reverse=True)
            except __HOLE__:
                dirs = sorted(dirs, reverse=True)

            # We try to get the last valid build
            for dir_name in dirs:
                watermark = self.high_watermarks[instance_key][job_name]
                try:
                    build_metadata = self._get_build_metadata(dir_name, watermark)
                except Exception:
                    build_metadata = None

                if build_metadata is not None:
                    build_result = build_metadata.get('result')

                    if build_result is None:
                        break

                    output = {
                        'job_name': job_name,
                        'event_type': 'build result'
                    }
                    output.update(build_metadata)

                    if 'number' not in output:
                        output['number'] = dir_name.split('/')[-1]

                    self.high_watermarks[instance_key][job_name] = output.get('timestamp')
                    self.log.debug("Processing %s results '%s'" % (job_name, output))

                    yield output

                # If it not a new build, stop here
                else:
                    break
    except Exception, e:
        self.log.error("Error while working on job %s, exception: %s" % (job_name, e))
ValueError
dataset/ETHPy150Open serverdensity/sd-agent/checks.d/jenkins.py/Jenkins._get_build_results
9,138
def main():
    setupLogging()

    args = parseArguments()

    # read the config file
    try:
        with open(args.file, "r") as config_file:
            config = json.load(config_file)
    except __HOLE__:
        print "Error opening file " + args.file
        return

    # default start date is None meaning 'all time'
    start_date = None
    if args.start_date != None:
        start_date = datetime.strptime(args.start_date, "%Y-%m-%d")

    # default end date is today
    end_date = datetime.today()
    if args.end_date != None:
        end_date = datetime.strptime(args.end_date, "%Y-%m-%d")

    # The ElasticSearch client
    es_client = Elasticsearch(args.es_host + ":" + str(args.es_port))

    data_type = config['type']
    search_body = json.dumps(config['search'])

    # If no start date find the first logstash index containing our docs
    if start_date == None:
        start_date = findDateOfFirstIndex(es_client, data_type, search_body)
        if start_date == None:
            print "No documents found with the query " + search_body
            return

    # The REST API client
    engine_client = EngineApiClient(args.api_host, API_BASE_URL, args.api_port)
    (http_status, response) = engine_client.createJob(json.dumps(config['job_config']))
    if http_status != 201:
        print "Error creatting job"
        print http_status, json.dumps(response)
        return

    job_id = response['id']
    print "Created job with id " + str(job_id)

    doc_count = 0
    for index_name in nextLogStashIndex(start_date, end_date):

        print "Reading from index " + index_name
        skip = 0

        try:
            # Query the documents from ElasticSearch and write to the Engine
            hits = es_client.search(index=index_name, doc_type=data_type,
                                    body=search_body, from_=skip, size=MAX_DOC_TAKE)
        except elasticsearch.exceptions.NotFoundError:
            # Index not found try the next one
            continue

        # upload to the API
        content = json.dumps(elasticSearchDocsToDicts(hits['hits']['hits']))
        (http_status, response) = engine_client.upload(job_id, content)
        if http_status != 202:
            print "Error uploading log content to the Engine"
            print http_status, json.dumps(response)
            continue

        doc_count += len(hits['hits']['hits'])

        # get any other docs
        hitcount = int(hits['hits']['total'])
        while hitcount > (skip + MAX_DOC_TAKE):
            skip += MAX_DOC_TAKE
            hits = es_client.search(index=index_name, doc_type=data_type,
                                    body=search_body, from_=skip, size=MAX_DOC_TAKE)

            content = json.dumps(elasticSearchDocsToDicts(hits['hits']['hits']))
            (http_status, response) = engine_client.upload(job_id, content)
            if http_status != 202:
                print json.dumps(response)
                continue

            doc_count += len(hits['hits']['hits'])

        print "Uploaded {0} records".format(str(doc_count))

    (http_status, response) = engine_client.close(job_id)
    if http_status != 202:
        print "Error closing job"
        print http_status, json.dumps(response)
        return

    print "{0} records successfully written to job {1}".format(str(doc_count), job_id)
IOError
dataset/ETHPy150Open prelert/engine-python/elk_connector/elk_connector.py/main
9,139
def is_running(pid):
    try:
        kill(pid, 0)
    except __HOLE__ as error:
        if error.errno == ESRCH:
            return False
    return True
OSError
dataset/ETHPy150Open circuits/circuits/tests/core/test_signals.py/is_running
9,140
def upsidedown_game(g, smallmap=False, flipbuttons=False, flipsounds=False):
    """Turn a game upside down.

    This modifies the game in-place.

    Args:
      g: The Game to turn upside down.
      smallmap: True if the gfx/map shared region is used as gfx, False
        otherwise.
      flipbuttons: If True, reverses functions regarding reading buttons to
        swap left and right, up and down.
      flipsounds: If True, reverses sound effect / music pattern data.
    """
    last_sprite = 256 if smallmap else 128
    for id in range(last_sprite):
        sprite = g.gfx.get_sprite(id)
        flipped_sprite = reversed(list(reversed(row) for row in sprite))
        g.gfx.set_sprite(id, flipped_sprite)

    last_map_row = 32 if smallmap else 64
    tile_rect = g.map.get_rect_tiles(0, 0, 128, last_map_row)
    flipped_map = reversed(list(reversed(row) for row in tile_rect))
    g.map.set_rect_tiles(flipped_map, 0, 0)

    if flipsounds:
        for id in range(63):
            notes = [g.sfx.get_note(id, n) for n in range(32)]
            notes.reverse()
            for n in range(32):
                g.sfx.set_note(id, n, *notes[n])
            (editor_mode, note_duration, loop_start, loop_end) = \
                g.sfx.get_properties(id)
            if loop_start:
                g.sfx.set_properties(id, loop_start=63-loop_end)
            if loop_end:
                g.sfx.set_properties(id, loop_end=63-loop_start)

    transform = UpsideDownASTTransform(g.lua.tokens, g.lua.root,
                                       smallmap=smallmap,
                                       flipbuttons=flipbuttons)
    try:
        it = transform.walk()
        while True:
            it.__next__()
    except __HOLE__:
        pass
StopIteration
dataset/ETHPy150Open dansanderson/picotool/pico8/demos/upsidedown.py/upsidedown_game
9,141
def __getattr__(self, key):
    try:
        return object.__getattr__(self, key)
    except AttributeError:
        try:
            return super(Leaf, self).__getitem__(self._get_key(key))
        except __HOLE__:
            raise AttributeError()
KeyError
dataset/ETHPy150Open gsi-upm/senpy/senpy/models.py/Leaf.__getattr__
9,142
def __setattr__(self, key, value):
    try:
        object.__getattr__(self, key)
        object.__setattr__(self, key, value)
    except AttributeError:
        key = self._get_key(key)
        if key == "@context":
            value = self.get_context(value)
        elif key == "@id":
            value = self.get_id(value)
        if key[0] == "_":
            object.__setattr__(self, key, value)
        else:
            if value is None:
                try:
                    super(Leaf, self).__delitem__(key)
                except __HOLE__:
                    pass
            else:
                super(Leaf, self).__setitem__(key, value)
KeyError
dataset/ETHPy150Open gsi-upm/senpy/senpy/models.py/Leaf.__setattr__
9,143
@staticmethod
def get_context(context):
    if isinstance(context, list):
        contexts = []
        for c in context:
            contexts.append(Response.get_context(c))
        return contexts
    elif isinstance(context, dict):
        return context
    elif isinstance(context, basestring):
        try:
            with open(context) as f:
                return json.loads(f.read())
        except __HOLE__:
            return context
IOError
dataset/ETHPy150Open gsi-upm/senpy/senpy/models.py/Leaf.get_context
9,144
def __getstate__(self):
    state = styles.Versioned.__getstate__(self)
    for k in ('client', '_isOnline', '_isConnecting'):
        try:
            del state[k]
        except __HOLE__:
            pass
    return state
KeyError
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/words/im/basesupport.py/AbstractAccount.__getstate__
9,145
def get_module_callables(mod, ignore=None):
    """Returns two maps of (*types*, *funcs*) from *mod*, optionally
    ignoring based on the :class:`bool` return value of the *ignore*
    callable. *mod* can be a string name of a module in
    :data:`sys.modules` or the module instance itself.
    """
    if isinstance(mod, basestring):
        mod = sys.modules[mod]
    types, funcs = {}, {}
    for attr_name in dir(mod):
        if ignore and ignore(attr_name):
            continue
        try:
            attr = getattr(mod, attr_name)
        except Exception:
            continue
        try:
            attr_mod_name = attr.__module__
        except __HOLE__:
            continue
        if attr_mod_name != mod.__name__:
            continue
        if isinstance(attr, type):
            types[attr_name] = attr
        elif callable(attr):
            funcs[attr_name] = attr
    return types, funcs
AttributeError
dataset/ETHPy150Open mahmoud/boltons/boltons/funcutils.py/get_module_callables
9,146
def __get__(self, obj, obj_type):
    # These assignments could've been in __init__, but there was
    # no simple way to do it without breaking one of PyPy or Py3.
    self.__name__ = None
    self.__doc__ = self.func.__doc__
    self.__module__ = self.func.__module__

    name = self.__name__
    if name is None:
        for k, v in mro_items(obj_type):
            if v is self:
                self.__name__ = name = k
    if obj is None:
        return make_method(self, obj, obj_type)
    try:
        # since this is a data descriptor, this block
        # is probably only hit once (per object)
        return obj.__dict__[name]
    except __HOLE__:
        obj.__dict__[name] = ret = make_method(self, obj, obj_type)
        return ret
KeyError
dataset/ETHPy150Open mahmoud/boltons/boltons/funcutils.py/CachedInstancePartial.__get__
9,147
def __init__(self, config):
    super(AuthAction, self).__init__(config)

    try:
        ikey = self.config['auth']['ikey']
        skey = self.config['auth']['skey']
        host = self.config['auth']['host']
    except __HOLE__:
        raise ValueError("Duo config not found in config.")

    self.duo_auth = duo_client.Auth(ikey=ikey, skey=skey, host=host)
KeyError
dataset/ETHPy150Open StackStorm/st2contrib/packs/duo/actions/lib/actions.py/AuthAction.__init__
9,148
def _parse(self, body, action):
    try:
        body = body[action]
        group_name = body['name']
    except TypeError:
        msg = _("Missing parameter dict")
        raise webob.exc.HTTPBadRequest(explanation=msg)
    except __HOLE__:
        msg = _("Security group not specified")
        raise webob.exc.HTTPBadRequest(explanation=msg)

    if not group_name or group_name.strip() == '':
        msg = _("Security group name cannot be empty")
        raise webob.exc.HTTPBadRequest(explanation=msg)

    return group_name
KeyError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/api/openstack/compute/legacy_v2/contrib/security_groups.py/SecurityGroupActionController._parse
9,149
def _extend_servers(self, req, servers):
    # TODO(arosen) this function should be refactored to reduce duplicate
    # code and use get_instance_security_groups instead of get_db_instance.
    if not len(servers):
        return
    key = "security_groups"
    context = _authorize_context(req)

    if not openstack_driver.is_neutron_security_groups():
        for server in servers:
            instance = req.get_db_instance(server['id'])
            groups = instance.get(key)
            if groups:
                server[key] = [{"name": group["name"]} for group in groups]
    else:
        # If method is a POST we get the security groups intended for an
        # instance from the request. The reason for this is if using
        # neutron security groups the requested security groups for the
        # instance are not in the db and have not been sent to neutron yet.
        if req.method != 'POST':
            sg_instance_bindings = (
                self.security_group_api
                .get_instances_security_groups_bindings(context, servers))
            for server in servers:
                groups = sg_instance_bindings.get(server['id'])
                if groups:
                    server[key] = groups

        # In this section of code len(servers) == 1 as you can only POST
        # one server in an API request.
        else:
            try:
                # try converting to json
                req_obj = jsonutils.loads(req.body)
                # Add security group to server, if no security group was in
                # request add default since that is the group it is part of
                servers[0][key] = req_obj['server'].get(
                    key, [{'name': 'default'}])
            except __HOLE__:
                root = minidom.parseString(req.body)
                sg_root = root.getElementsByTagName(key)
                groups = []
                if sg_root:
                    security_groups = sg_root[0].getElementsByTagName(
                        'security_group')
                    for security_group in security_groups:
                        groups.append(
                            {'name': security_group.getAttribute('name')})
                if not groups:
                    groups = [{'name': 'default'}]

                servers[0][key] = groups
ValueError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/api/openstack/compute/legacy_v2/contrib/security_groups.py/SecurityGroupsOutputController._extend_servers
9,150
def get_data(self):
    json_string = self.get_json()

    try:
        data = json.loads(json_string)
    except (__HOLE__, TypeError):
        self.log.exception("Error parsing json from postfix-stats")
        return None

    return data
ValueError
dataset/ETHPy150Open BrightcoveOS/Diamond/src/collectors/postfix/postfix.py/PostfixCollector.get_data
9,151
@internationalizeDocstring
def list(self, irc, msg, args, optlist, glob):
    """[--capability=<capability>] [<glob>]

    Returns the valid registered usernames matching <glob>. If <glob> is
    not given, returns all registered usernames.
    """
    predicates = []
    for (option, arg) in optlist:
        if option == 'capability':
            if arg in conf.supybot.capabilities.private():
                try:
                    u = ircdb.users.getUser(msg.prefix)
                    if not u._checkCapability('admin'):
                        raise KeyError
                except KeyError:
                    # Note that it may be raised by checkCapability too.
                    irc.error(_('This is a private capability. Only admins '
                                'can see who has it.'), Raise=True)
            def p(u, cap=arg):
                try:
                    return u._checkCapability(cap)
                except __HOLE__:
                    return False
            predicates.append(p)
    if glob:
        r = re.compile(fnmatch.translate(glob), re.I)
        def p(u):
            return r.match(u.name) is not None
        predicates.append(p)
    users = []
    for u in ircdb.users.values():
        for predicate in predicates:
            if not predicate(u):
                break
        else:
            users.append(u.name)
    if users:
        utils.sortBy(str.lower, users)
        private = self.registryValue("listInPrivate", msg.args[0])
        irc.reply(format('%L', users), private=private)
    else:
        if predicates:
            irc.reply(_('There are no matching registered users.'))
        else:
            irc.reply(_('There are no registered users.'))
KeyError
dataset/ETHPy150Open ProgVal/Limnoria/plugins/User/plugin.py/User.list
9,152
@internationalizeDocstring
def register(self, irc, msg, args, name, password):
    """<name> <password>

    Registers <name> with the given password <password> and the current
    hostmask of the person registering. You shouldn't register twice; if
    you're not recognized as a user but you've already registered, use the
    hostmask add command to add another hostmask to your already-registered
    user, or use the identify command to identify just for a session.
    This command (and all other commands that include a password) must be
    sent to the bot privately, not in a channel.
    """
    addHostmask = True
    try:
        ircdb.users.getUserId(name)
        irc.error(_('That name is already assigned to someone.'),
                  Raise=True)
    except __HOLE__:
        pass
    if ircutils.isUserHostmask(name):
        irc.errorInvalid(_('username'), name,
                         _('Hostmasks are not valid usernames.'),
                         Raise=True)
    try:
        u = ircdb.users.getUser(msg.prefix)
        if u._checkCapability('owner'):
            addHostmask = False
        else:
            irc.error(_('Your hostmask is already registered to %s') % u.name)
            return
    except KeyError:
        pass
    user = ircdb.users.newUser()
    user.name = name
    user.setPassword(password)
    if addHostmask:
        user.addHostmask(msg.prefix)
    ircdb.users.setUser(user)
    irc.replySuccess()
KeyError
dataset/ETHPy150Open ProgVal/Limnoria/plugins/User/plugin.py/User.register
9,153
@internationalizeDocstring
def unregister(self, irc, msg, args, user, password):
    """<name> [<password>]

    Unregisters <name> from the user database. If the user giving this
    command is an owner user, the password is not necessary.
    """
    try:
        caller = ircdb.users.getUser(msg.prefix)
        isOwner = caller._checkCapability('owner')
    except __HOLE__:
        caller = None
        isOwner = False
    if not conf.supybot.databases.users.allowUnregistration():
        if not caller or not isOwner:
            self.log.warning('%s tried to unregister user %s.',
                             msg.prefix, user.name)
            irc.error(_('This command has been disabled. You\'ll have to '
                        'ask the owner of this bot to unregister your '
                        'user.'), Raise=True)
    if isOwner or user.checkPassword(password):
        ircdb.users.delUser(user.id)
        irc.replySuccess()
    else:
        irc.error(conf.supybot.replies.incorrectAuthentication())
KeyError
dataset/ETHPy150Open ProgVal/Limnoria/plugins/User/plugin.py/User.unregister
9,154
@internationalizeDocstring
def changename(self, irc, msg, args, user, newname, password):
    """<name> <new name> [<password>]

    Changes your current user database name to the new name given.
    <password> is only necessary if the user isn't recognized by hostmask.
    This message must be sent to the bot privately (not on a channel) since
    it may contain a password.
    """
    try:
        id = ircdb.users.getUserId(newname)
        irc.error(format(_('%q is already registered.'), newname))
        return
    except __HOLE__:
        pass
    if user.checkHostmask(msg.prefix) or user.checkPassword(password):
        user.name = newname
        ircdb.users.setUser(user)
        irc.replySuccess()
KeyError
dataset/ETHPy150Open ProgVal/Limnoria/plugins/User/plugin.py/User.changename
9,155
@internationalizeDocstring
def password(self, irc, msg, args, user, password, newpassword):
    """[<name>] <old password> <new password>

    Sets the new password for the user specified by <name> to <new
    password>. Obviously this message must be sent to the bot privately
    (not in a channel). If the requesting user is an owner user, then
    <old password> needn't be correct.
    """
    try:
        u = ircdb.users.getUser(msg.prefix)
    except __HOLE__:
        u = None
    if user is None:
        if u is None:
            irc.errorNotRegistered(Raise=True)
        user = u
    if user.checkPassword(password) or \
       (u and u._checkCapability('owner')):
        user.setPassword(newpassword)
        ircdb.users.setUser(user)
        irc.replySuccess()
    else:
        irc.error(conf.supybot.replies.incorrectAuthentication())
KeyError
dataset/ETHPy150Open ProgVal/Limnoria/plugins/User/plugin.py/User.set.password
9,156
@internationalizeDocstring
def username(self, irc, msg, args, hostmask):
    """<hostmask|nick>

    Returns the username of the user specified by <hostmask> or <nick> if
    the user is registered.
    """
    if ircutils.isNick(hostmask):
        try:
            hostmask = irc.state.nickToHostmask(hostmask)
        except KeyError:
            irc.error(_('I haven\'t seen %s.') % hostmask, Raise=True)
    try:
        user = ircdb.users.getUser(hostmask)
        irc.reply(user.name)
    except __HOLE__:
        irc.error(_('I don\'t know who that is.'))
KeyError
dataset/ETHPy150Open ProgVal/Limnoria/plugins/User/plugin.py/User.username
9,157
@internationalizeDocstring
def list(self, irc, msg, args, name):
    """[<name>]

    Returns the hostmasks of the user specified by <name>; if <name>
    isn't specified, returns the hostmasks of the user calling the
    command.
    """
    def getHostmasks(user):
        hostmasks = list(map(repr, user.hostmasks))
        if hostmasks:
            hostmasks.sort()
            return format('%L', hostmasks)
        else:
            return format(_('%s has no registered hostmasks.'), user.name)
    try:
        user = ircdb.users.getUser(msg.prefix)
        if name:
            if name != user.name and \
               not ircdb.checkCapability(msg.prefix, 'owner'):
                irc.error(_('You may only retrieve your own '
                            'hostmasks.'), Raise=True)
            else:
                try:
                    user = ircdb.users.getUser(name)
                    irc.reply(getHostmasks(user))
                except __HOLE__:
                    irc.errorNoUser()
        else:
            irc.reply(getHostmasks(user))
    except KeyError:
        irc.errorNotRegistered()
KeyError
dataset/ETHPy150Open ProgVal/Limnoria/plugins/User/plugin.py/User.hostmask.list
9,158
@internationalizeDocstring
def add(self, irc, msg, args, user, hostmask, password):
    """[<name>] [<hostmask>] [<password>]

    Adds the hostmask <hostmask> to the user specified by <name>. The
    <password> may only be required if the user is not recognized by
    hostmask. <password> is also not required if an owner user is
    giving the command on behalf of some other user. If <hostmask> is
    not given, it defaults to your current hostmask. If <name> is not
    given, it defaults to your currently identified name. This message
    must be sent to the bot privately (not on a channel) since it may
    contain a password.
    """
    if not hostmask:
        hostmask = msg.prefix
    if not ircutils.isUserHostmask(hostmask):
        irc.errorInvalid(_('hostmask'), hostmask,
                         _('Make sure your hostmask includes a nick, '
                           'then an exclamation point (!), then a user, '
                           'then an at symbol (@), then a host. Feel '
                           'free to use wildcards (* and ?, which work '
                           'just like they do on the command line) in '
                           'any of these parts.'),
                         Raise=True)
    try:
        otherId = ircdb.users.getUserId(hostmask)
        if otherId != user.id:
            irc.error(_('That hostmask is already registered.'),
                      Raise=True)
    except KeyError:
        pass
    if not user.checkPassword(password) and \
       not user.checkHostmask(msg.prefix):
        try:
            u = ircdb.users.getUser(msg.prefix)
        except __HOLE__:
            irc.error(conf.supybot.replies.incorrectAuthentication(),
                      Raise=True)
        if not u._checkCapability('owner'):
            irc.error(conf.supybot.replies.incorrectAuthentication(),
                      Raise=True)
    try:
        user.addHostmask(hostmask)
    except ValueError as e:
        irc.error(str(e), Raise=True)
    try:
        ircdb.users.setUser(user)
    except ircdb.DuplicateHostmask:
        irc.error(_('That hostmask is already registered.'),
                  Raise=True)
    except ValueError as e:
        irc.error(str(e), Raise=True)
    irc.replySuccess()
KeyError
dataset/ETHPy150Open ProgVal/Limnoria/plugins/User/plugin.py/User.hostmask.add
9,159
@internationalizeDocstring
def remove(self, irc, msg, args, user, hostmask, password):
    """[<name>] [<hostmask>] [<password>]

    Removes the hostmask <hostmask> from the record of the user
    specified by <name>. If the hostmask given is 'all' then all
    hostmasks will be removed. The <password> may only be required if
    the user is not recognized by their hostmask. This message must be
    sent to the bot privately (not on a channel) since it may contain a
    password. If <hostmask> is not given, it defaults to your current
    hostmask. If <name> is not given, it defaults to your currently
    identified name.
    """
    if not hostmask:
        hostmask = msg.prefix
    if not user.checkPassword(password) and \
       not user.checkHostmask(msg.prefix):
        u = ircdb.users.getUser(msg.prefix)
        if not u._checkCapability('owner'):
            irc.error(conf.supybot.replies.incorrectAuthentication())
            return
    try:
        s = ''
        if hostmask == 'all':
            user.hostmasks.clear()
            s = _('All hostmasks removed.')
        else:
            user.removeHostmask(hostmask)
    except __HOLE__:
        irc.error(_('There was no such hostmask.'))
        return
    ircdb.users.setUser(user)
    irc.replySuccess(s)
KeyError
dataset/ETHPy150Open ProgVal/Limnoria/plugins/User/plugin.py/User.hostmask.remove
9,160
@internationalizeDocstring
def capabilities(self, irc, msg, args, user):
    """[<name>]

    Returns the capabilities of the user specified by <name>; if <name>
    isn't specified, returns the capabilities of the user calling the
    command.
    """
    try:
        u = ircdb.users.getUser(msg.prefix)
    except __HOLE__:
        irc.errorNotRegistered()
    else:
        if u == user or u._checkCapability('admin'):
            irc.reply('[%s]' % '; '.join(user.capabilities), private=True)
        else:
            irc.error(conf.supybot.replies.incorrectAuthentication(),
                      Raise=True)
KeyError
dataset/ETHPy150Open ProgVal/Limnoria/plugins/User/plugin.py/User.capabilities
9,161
@internationalizeDocstring
def identify(self, irc, msg, args, user, password):
    """<name> <password>

    Identifies the user as <name>. This command (and all other
    commands that include a password) must be sent to the bot privately,
    not in a channel.
    """
    if user.checkPassword(password):
        try:
            user.addAuth(msg.prefix)
            ircdb.users.setUser(user, flush=False)
            irc.replySuccess()
        except __HOLE__:
            irc.error(_('Your secure flag is true and your hostmask '
                        'doesn\'t match any of your known hostmasks.'))
    else:
        self.log.warning('Failed identification attempt by %s (password '
                         'did not match for %s).', msg.prefix, user.name)
        irc.error(conf.supybot.replies.incorrectAuthentication())
ValueError
dataset/ETHPy150Open ProgVal/Limnoria/plugins/User/plugin.py/User.identify
9,162
@internationalizeDocstring
def whoami(self, irc, msg, args):
    """takes no arguments

    Returns the name of the user calling the command.
    """
    try:
        user = ircdb.users.getUser(msg.prefix)
        irc.reply(user.name)
    except __HOLE__:
        irc.reply(_('I don\'t recognize you. You can message me either of these two commands: "user identify <username> <password>" to log in or "user register <username> <password>" to register.'))
KeyError
dataset/ETHPy150Open ProgVal/Limnoria/plugins/User/plugin.py/User.whoami
9,163
@internationalizeDocstring
def stats(self, irc, msg, args):
    """takes no arguments

    Returns some statistics on the user database.
    """
    users = 0
    owners = 0
    admins = 0
    hostmasks = 0
    for user in ircdb.users.values():
        users += 1
        hostmasks += len(user.hostmasks)
        try:
            if user._checkCapability('owner'):
                owners += 1
            elif user._checkCapability('admin'):
                admins += 1
        except __HOLE__:
            pass
    irc.reply(format(_('I have %s registered users '
                       'with %s registered hostmasks; '
                       '%n and %n.'),
                     users, hostmasks,
                     (owners, 'owner'), (admins, 'admin')))
KeyError
dataset/ETHPy150Open ProgVal/Limnoria/plugins/User/plugin.py/User.stats
9,164
def run_test(args, i, t):
    out = {}
    outdir = None
    if args.force_test_tool:
        t["tool"] = args.force_test_tool
    if t["tool"].split("/")[-1] != re.sub("\-test\.yaml$", ".cwl", args.test.split("/")[-1]):
        raise CompareFail("%s != %s" % (t["tool"].split("/")[-1],
                                        re.sub("\-test\.yaml$", ".cwl", args.test.split("/")[-1])))
    try:
        if "output" in t and not args.conformance_test:
            test_command = [args.tool]

            if "outdir" in t:
                outdir = t["outdir"]
            else:
                # Add prefixes if running on MacOSX so that boot2docker writes to /Users
                if 'darwin' in sys.platform:
                    outdir = tempfile.mkdtemp(prefix=os.path.abspath(os.path.curdir))
                else:
                    outdir = tempfile.mkdtemp()
                if args.tmp:
                    test_command.extend(["--tmp-outdir-prefix={}".format(outdir),
                                         "--tmpdir-prefix={}".format(outdir)])
            if args.push_image:
                test_command.extend(["--push-image"])
            test_command.extend(["--outdir={}".format(outdir),
                                 t["tool"],
                                 t["job"]])
            outstr = subprocess.check_output(test_command)
            out = {"output": json.loads(outstr)}
        else:
            test_command = [args.tool,
                            "--conformance-test",
                            "--basedir=" + args.basedir,
                            "--no-container",
                            "--quiet",
                            t["tool"],
                            t["job"]]

            outstr = subprocess.check_output(test_command)
            outstr = re.sub("^\[.*\n", "", outstr)
            out = yaml.load(outstr)
    except __HOLE__ as v:
        _logger.error(v)
        _logger.error(outstr)
    except subprocess.CalledProcessError as err:
        if err.returncode == UNSUPPORTED_FEATURE:
            return UNSUPPORTED_FEATURE
        else:
            _logger.error("""Test failed: %s""",
                          " ".join([pipes.quote(tc) for tc in test_command]))
            _logger.error(t.get("doc"))
            _logger.error("Returned non-zero")
            return 1
    except yaml.scanner.ScannerError as e:
        _logger.error("""Test failed: %s""",
                      " ".join([pipes.quote(tc) for tc in test_command]))
        _logger.error(outstr)
        _logger.error("Parse error %s", str(e))

    pwd = os.path.abspath(os.path.dirname(t["job"]))
    # t["args"] = map(lambda x: x.replace("$PWD", pwd), t["args"])
    # if "stdin" in t:
    #     t["stdin"] = t["stdin"].replace("$PWD", pwd)

    failed = False
    if "output" in t and not args.conformance_test:
        checkkeys = ["output"]
    else:
        checkkeys = ["args", "stdin", "stdout", "createfiles"]

    ignore = None
    if "ignore_keys" in t:
        ignore = t["ignore_keys"]

    for key in checkkeys:
        try:
            compare(t.get(key), out.get(key), ignore)
        except CompareFail as ex:
            _logger.warn("""Test failed: %s""",
                         " ".join([pipes.quote(tc) for tc in test_command]))
            _logger.warn(t.get("doc"))
            _logger.warn("%s expected %s\n got %s", key,
                         json.dumps(t.get(key), indent=4, sort_keys=True),
                         json.dumps(out.get(key), indent=4, sort_keys=True))
            _logger.warn("Compare failure %s", ex)
            failed = True

    # if outdir:
    #     shutil.rmtree(outdir, True)

    if failed:
        return 1
    else:
        return 0
ValueError
dataset/ETHPy150Open common-workflow-language/workflows/test/cwltest.py/run_test
9,165
@utils.accept_singleton(basestring)
    @utils.empty_arg_shortcircuit(return_code='{}')
    def upload(self, filepaths, transcode_quality='320k', enable_matching=False):
        """Uploads the given filepaths.

        All non-mp3 files will be transcoded before being uploaded.
        This is a limitation of Google's backend.

        An available installation of ffmpeg or avconv is required in most cases:
        see `the installation page
        <https://unofficial-google-music-api.readthedocs.org/en
        /latest/usage.html?#installation>`__ for details.

        Returns a 3-tuple ``(uploaded, matched, not_uploaded)`` of dictionaries, eg::

            (
                {'<filepath>': '<new server id>'},               # uploaded
                {'<filepath>': '<new server id>'},               # matched
                {'<filepath>': '<reason, eg ALREADY_EXISTS>'}    # not uploaded
            )

        :param filepaths: a list of filepaths, or a single filepath.

        :param transcode_quality: if int, pass to ffmpeg/avconv ``-q:a`` for libmp3lame
          (`lower-better int,
          <http://trac.ffmpeg.org/wiki/Encoding%20VBR%20(Variable%20Bit%20Rate)%20mp3%20audio>`__).
          If string, pass to ffmpeg/avconv ``-b:a`` (eg ``'128k'`` for an average bitrate of 128k).
          The default is 320kbps cbr (the highest possible quality).

        :param enable_matching: if ``True``, attempt to use `scan and match
          <http://support.google.com/googleplay/bin/answer.py?hl=en&answer=2920799&topic=2450455>`__
          to avoid uploading every song.
          This requires ffmpeg or avconv.

          **WARNING**: currently, mismatched songs can *not* be fixed with the 'Fix Incorrect Match'
          button nor :py:func:`report_incorrect_match
          <gmusicapi.clients.Webclient.report_incorrect_match>`.
          They would have to be deleted and reuploaded with matching disabled
          (or with the Music Manager).
          Fixing matches from gmusicapi may be supported in a future release; see issue `#89
          <https://github.com/simon-weber/gmusicapi/issues/89>`__.

        All Google-supported filetypes are supported; see `Google's documentation
        <http://support.google.com/googleplay/bin/answer.py?hl=en&answer=1100462>`__.

        If ``PERMANENT_ERROR`` is given as a not_uploaded reason, attempts to reupload will never
        succeed. The file will need to be changed before the server will reconsider it; the
        easiest way is to change metadata tags (it's not important that the tag be uploaded,
        just that the contents of the file change somehow).
        """

        if self.uploader_id is None or self.uploader_name is None:
            raise NotLoggedIn("Not authenticated as an upload device;"
                              " run Api.login(...perform_upload_auth=True...)"
                              " first.")

        # TODO there is way too much code in this function.

        # To return.
        uploaded = {}
        matched = {}
        not_uploaded = {}

        # Gather local information on the files.
        local_info = {}  # {clientid: (path, Track)}
        for path in filepaths:
            try:
                track = musicmanager.UploadMetadata.fill_track_info(path)
            except BaseException as e:
                self.logger.exception("problem gathering local info of '%r'", path)

                user_err_msg = str(e)

                if 'Non-ASCII strings must be converted to unicode' in str(e):
                    # This is a protobuf-specific error; they require either ascii or unicode.
                    # To keep behavior consistent, make no effort to guess - require users
                    # to decode first.
                    user_err_msg = ("nonascii bytestrings must be decoded to unicode"
                                    " (error: '%s')" % user_err_msg)

                not_uploaded[path] = user_err_msg
            else:
                local_info[track.client_id] = (path, track)

        if not local_info:
            return uploaded, matched, not_uploaded

        # TODO allow metadata faking

        # Upload metadata; the server tells us what to do next.
        res = self._make_call(musicmanager.UploadMetadata,
                              [t for (path, t) in local_info.values()],
                              self.uploader_id)

        # TODO checking for proper contents should be handled in verification
        md_res = res.metadata_response

        responses = [r for r in md_res.track_sample_response]
        sample_requests = [req for req in md_res.signed_challenge_info]

        # Send scan and match samples if requested.
        for sample_request in sample_requests:
            path, track = local_info[sample_request.challenge_info.client_track_id]

            bogus_sample = None
            if not enable_matching:
                bogus_sample = b''  # just send empty bytes

            try:
                res = self._make_call(musicmanager.ProvideSample,
                                      path, sample_request, track,
                                      self.uploader_id, bogus_sample)
            except (IOError, __HOLE__) as e:
                self.logger.warning("couldn't create scan and match sample for '%r': %s",
                                    path, str(e))
                not_uploaded[path] = str(e)
            else:
                responses.extend(res.sample_response.track_sample_response)

        # Read sample responses and prep upload requests.
        to_upload = {}  # {serverid: (path, Track, do_not_rematch?)}
        for sample_res in responses:
            path, track = local_info[sample_res.client_track_id]

            if sample_res.response_code == upload_pb2.TrackSampleResponse.MATCHED:
                self.logger.info("matched '%r' to sid %s", path, sample_res.server_track_id)
                matched[path] = sample_res.server_track_id

                if not enable_matching:
                    self.logger.error("'%r' was matched without matching enabled", path)
            elif sample_res.response_code == upload_pb2.TrackSampleResponse.UPLOAD_REQUESTED:
                to_upload[sample_res.server_track_id] = (path, track, False)
            else:
                # there was a problem
                # report the symbolic name of the response code enum for debugging
                enum_desc = upload_pb2._TRACKSAMPLERESPONSE.enum_types[0]
                res_name = enum_desc.values_by_number[sample_res.response_code].name

                err_msg = "TrackSampleResponse code %s: %s" % (sample_res.response_code, res_name)

                if res_name == 'ALREADY_EXISTS':
                    # include the sid, too
                    # this shouldn't be relied on externally, but I use it in
                    # tests - being surrounded by parens is how it's matched
                    err_msg += "(%s)" % sample_res.server_track_id

                self.logger.warning("upload of '%r' rejected: %s", path, err_msg)
                not_uploaded[path] = err_msg

        # Send upload requests.
        if to_upload:
            # TODO reordering requests could avoid wasting time waiting for reup sync
            self._make_call(musicmanager.UpdateUploadState, 'start', self.uploader_id)

            for server_id, (path, track, do_not_rematch) in to_upload.items():
                # It can take a few tries to get a session.
                should_retry = True
                attempts = 0

                while should_retry and attempts < 10:
                    session = self._make_call(musicmanager.GetUploadSession,
                                              self.uploader_id, len(uploaded),
                                              track, path, server_id, do_not_rematch)
                    attempts += 1

                    got_session, error_details = \
                        musicmanager.GetUploadSession.process_session(session)

                    if got_session:
                        self.logger.info("got an upload session for '%r'", path)
                        break

                    should_retry, reason, error_code = error_details
                    self.logger.debug("problem getting upload session: %s\ncode=%s retrying=%s",
                                      reason, error_code, should_retry)

                    if error_code == 200 and do_not_rematch:
                        # reupload requests need to wait on a server sync
                        # 200 == already uploaded, so force a retry in this case
                        should_retry = True

                    time.sleep(6)  # wait before retrying
                else:
                    err_msg = "GetUploadSession error %s: %s" % (error_code, reason)

                    self.logger.warning("giving up on upload session for '%r': %s", path, err_msg)
                    not_uploaded[path] = err_msg

                    continue  # to next upload

                # got a session, do the upload
                # this terribly inconsistent naming isn't my fault: Google--
                session = session['sessionStatus']
                external = session['externalFieldTransfers'][0]

                session_url = external['putInfo']['url']
                content_type = external.get('content_type', 'audio/mpeg')

                if track.original_content_type != locker_pb2.Track.MP3:
                    try:
                        self.logger.info("transcoding '%r' to mp3", path)
                        contents = utils.transcode_to_mp3(path, quality=transcode_quality)
                    except (IOError, ValueError) as e:
                        self.logger.warning("error transcoding %r: %s", path, e)
                        not_uploaded[path] = "transcoding error: %s" % e
                        continue
                else:
                    with open(path, 'rb') as f:
                        contents = f.read()

                upload_response = self._make_call(musicmanager.UploadFile,
                                                  session_url, content_type, contents)

                success = upload_response.get('sessionStatus', {}).get('state')

                if success:
                    uploaded[path] = server_id
                else:
                    # 404 == already uploaded? serverside check on clientid?
                    self.logger.debug("could not finalize upload of '%r'. response: %s",
                                      path, upload_response)
                    not_uploaded[path] = 'could not finalize upload; details in log'

            self._make_call(musicmanager.UpdateUploadState, 'stopped', self.uploader_id)

        return uploaded, matched, not_uploaded
ValueError
dataset/ETHPy150Open simon-weber/gmusicapi/gmusicapi/clients/musicmanager.py/Musicmanager.upload
9,166
def configure_cache_backend(self, value):
        if value is None:
            # DEFAULT_CACHE_ALIAS doesn't exist in Django<=1.2
            try:
                from django.core.cache import DEFAULT_CACHE_ALIAS as default_cache_alias
            except ImportError:
                default_cache_alias = 'default'

            caches = getattr(settings, 'CACHES', None)
            if caches is None:
                # Support Django<=1.2: there is no default `CACHES` setting
                try:
                    from django.core.cache.backends.dummy import DummyCache
                except __HOLE__:
                    dummy_cache = 'dummy://'
                else:
                    dummy_cache = 'django.core.cache.backends.dummy.DummyCache'
                return dummy_cache

            if default_cache_alias in caches:
                value = default_cache_alias
            else:
                raise ValueError("The default cache alias '%s' is not available in CACHES"
                                 % default_cache_alias)
        return value
ImportError
dataset/ETHPy150Open matthewwithanm/django-imagekit/imagekit/conf.py/ImageKitConf.configure_cache_backend
9,167
def cache_authenticate(self):
        cookie = ""
        try:
            f = open(".rt_cache", "r")
            cookie = f.read().strip()
        except __HOLE__:
            return None
        if not(self.cert_check(RTConnect.RT_URL, 443)):
            print "SSL Failed! Something is wrong!"
            return 0
        test_agent = httplib.HTTPSConnection("rt.rescomp.berkeley.edu", 443, timeout=30)
        test_headers = {"User-Agent": RTConnect.USER_AGENT,
                        "Connection": "keep-alive",
                        "Cookie": cookie}
        test_agent.request("GET", "/REST/1.0/index.html", "", test_headers)
        test_response = test_agent.getresponse()
        if test_response.status == 200:
            return cookie
        else:
            return None

    # RT's default format for emails delivered via REST is pretty messy, so this cleans it up.
IOError
dataset/ETHPy150Open maxburkhardt/nessus-parser/util/rt.py/RTConnect.cache_authenticate
9,168
def process_record(self, new, old=None):
        """Validate records against collection schema, if any."""
        new = super(Record, self).process_record(new, old)

        schema = self._collection.get('schema')
        settings = self.request.registry.settings
        schema_validation = 'experimental_collection_schema_validation'
        if not schema or not asbool(settings.get(schema_validation)):
            return new

        collection_timestamp = self._collection[self.model.modified_field]

        try:
            stripped = copy.deepcopy(new)
            stripped.pop(self.model.id_field, None)
            stripped.pop(self.model.modified_field, None)
            stripped.pop(self.model.permissions_field, None)
            stripped.pop(self.schema_field, None)
            jsonschema.validate(stripped, schema)
        except jsonschema_exceptions.ValidationError as e:
            try:
                field = e.path.pop() if e.path else e.validator_value.pop()
            except __HOLE__:
                field = None
            raise_invalid(self.request, name=field, description=e.message)

        new[self.schema_field] = collection_timestamp
        return new
AttributeError
dataset/ETHPy150Open Kinto/kinto/kinto/views/records.py/Record.process_record
9,169
def store(self, response):
        """
        Takes an HTTP response object and stores it in the cache according to
        RFC 2616. Returns a boolean value indicating whether the response was
        cached or not.

        :param response: Requests :class:`Response <Response>` object to cache.
        """
        # Define an internal utility function.
        def date_header_or_default(header_name, default, response):
            try:
                date_header = response.headers[header_name]
            except __HOLE__:
                value = default
            else:
                value = parse_date_header(date_header)
            return value

        if response.status_code not in CACHEABLE_RCS:
            return False

        if response.request.method not in CACHEABLE_VERBS:
            return False

        url = response.url
        now = datetime.utcnow()

        # Get the value of the 'Date' header, if it exists. If it doesn't, just
        # use now.
        creation = date_header_or_default('Date', now, response)

        # Get the value of the 'Cache-Control' header, if it exists.
        cc = response.headers.get('Cache-Control', None)
        if cc is not None:
            expiry = expires_from_cache_control(cc, now)

            # If the above returns None, we are explicitly instructed not to
            # cache this.
            if expiry is None:
                return False

        # Get the value of the 'Expires' header, if it exists, and if we don't
        # have anything from the 'Cache-Control' header.
        if cc is None:
            expiry = date_header_or_default('Expires', None, response)

        # If the expiry date is earlier or the same as the Date header, don't
        # cache the response at all.
        if expiry is not None and expiry <= creation:
            return False

        # If there's a query portion of the url and it's a GET, don't cache
        # this unless explicitly instructed to.
        if expiry is None and response.request.method == 'GET':
            if url_contains_query(url):
                return False

        self._cache[url] = {'response': response,
                            'creation': creation,
                            'expiry': expiry}

        self.__reduce_cache_count()

        return True
KeyError
dataset/ETHPy150Open Lukasa/httpcache/httpcache/cache.py/HTTPCache.store
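The `date_header_or_default` helper above drives the whole expiry decision: an absent header falls through to a default via KeyError rather than a membership pre-check. A standalone sketch of the same pattern, with the stdlib `email.utils` parser standing in for httpcache's own `parse_date_header` (an assumption made purely for illustration):

# Sketch only; the stdlib parser substitutes for httpcache's parse_date_header.
from datetime import datetime
from email.utils import parsedate_to_datetime

def date_header_or_default(header_name, default, headers):
    try:
        raw = headers[header_name]      # KeyError when the header is absent
    except KeyError:
        return default
    return parsedate_to_datetime(raw)

headers = {'Date': 'Tue, 15 Nov 1994 08:12:31 GMT'}
creation = date_header_or_default('Date', datetime.utcnow(), headers)
expiry = date_header_or_default('Expires', None, headers)  # no header -> None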
9,170
def handle_304(self, response):
        """
        Given a 304 response, retrieves the cached entry. This unconditionally
        returns the cached entry, so it can be used when the 'intelligent'
        behaviour of retrieve() is not desired.

        Returns None if there is no entry in the cache.

        :param response: The 304 response to find the cached entry for. Should
                         be a Requests :class:`Response <Response>`.
        """
        try:
            cached_response = self._cache[response.url]['response']
        except __HOLE__:
            cached_response = None

        return cached_response
KeyError
dataset/ETHPy150Open Lukasa/httpcache/httpcache/cache.py/HTTPCache.handle_304
9,171
def retrieve(self, request):
        """
        Retrieves a cached response if possible.

        If there is a response that can be unconditionally returned (e.g. one
        that had a Cache-Control header set), that response is returned. If
        there is one that can be conditionally returned (if a 304 is returned),
        applies an If-Modified-Since header to the request and returns None.

        :param request: The Requests :class:`PreparedRequest <PreparedRequest>` object.
        """
        return_response = None
        url = request.url

        try:
            cached_response = self._cache[url]
        except __HOLE__:
            return None

        if request.method not in NON_INVALIDATING_VERBS:
            del self._cache[url]
            return None

        if cached_response['expiry'] is None:
            # We have no explicit expiry time, so we weren't instructed to
            # cache. Add an 'If-Modified-Since' header.
            creation = cached_response['creation']
            header = build_date_header(creation)
            request.headers['If-Modified-Since'] = header
        else:
            # We have an explicit expiry time. If we're earlier than the expiry
            # time, return the response.
            now = datetime.utcnow()

            if now <= cached_response['expiry']:
                return_response = cached_response['response']
            else:
                del self._cache[url]

        return return_response
KeyError
dataset/ETHPy150Open Lukasa/httpcache/httpcache/cache.py/HTTPCache.retrieve
9,172
def detach_volume(self, connection_info, instance, mountpoint,
                      encryption=None):
        """Detach the disk attached to the instance."""
        try:
            del self._mounts[instance.name][mountpoint]
        except __HOLE__:
            pass
KeyError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/virt/fake.py/FakeDriver.detach_volume
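The except-KeyError-and-pass in `detach_volume` is the spelled-out form of "delete if present"; `dict.pop` with a default expresses the same thing in one line. A tiny sketch (the nested dict merely mimics the shape of `self._mounts`):

# Equivalent delete-if-present idiom:
mounts = {'vm1': {'/dev/vdb': 'connection-info'}}
mounts['vm1'].pop('/dev/vdb', None)  # removes the key; silent if already gone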
9,173
def detach_interface(self, instance, vif):
        try:
            del self._interfaces[vif['id']]
        except __HOLE__:
            raise exception.InterfaceDetachFailed(
                instance_uuid=instance.uuid)
KeyError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/virt/fake.py/FakeDriver.detach_interface
9,174
def stop(self):
        """
        Tells the ChromeDriver to stop and cleans up the process
        """
        # If it's dead, don't worry
        if self.process is None:
            return

        # Tell the Server to die!
        try:
            from urllib import request as url_request
        except __HOLE__:
            import urllib2 as url_request

        url_request.urlopen("http://127.0.0.1:%d/shutdown" % self.port)
        count = 0
        while utils.is_connectable(self.port):
            if count == 30:
                break
            count += 1
            time.sleep(1)

        # Tell the Server to properly die in case
        try:
            if self.process:
                self.process.kill()
                self.process.wait()
        except OSError:
            # kill may not be available under windows environment
            pass
ImportError
dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/selenium/webdriver/chrome/service.py/Service.stop
9,175
@logging_level.setter
    def logging_level(self, value):
        if value is None:
            value = self._default_logging_level
        if isinstance(value, (bytes, unicode)):
            try:
                level = _levelNames[value.upper()]
            except __HOLE__:
                raise ValueError('Unrecognized logging level: {}'.format(value))
        else:
            try:
                level = int(value)
            except ValueError:
                raise ValueError('Unrecognized logging level: {}'.format(value))
        self._logger.setLevel(level)
KeyError
dataset/ETHPy150Open splunk/splunk-sdk-python/splunklib/searchcommands/search_command.py/SearchCommand.logging_level
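The setter accepts either a level name or a number, using KeyError and ValueError to tell the two failure modes apart. A rough standalone equivalent built on the public `logging` API rather than the private `_levelNames` table the record relies on (so this is an approximation, not splunklib's code):

# Sketch: logging.getLevelName returns the int for known level names.
import logging

def resolve_level(value, default=logging.WARNING):
    if value is None:
        return default
    if isinstance(value, str):
        level = logging.getLevelName(value.upper())
        if not isinstance(level, int):
            raise ValueError('Unrecognized logging level: {}'.format(value))
        return level
    try:
        return int(value)
    except ValueError:
        raise ValueError('Unrecognized logging level: {}'.format(value))

logging.getLogger(__name__).setLevel(resolve_level('DEBUG'))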
9,176
@property
    def search_results_info(self):
        """ Returns the search results info for this command invocation.

        The search results info object is created from the search results info
        file associated with the command invocation.

        :return: Search results info or :const:`None`, if the search results
            info file associated with the command invocation is inaccessible.
        :rtype: SearchResultsInfo or NoneType
        """
        if self._search_results_info is not None:
            return self._search_results_info

        if self._protocol_version == 1:
            try:
                path = self._input_header['infoPath']
            except KeyError:
                return None
        else:
            assert self._protocol_version == 2

            try:
                dispatch_dir = self._metadata.searchinfo.dispatch_dir
            except AttributeError:
                return None

            path = os.path.join(dispatch_dir, 'info.csv')

        try:
            with open(path, 'rb') as f:
                reader = csv.reader(f, dialect=CsvDialect)
                fields = reader.next()
                values = reader.next()
        except IOError as error:
            if error.errno == 2:
                self.logger.error('Search results info file {} does not exist.'.format(
                    json_encode_string(path)))
                return
            raise

        def convert_field(field):
            return (field[1:] if field[0] == '_' else field).replace('.', '_')

        decode = MetadataDecoder().decode

        def convert_value(value):
            try:
                return decode(value) if len(value) > 0 else value
            except ValueError:
                return value

        info = ObjectView(dict(imap(lambda (f, v): (convert_field(f), convert_value(v)),
                                    izip(fields, values))))

        try:
            count_map = info.countMap
        except AttributeError:
            pass
        else:
            count_map = count_map.split(';')
            n = len(count_map)
            info.countMap = dict(izip(islice(count_map, 0, n, 2),
                                      islice(count_map, 1, n, 2)))

        try:
            msg_type = info.msgType
            msg_text = info.msg
        except AttributeError:
            pass
        else:
            messages = ifilter(lambda (t, m): t or m,
                               izip(msg_type.split('\n'), msg_text.split('\n')))
            info.msg = [Message(message) for message in messages]
            del info.msgType

        try:
            info.vix_families = ElementTree.fromstring(info.vix_families)
        except __HOLE__:
            pass

        self._search_results_info = info
        return info
AttributeError
dataset/ETHPy150Open splunk/splunk-sdk-python/splunklib/searchcommands/search_command.py/SearchCommand.search_results_info
9,177
@property
    def service(self):
        """ Returns a Splunk service object for this command invocation or None.

        The service object is created from the Splunkd URI and authentication
        token passed to the command invocation in the search results info file.
        This data is not passed to a command invocation by default. You must
        request it by specifying this pair of configuration settings in
        commands.conf:

        .. code-block:: python

            enableheader = true
            requires_srinfo = true

        The :code:`enableheader` setting is :code:`true` by default. Hence, you
        need not set it. The :code:`requires_srinfo` setting is false by
        default. Hence, you must set it.

        :return: :class:`splunklib.client.Service`, if :code:`enableheader` and
            :code:`requires_srinfo` are both :code:`true`. Otherwise, if either
            :code:`enableheader` or :code:`requires_srinfo` are :code:`false`,
            a value of :code:`None` is returned.
        """
        if self._service is not None:
            return self._service

        metadata = self._metadata

        if metadata is None:
            return None

        try:
            searchinfo = self._metadata.searchinfo
        except __HOLE__:
            return None

        splunkd_uri = searchinfo.splunkd_uri

        if splunkd_uri is None:
            return None

        uri = urlsplit(splunkd_uri, allow_fragments=False)

        self._service = Service(
            scheme=uri.scheme, host=uri.hostname, port=uri.port,
            app=searchinfo.app, token=searchinfo.session_key)

        return self._service

    # endregion

    # region Methods
AttributeError
dataset/ETHPy150Open splunk/splunk-sdk-python/splunklib/searchcommands/search_command.py/SearchCommand.service
9,178
def _prepare_protocol_v1(self, argv, ifile, ofile):

        debug = environment.splunklib_logger.debug

        # Provide as much context as possible in advance of parsing the command
        # line and preparing for execution
        self._input_header.read(ifile)
        self._protocol_version = 1
        self._map_metadata(argv)

        debug(' metadata=%r, input_header=%r', self._metadata, self._input_header)

        try:
            tempfile.tempdir = self._metadata.searchinfo.dispatch_dir
        except __HOLE__:
            raise RuntimeError('{}.metadata.searchinfo.dispatch_dir is undefined'.format(
                self.__class__.__name__))

        debug(' tempfile.tempdir=%r', tempfile.tempdir)

        CommandLineParser.parse(self, argv[2:])
        self.prepare()

        if self.record:
            self.record = False

            record_argv = [argv[0], argv[1], str(self._options), ' '.join(self.fieldnames)]
            ifile, ofile = self._prepare_recording(record_argv, ifile, ofile)
            self._record_writer.ofile = ofile
            ifile.record(str(self._input_header), '\n\n')

        if self.show_configuration:
            self.write_info(self.name + ' command configuration: ' + str(self._configuration))

        return ifile  # wrapped, if self.record is True
AttributeError
dataset/ETHPy150Open splunk/splunk-sdk-python/splunklib/searchcommands/search_command.py/SearchCommand._prepare_protocol_v1
9,179
def _process_protocol_v1(self, argv, ifile, ofile):

        debug = environment.splunklib_logger.debug
        class_name = self.__class__.__name__

        debug('%s.process started under protocol_version=1', class_name)
        self._record_writer = RecordWriterV1(ofile)

        # noinspection PyBroadException
        try:
            if argv[1] == '__GETINFO__':

                debug('Writing configuration settings')

                ifile = self._prepare_protocol_v1(argv, ifile, ofile)
                self._record_writer.write_record({
                    n: ','.join(v) if isinstance(v, (list, tuple)) else v
                    for n, v in self._configuration.iteritems()})
                self.finish()

            elif argv[1] == '__EXECUTE__':

                debug('Executing')

                ifile = self._prepare_protocol_v1(argv, ifile, ofile)
                self._records = self._records_protocol_v1
                self._metadata.action = 'execute'
                self._execute(ifile, None)

            else:
                message = (
                    'Command {0} appears to be statically configured for search command protocol version 1 and static '
                    'configuration is unsupported by splunklib.searchcommands. Please ensure that '
                    'default/commands.conf contains this stanza:\n'
                    '[{0}]\n'
                    'filename = {1}\n'
                    'enableheader = true\n'
                    'outputheader = true\n'
                    'requires_srinfo = true\n'
                    'supports_getinfo = true\n'
                    'supports_multivalues = true\n'
                    'supports_rawargs = true'.format(self.name, os.path.basename(argv[0])))
                raise RuntimeError(message)

        except (SyntaxError, __HOLE__) as error:
            self.write_error(unicode(error))
            self.flush()
            exit(0)

        except SystemExit:
            self.flush()
            raise

        except:
            self._report_unexpected_error()
            self.flush()
            exit(1)

        debug('%s.process finished under protocol_version=1', class_name)
ValueError
dataset/ETHPy150Open splunk/splunk-sdk-python/splunklib/searchcommands/search_command.py/SearchCommand._process_protocol_v1
9,180
def _process_protocol_v2(self, argv, ifile, ofile):
        """ Processes records on the input stream, optionally writing records
        to the output stream.

        :param ifile: Input file object.
        :type ifile: file or InputType

        :param ofile: Output file object.
        :type ofile: file or OutputType

        :return: :const:`None`

        """
        debug = environment.splunklib_logger.debug
        class_name = self.__class__.__name__

        debug('%s.process started under protocol_version=2', class_name)
        self._protocol_version = 2

        # Read search command metadata from splunkd
        # noinspection PyBroadException
        try:
            debug('Reading metadata')
            metadata, body = self._read_chunk(ifile)

            action = getattr(metadata, 'action', None)

            if action != 'getinfo':
                raise RuntimeError('Expected getinfo action, not {}'.format(action))

            if len(body) > 0:
                raise RuntimeError('Did not expect data for getinfo action')

            self._metadata = deepcopy(metadata)

            searchinfo = self._metadata.searchinfo

            searchinfo.earliest_time = float(searchinfo.earliest_time)
            searchinfo.latest_time = float(searchinfo.latest_time)
            searchinfo.search = unquote(searchinfo.search)

            self._map_input_header()

            debug(' metadata=%r, input_header=%r', self._metadata, self._input_header)

            try:
                tempfile.tempdir = self._metadata.searchinfo.dispatch_dir
            except AttributeError:
                raise RuntimeError('%s.metadata.searchinfo.dispatch_dir is undefined'.format(class_name))

            debug(' tempfile.tempdir=%r', tempfile.tempdir)

        except:
            self._record_writer = RecordWriterV2(ofile)
            self._report_unexpected_error()
            self.finish()
            exit(1)

        # Write search command configuration for consumption by splunkd
        # noinspection PyBroadException
        try:
            self._record_writer = RecordWriterV2(ofile, getattr(self._metadata, 'maxresultrows', None))
            self.fieldnames = []
            self.options.reset()

            args = self.metadata.searchinfo.args
            error_count = 0

            debug('Parsing arguments')

            if args and type(args) == list:
                for arg in args:
                    result = arg.split('=', 1)
                    if len(result) == 1:
                        self.fieldnames.append(result[0])
                    else:
                        name, value = result
                        try:
                            option = self.options[name]
                        except KeyError:
                            self.write_error('Unrecognized option: {}={}'.format(name, value))
                            error_count += 1
                            continue
                        try:
                            option.value = value
                        except ValueError:
                            self.write_error('Illegal value: {}={}'.format(name, value))
                            error_count += 1
                            continue

            missing = self.options.get_missing()

            if missing is not None:
                if len(missing) == 1:
                    self.write_error('A value for "{}" is required'.format(missing[0]))
                else:
                    self.write_error('Values for these required options are missing: {}'.format(
                        ', '.join(missing)))
                error_count += 1

            if error_count > 0:
                exit(1)

            debug(' command: %s', unicode(self))

            debug('Preparing for execution')
            self.prepare()

            if self.record:

                ifile, ofile = self._prepare_recording(argv, ifile, ofile)
                self._record_writer.ofile = ofile

                # Record the metadata that initiated this command after
                # removing the record option from args/raw_args
                info = self._metadata.searchinfo

                for attr in 'args', 'raw_args':
                    setattr(info, attr, [arg for arg in getattr(info, attr)
                                         if not arg.startswith('record=')])

                metadata = MetadataEncoder().encode(self._metadata)
                ifile.record('chunked 1.0,', unicode(len(metadata)), ',0\n', metadata)

            if self.show_configuration:
                self.write_info(self.name + ' command configuration: ' + str(self._configuration))

            debug(' command configuration: %s', self._configuration)

        except __HOLE__:
            self._record_writer.write_metadata(self._configuration)
            self.finish()
            raise

        except:
            self._record_writer.write_metadata(self._configuration)
            self._report_unexpected_error()
            self.finish()
            exit(1)

        self._record_writer.write_metadata(self._configuration)

        # Execute search command on data passing through the pipeline
        # noinspection PyBroadException
        try:
            debug('Executing under protocol_version=2')
            self._records = self._records_protocol_v2
            self._metadata.action = 'execute'
            self._execute(ifile, None)

        except SystemExit:
            self.finish()
            raise

        except:
            self._report_unexpected_error()
            self.finish()
            exit(1)

        debug('%s.process completed', class_name)
SystemExit
dataset/ETHPy150Open splunk/splunk-sdk-python/splunklib/searchcommands/search_command.py/SearchCommand._process_protocol_v2
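The `ifile.record('chunked 1.0,', ...)` call above exposes the transport framing: each chunk is a `chunked 1.0,<metadata-length>,<body-length>` header line followed by the two payloads. A minimal reader for that framing, inferred from the record call rather than copied from splunklib's `_read_chunk` (which may differ in details):

import io
import json

def read_chunk(stream):
    # Header line: b'chunked 1.0,<metadata-length>,<body-length>\n'
    _, metadata_len, body_len = stream.readline().decode('utf-8').rstrip().split(',')
    metadata = json.loads(stream.read(int(metadata_len)).decode('utf-8'))
    body = stream.read(int(body_len)).decode('utf-8')
    return metadata, body

stream = io.BytesIO(b'chunked 1.0,13,0\n{"action":""}')
print(read_chunk(stream))  # ({'action': ''}, '')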
9,181
def _records_protocol_v1(self, ifile):

        reader = csv.reader(ifile, dialect=CsvDialect)

        try:
            fieldnames = reader.next()
        except __HOLE__:
            return

        mv_fieldnames = {name: name[len('__mv_'):] for name in fieldnames
                         if name.startswith('__mv_')}

        if len(mv_fieldnames) == 0:
            for values in reader:
                yield OrderedDict(izip(fieldnames, values))
            return

        for values in reader:
            record = OrderedDict()
            for fieldname, value in izip(fieldnames, values):
                if fieldname.startswith('__mv_'):
                    if len(value) > 0:
                        record[mv_fieldnames[fieldname]] = self._decode_list(value)
                elif fieldname not in record:
                    record[fieldname] = value
            yield record
StopIteration
dataset/ETHPy150Open splunk/splunk-sdk-python/splunklib/searchcommands/search_command.py/SearchCommand._records_protocol_v1
9,182
def _records_protocol_v2(self, ifile):

        while True:
            result = self._read_chunk(ifile)

            if not result:
                return

            metadata, body = result
            action = getattr(metadata, 'action', None)

            if action != 'execute':
                raise RuntimeError('Expected execute action, not {}'.format(action))

            finished = getattr(metadata, 'finished', False)
            self._record_writer.is_flushed = False

            if len(body) > 0:
                reader = csv.reader(StringIO(body), dialect=CsvDialect)

                try:
                    fieldnames = reader.next()
                except __HOLE__:
                    return

                mv_fieldnames = {name: name[len('__mv_'):] for name in fieldnames
                                 if name.startswith('__mv_')}

                if len(mv_fieldnames) == 0:
                    for values in reader:
                        yield OrderedDict(izip(fieldnames, values))
                else:
                    for values in reader:
                        record = OrderedDict()
                        for fieldname, value in izip(fieldnames, values):
                            if fieldname.startswith('__mv_'):
                                if len(value) > 0:
                                    record[mv_fieldnames[fieldname]] = self._decode_list(value)
                            elif fieldname not in record:
                                record[fieldname] = value
                        yield record

            if finished:
                return

            self.flush()
StopIteration
dataset/ETHPy150Open splunk/splunk-sdk-python/splunklib/searchcommands/search_command.py/SearchCommand._records_protocol_v2
9,183
def get_covariance_matrix(cosmo, data, command_line):
    """
    Compute the covariance matrix, from an input file or from an existing
    matrix.

    Reordering of the names and scaling take place here, in a series of
    potentially hard to read methods. For the sake of clarity, and to avoid
    confusions, the code will, by default, print out a succession of 4
    covariance matrices at the beginning of the run, if starting from an
    existing one. This way, you can control that the parameters are set
    properly.

    .. note::

        The set of parameters from the run need not be the exact same set of
        parameters from the existing covariance matrix (not even the
        ordering). Missing parameters from the existing covariance matrix
        will use the sigma given as an input.

    """
    # Setting numpy options in terms of precision (useful when writing to files
    # or displaying a result, but does not affect the precision of the
    # computation).
    np.set_printoptions(precision=2, linewidth=150)
    parameter_names = data.get_mcmc_parameters(['varying'])

    # Define quiet setting if not previously defined
    try:
        command_line.quiet
    except:
        command_line.quiet = False

    if command_line.fisher and not command_line.cov:
        # We will work out the fisher matrix for all the parameters and
        # write it to a file
        if not command_line.silent:
            warnings.warn("Fisher implementation is being tested")

        # Let us create a separate copy of data
        from copy import deepcopy
        # Do not modify data, instead copy
        temp_data = deepcopy(data)
        done = False

        # Create the center dictionary, which will hold the center point
        # information (or best-fit) TODO
        # This dictionary will be updated in case it was too far from the
        # best-fit, and found a non positive-definite symmetric fisher matrix.
        center = {}
        if not command_line.bf:
            for elem in parameter_names:
                temp_data.mcmc_parameters[elem]['current'] = (
                    data.mcmc_parameters[elem]['initial'][0])
                center[elem] = data.mcmc_parameters[elem]['initial'][0]
        else:
            read_args_from_bestfit(temp_data, command_line.bf)
            for elem in parameter_names:
                temp_data.mcmc_parameters[elem]['current'] = (
                    temp_data.mcmc_parameters[elem]['last_accepted'])
                center[elem] = temp_data.mcmc_parameters[elem]['last_accepted']

        # Have a security index that prevents looping indefinitely
        security = 0
        while not done and security < 10:
            security += 1
            # Compute the Fisher matrix and the gradient array at the center
            # point.
            fisher_matrix, gradient = compute_fisher(
                temp_data, cosmo, center, 0.01)

            # Compute inverse of the fisher matrix, catch LinAlgError exception
            fisher_invert_success = True
            try:
                if not command_line.silent:
                    print("Fisher matrix computed:")
                    print(fisher_matrix)
                cov_matrix = np.linalg.inv(fisher_matrix)
            except np.linalg.LinAlgError:
                raise io_mp.ConfigurationError(
                    "Could not find Fisher matrix, please remove the "
                    "option --fisher and run with Metropolis-Hastings "
                    "or another sampling method.")
                fisher_invert_success = False
                done = True

            # Write it to the file
            if fisher_invert_success:
                io_mp.write_covariance_matrix(
                    cov_matrix, parameter_names,
                    os.path.join(command_line.folder, 'covariance_fisher.mat'))

                command_line.cov = os.path.join(
                    command_line.folder, 'covariance_fisher.mat')

                done = True
                # Check if the diagonal elements are non-negative
                for h, elem in enumerate(parameter_names):
                    if cov_matrix[h][h] < 0:
                        warnings.warn(
                            "Covariance has negative values on diagonal, "
                            "moving to a better point and repeating "
                            "the Fisher computation")
                        done = False
                        break

                if not done:
                    # Solve for a step
                    step = np.dot(cov_matrix, gradient)
                    # Now modify data_parameters TODO HERE update center
                    for k, elem in enumerate(parameter_names):
                        data.mcmc_parameters[elem]['initial'][0] = \
                            data.mcmc_parameters[elem]['initial'][0] - step[k]
                        temp_data.mcmc_parameters[elem]['initial'][0] = \
                            temp_data.mcmc_parameters[elem]['initial'][0] - step[k]
                        print "Moved %s to:" % (elem), data.mcmc_parameters[elem]['initial'][0]

    # if the user provides a .covmat file or if user asks to compute a fisher matrix
    if command_line.cov is not None:
        cov = open('{0}'.format(command_line.cov), 'r')

        i = 0
        for line in cov:
            if line.find('#') != -1:
                # Extract the names from the first line
                covnames = line.strip('#').replace(' ', '').\
                    replace('\n', '').split(',')
                # Initialize the matrices
                matrix = np.zeros((len(covnames), len(covnames)), 'float64')
                rot = np.zeros((len(covnames), len(covnames)))
            else:
                line = line.split()
                for j in range(len(line)):
                    matrix[i][j] = np.array(line[j], 'float64')
                i += 1

        # First print out
        if not command_line.silent and not command_line.quiet:
            print('\nInput covariance matrix:')
            print(covnames)
            print(matrix)

        # Deal with the all problematic cases.
        # First, adjust the scales between stored parameters and the ones used
        # in mcmc
        scales = []
        for elem in covnames:
            if elem in parameter_names:
                scales.append(data.mcmc_parameters[elem]['scale'])
            else:
                scales.append(1)
        scales = np.diag(scales)

        # Compute the inverse matrix, and assert that the computation was
        # precise enough, by comparing the product to the identity matrix.
        invscales = np.linalg.inv(scales)
        np.testing.assert_array_almost_equal(
            np.dot(scales, invscales), np.eye(np.shape(scales)[0]),
            decimal=5)

        # Apply the newly computed scales to the input matrix
        matrix = np.dot(invscales.T, np.dot(matrix, invscales))

        # Second print out, after having applied the scale factors
        if not command_line.silent and not command_line.quiet:
            print('\nFirst treatment (scaling)')
            print(covnames)
            print(matrix)

        # Rotate matrix for the parameters to be well ordered, even if some
        # names are missing or some are in extra.
        # First, store the parameter names in temp_names that also appear in
        # the covariance matrix, in the right ordering for the code (might be
        # different from the input matrix)
        temp_names = [elem for elem in parameter_names if elem in covnames]

        # If parameter_names contains less things than covnames, we will do a
        # small trick. Create a second temporary array, temp_names_2, that will
        # have the same dimension as covnames, and containing:
        # - the elements of temp_names, in the order of parameter_names (h
        #   index)
        # - an empty string '' for the remaining unused parameters
        temp_names_2 = []
        h = 0
        not_in = [elem for elem in covnames if elem not in temp_names]
        for k in range(len(covnames)):
            if covnames[k] not in not_in:
                temp_names_2.append(temp_names[h])
                h += 1
            else:
                temp_names_2.append('')

        # Create the rotation matrix, that will put the covariance matrix in
        # the right order, and also assign zeros to the unused parameters from
        # the input. These empty columns will be removed in the next step.
        for k in range(len(covnames)):
            for h in range(len(covnames)):
                try:
                    if covnames[k] == temp_names_2[h]:
                        rot[h][k] = 1.
                    else:
                        rot[h][k] = 0.
                except __HOLE__:
                    # The IndexError exception means that we are dealing with
                    # an unused parameter. By enforcing the corresponding
                    # rotation matrix element to 0, the resulting matrix will
                    # still have the same size as the original, but with zeros
                    # on the unused lines.
                    rot[h][k] = 0.
        matrix = np.dot(rot, np.dot(matrix, np.transpose(rot)))

        # Third print out
        if not command_line.silent and not command_line.quiet:
            print('\nSecond treatment (partial reordering and cleaning)')
            print(temp_names_2)
            print(matrix)

        # Final step, creating a temporary matrix, filled with 1, that will
        # eventually contain the result.
        matrix_temp = np.ones((len(parameter_names),
                               len(parameter_names)), 'float64')
        indices_final = np.zeros(len(parameter_names))
        indices_initial = np.zeros(len(covnames))

        # Remove names that are in parameter names but not in covnames, and
        # set to zero the corresponding columns of the final result.
        for k in range(len(parameter_names)):
            if parameter_names[k] in covnames:
                indices_final[k] = 1
        for zeros in np.where(indices_final == 0)[0]:
            matrix_temp[zeros, :] = 0
            matrix_temp[:, zeros] = 0

        # Remove names that are in covnames but not in param_names
        for h in range(len(covnames)):
            if covnames[h] in parameter_names:
                indices_initial[h] = 1

        # There, put a place holder number (we are using a pure imaginary
        # number: i, to avoid any problem) in the initial matrix, so that the
        # next step only copy the interesting part of the input to the final
        # matrix.
        max_value = np.finfo(np.float64).max
        for zeros in np.where(indices_initial == 0)[0]:
            matrix[zeros, :] = [max_value for _ in range(
                len(matrix[zeros, :]))]
            matrix[:, zeros] = [max_value for _ in range(
                len(matrix[:, zeros]))]

        # Now put in the temporary matrix, where the 1 were, the interesting
        # quantities from the input (the one that are not equal to i).
        matrix_temp[matrix_temp == 1] = matrix[matrix != max_value]
        matrix = np.copy(matrix_temp)

        # on all other lines, that contain 0, just use sigma^2
        for zeros in np.where(indices_final == 0)[0]:
            matrix[zeros, zeros] = np.array(
                data.mcmc_parameters[parameter_names[zeros]]['initial'][3],
                'float64')**2
    # else, take sigmas^2.
    else:
        matrix = np.identity(len(parameter_names), 'float64')
        for index, elem in enumerate(parameter_names):
            matrix[index][index] = np.array(
                data.mcmc_parameters[elem]['initial'][3], 'float64')**2

    # Final print out, the actually used covariance matrix
    if not command_line.silent and not command_line.quiet:
        sys.stdout.write('\nDeduced starting covariance matrix:\n')
        print(parameter_names)
        print(matrix)

    # inverse, and diagonalization
    eigv, eigV = np.linalg.eig(np.linalg.inv(matrix))
    return eigv, eigV, matrix
IndexError
dataset/ETHPy150Open baudren/montepython_public/montepython/sampler.py/get_covariance_matrix
9,184
def compute_lkl(cosmo, data):
    """
    Compute the likelihood, given the current point in parameter space.

    This function now performs a test before calling the cosmological model
    (**new in version 1.2**). If any cosmological parameter changed, the flag
    :code:`data.need_cosmo_update` will be set to :code:`True`, from the
    routine :func:`check_for_slow_step <data.Data.check_for_slow_step>`.

    Returns
    -------
    loglike : float
        The log of the likelihood (:math:`\\frac{-\chi^2}2`) computed from the
        sum of the likelihoods of the experiments specified in the input
        parameter file.

        This function returns :attr:`data.boundary_loglike
        <data.data.boundary_loglike>`, defined in the module :mod:`data` if
        *i)* the current point in the parameter space has hit a prior edge, or
        *ii)* the cosmological module failed to compute the model. This value
        is chosen to be extremely small (large negative value), so that the
        step will always be rejected.

    """
    from classy import CosmoSevereError, CosmoComputationError

    # If the cosmological module has already been called once, and if the
    # cosmological parameters have changed, then clean up, and compute.
    if cosmo.state and data.need_cosmo_update is True:
        cosmo.struct_cleanup()

    # If the data needs to change, then do a normal call to the cosmological
    # compute function. Note that, even if need_cosmo update is True, this
    # function must be called if the jumping factor is set to zero. Indeed,
    # this means the code is called for only one point, to set the fiducial
    # model.
    if ((data.need_cosmo_update) or
            (not cosmo.state) or
            (data.jumping_factor == 0)):

        # Prepare the cosmological module with the new set of parameters
        cosmo.set(data.cosmo_arguments)

        # Compute the model, keeping track of the errors

        # In classy.pyx, we made use of two types of python errors, to handle
        # two different situations.
        # - CosmoSevereError is returned if a parameter was not properly set
        # during the initialisation (for instance, you entered Ommega_cdm
        # instead of Omega_cdm). Then, the code exits, to prevent running with
        # imaginary parameters. This behaviour is also used in case you want
        # to kill the process.
        # - CosmoComputationError is returned if Class fails to compute the
        # output given the parameter values. This will be considered as a
        # valid point, but with minimum likelihood, so will be rejected,
        # resulting in the choice of a new point.
        try:
            cosmo.compute(["lensing"])
        except CosmoComputationError as failure_message:
            sys.stderr.write(str(failure_message)+'\n')
            sys.stderr.flush()
            return data.boundary_loglike
        except CosmoSevereError as critical_message:
            raise io_mp.CosmologicalModuleError(
                "Something went wrong when calling CLASS" +
                str(critical_message))
        except __HOLE__:
            raise io_mp.CosmologicalModuleError(
                "You interrupted execution")

    # For each desired likelihood, compute its value against the theoretical
    # model
    loglike = 0
    # This flag holds the information whether a fiducial model was written. In
    # this case, the log likelihood returned will be '1j', meaning the
    # imaginary number i.
    flag_wrote_fiducial = 0

    for likelihood in data.lkl.itervalues():
        if likelihood.need_update is True:
            value = likelihood.loglkl(cosmo, data)
            # Storing the result
            likelihood.backup_value = value
        # Otherwise, take the existing value
        else:
            value = likelihood.backup_value
        loglike += value
        # In case the fiducial file was written, store this information
        if value == 1j:
            flag_wrote_fiducial += 1

    # Compute the derived parameters if relevant
    if data.get_mcmc_parameters(['derived']) != []:
        try:
            derived = cosmo.get_current_derived_parameters(
                data.get_mcmc_parameters(['derived']))
            for name, value in derived.iteritems():
                data.mcmc_parameters[name]['current'] = value
        except AttributeError:
            # This happens if the classy wrapper is still using the old
            # convention, expecting data as the input parameter
            cosmo.get_current_derived_parameters(data)
        except CosmoSevereError:
            raise io_mp.CosmologicalModuleError(
                "Could not write the current derived parameters")
    for elem in data.get_mcmc_parameters(['derived']):
        data.mcmc_parameters[elem]['current'] /= \
            data.mcmc_parameters[elem]['scale']

    # If fiducial files were created, inform the user, and exit
    if flag_wrote_fiducial > 0:
        if flag_wrote_fiducial == len(data.lkl):
            raise io_mp.FiducialModelWritten(
                "Fiducial file(s) was(were) created, please start a new chain")
        else:
            raise io_mp.FiducialModelWritten(
                "Some previously non-existing fiducial files were created, " +
                "but potentially not all of them. Please check now manually" +
                " on the headers, of the corresponding that all parameters " +
                "are coherent for your tested models")

    return loglike
KeyboardInterrupt
dataset/ETHPy150Open baudren/montepython_public/montepython/sampler.py/compute_lkl
9,185
def test_tb_across_threads(self):
        if not test_support.is_jython:
            return

        # http://bugs.jython.org/issue1533624
        class PyRunnable(Runnable):
            def run(self):
                raise TypeError('this is only a test')
        try:
            EventQueue.invokeAndWait(PyRunnable())
        except __HOLE__:
            self.assertEqual(tb_info(),
                             [('test_tb_across_threads',
                               'EventQueue.invokeAndWait(PyRunnable())'),
                              ('run',
                               "raise TypeError('this is only a test')")])
        else:
            self.fail('Expected TypeError')
TypeError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_traceback_jy.py/TracebackTestCase.test_tb_across_threads
9,186
def test_except_around_raising_call(self):
        """[ #452526 ] traceback lineno is the except line"""
        from test import except_in_raising_code
        try:
            except_in_raising_code.foo()
        except __HOLE__:
            tb = sys.exc_info()[2]
            self.assertEquals(6, tb.tb_next.tb_lineno)
        else:
            self.fail("Should've raised a NameError")
NameError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_traceback_jy.py/TracebackTestCase.test_except_around_raising_call
9,187
def _bindSocket(self):
        log.msg("%s starting on %s" % (self.protocol.__class__, self.interface))
        try:
            fd, name = opentuntap(name=self.interface,
                                  ethernet=self.ethernet,
                                  packetinfo=0)
        except __HOLE__, e:
            raise error.CannotListenError, (None, self.interface, e)
        fdesc.setNonBlocking(fd)
        self.interface = name
        self.connected = 1
        self.fd = fd
OSError
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/pair/tuntap.py/TuntapPort._bindSocket
9,188
def doRead(self):
        """Called when my socket is ready for reading."""
        read = 0
        while read < self.maxThroughput:
            try:
                data = os.read(self.fd, self.maxPacketSize)
                read += len(data)
                # pkt = TuntapPacketInfo(data)
                self.protocol.datagramReceived(data,
                                               partial=0  # pkt.isPartial(),
                                               )
            except OSError, e:
                if e.errno in (errno.EWOULDBLOCK,):
                    return
                else:
                    raise
            except __HOLE__, e:
                if e.errno in (errno.EAGAIN, errno.EINTR):
                    return
                else:
                    raise
            except:
                log.deferr()
IOError
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/pair/tuntap.py/TuntapPort.doRead
9,189
def write(self, datagram):
        """Write a datagram."""
        # header = makePacketInfo(0, 0)
        try:
            return os.write(self.fd, datagram)
        except __HOLE__, e:
            if e.errno == errno.EINTR:
                return self.write(datagram)
            elif e.errno == errno.EMSGSIZE:
                raise error.MessageLengthError, "message too long"
            elif e.errno == errno.ECONNREFUSED:
                raise error.ConnectionRefusedError
            else:
                raise
IOError
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/pair/tuntap.py/TuntapPort.write
9,190
def _scan(self):
        plugins = []
        dirs = []

        # initial pass, read
        for name in os.listdir(self.root):
            if name in (".", "..",):
                continue

            path = os.path.join(self.root, name)
            if (os.path.isdir(path) and
                    os.path.isfile(os.path.join(path, '__init__.py'))):
                # no conflict
                if path not in sys.path:
                    dirs.append((name, path))
                    sys.path.insert(0, os.path.join(self.root, '..', name))
                    sys.path.insert(0, os.path.join(self.root, name))

        for (name, d) in dirs:
            try:
                for f in os.listdir(d):
                    if f.endswith(".py") and f != "__init__.py":
                        plugins.append(("%s.%s" % (name, f[:-3]), d))
            except __HOLE__:
                sys.stderr.write("error, loading %s" % f)
                sys.stderr.flush()
                continue

        mod = None
        for (name, d) in plugins:
            mod = importlib.import_module(name)
            if hasattr(mod, "__all__"):
                for attr in mod.__all__:
                    plug = getattr(mod, attr)
                    if issubclass(plug, Plugin):
                        self._load_plugin(plug())
OSError
dataset/ETHPy150Open benoitc/gaffer/gaffer/gafferd/plugins.py/PluginDir._scan
9,191
def restart_apps(self, config, loop, manager):
        if not os.path.isdir(config.plugin_dir):
            # the new plugin dir isn't found
            logging.error("config error plugging dir %r not found" %
                          config.plugin_dir)

            if self.plugin_dir != config.plugin_dir:
                logging.info("stop all current plugins")
                self.stop_apps()
            return

        # save all states
        old_plugins = self.plugins.copy()
        old_installed = self.installed
        old_apps = copy.copy(self.apps)
        old_plugin_dir = self.plugin_dir

        # scan the plugin dir
        self.plugin_dir = config.plugin_dir
        self.scan()

        try:
            self.check_mandatory()
        except __HOLE__ as e:
            # one dependency is missing, return
            logging.error("Failed to reload plugins: %s" % str(e))
            if self.plugin_dir != old_plugin_dir:
                logging.info("stop all current plugins")
                self.stop_apps()

            # reset values
            self.plugin_dir = old_plugin_dir
            self.plugins = old_plugins
            self.installed = old_installed
            self.apps = old_apps
            return

        # initialize new apps
        apps = self.init_apps(config)

        # stop removed plugins
        for ap in old_apps:
            if ap not in apps:
                app, _ = ap
                try:
                    app.stop()
                except Exception:
                    # we ignore all exception
                    logging.error('Uncaught exception when stopping a plugin',
                                  exc_info=True)

        # start or restart plugins
        for app, plug in apps:
            try:
                if (app, plug) in old_apps:
                    app.restart()
                else:
                    app.start(loop, manager)
            except Exception:
                # we ignore all exception
                logging.error('Uncaught exception when (re)starting a plugin',
                              exc_info=True)
RuntimeError
dataset/ETHPy150Open benoitc/gaffer/gaffer/gafferd/plugins.py/PluginManager.restart_apps
9,192
def run(args):
    """
    Prepares models from specified module for partitioning.

    :param dictionary args: (required). Dictionary of command arguments.
    """
    names = []
    module = args['module'][:-3] if args['module'].endswith('.py') else args['module']

    try:
        module_clss = filter(lambda obj: isinstance(obj, type),
                             __import__(module, fromlist=module).__dict__.values())
    except __HOLE__ as e:
        raise ImportProblemError(str(e))

    for cls in module_clss:
        if hasattr(cls, 'architect') and hasattr(cls.architect, 'partition'):
            cls.architect.partition.get_partition().prepare()
            names.append(cls.__name__)

    if not names:
        return 'unable to find any partitionable models in a module: {0}'.format(module)
    else:
        return 'successfully (re)configured the database for the following models: {0}'.format(
            ', '.join(names))
ImportError
dataset/ETHPy150Open maxtepkeev/architect/architect/commands/partition.py/run
9,193
def __init__(self, request, domain, export_id=None, minimal=False):
        self.request = request
        self.domain = domain
        self.presave = False
        self.transform_dates = False
        self.creating_new_export = not bool(export_id)
        self.minimal = minimal

        if export_id:
            self.custom_export = self.ExportSchemaClass.get(export_id)
            # also update the schema to include potential new stuff
            self.custom_export.update_schema()

            # enable configuring saved exports from this page
            saved_group = HQGroupExportConfiguration.get_for_domain(self.domain)
            self.presave = export_id in saved_group.custom_export_ids

            self.export_stock = self.has_stock_column()

            try:
                assert self.custom_export.doc_type == 'SavedExportSchema', 'bad export doc type'
                assert self.custom_export.type == self.export_type, 'wrong export type specified'
                assert self.custom_export.index[0] == domain, 'bad export doc domain'
            except __HOLE__, e:
                raise BadExportConfiguration(str(e))
        else:
            self.custom_export = self.ExportSchemaClass(type=self.export_type)
            self.export_stock = False
AssertionError
dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/export/custom_export_helpers.py/CustomExportHelper.__init__
9,194
def isinstance(obj, type_or_seq):
        try:
            return _isinstance(obj, type_or_seq)
        except __HOLE__:
            for t in type_or_seq:
                if _isinstance(obj, t):
                    return 1
            return 0
TypeError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/xml/dom/minicompat.py/isinstance
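This shim exists because the builtin `isinstance` accepts a type or a tuple but not an arbitrary sequence; the TypeError branch re-dispatches element by element. The behaviour is easy to reproduce (renamed here to avoid shadowing the builtin):

_isinstance = isinstance  # keep a handle on the builtin, as the original module does

def isinstance_seq(obj, type_or_seq):
    try:
        return _isinstance(obj, type_or_seq)
    except TypeError:  # e.g. a list of types, which the builtin rejects
        return any(_isinstance(obj, t) for t in type_or_seq)

print(isinstance_seq(3, [int, str]))  # True; isinstance(3, [int, str]) raises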
9,195
def __getattr__(self, key):
        if key.startswith("_"):
            raise AttributeError, key

        try:
            get = getattr(self, "_get_" + key)
        except __HOLE__:
            raise AttributeError, key
        return get()
AttributeError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/xml/dom/minicompat.py/GetattrMagic.__getattr__
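`__getattr__` here routes attribute reads to `_get_<name>` accessor methods and converts a missing accessor back into AttributeError, preserving normal attribute semantics. A compact illustration of the convention (the demo class and its `_get_length` accessor are invented for this example):

class GetattrMagicDemo(object):
    def __getattr__(self, key):
        if key.startswith("_"):
            raise AttributeError(key)
        try:
            get = getattr(self, "_get_" + key)  # re-enters __getattr__ if absent
        except AttributeError:
            raise AttributeError(key)
        return get()

    def _get_length(self):  # hypothetical accessor
        return 42

print(GetattrMagicDemo().length)  # 42, routed through _get_length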
9,196
def ValidateAndLog(self, log):
    """Validates an @ time spec line and logs any errors and warnings.

    Args:
      log: A LogCounter instance to record issues.
    """
    self._CheckTimeField(log)

    # User checks.
    if self.user in USER_WHITELIST:
      return
    elif len(self.user) > 31:
      log.LineError(log.MSG_INVALID_USER, 'Username too long "%s"' % self.user)
    elif self.user.startswith('-'):
      log.LineError(log.MSG_INVALID_USER, 'Invalid username "%s"' % self.user)
    elif re.search(r'[\s!"#$%&\'()*+,/:;<=>?@[\\\]^`{|}~]', self.user):
      log.LineError(log.MSG_INVALID_USER, 'Invalid username "%s"' % self.user)
    else:
      try:
        pwd.getpwnam(self.user)
      except __HOLE__:
        log.LineWarn(log.MSG_USER_NOT_FOUND,
                     'User "%s" not found.' % self.user)

    # Command checks.
    if self.command.startswith('%') or re.search(r'[^\\]%', self.command):
      log.LineWarn(log.MSG_BARE_PERCENT, 'A bare % is a line break in'
                   ' crontab and is commonly not intended.')
KeyError
dataset/ETHPy150Open lyda/chkcrontab/chkcrontab_lib.py/CronLineTimeAction.ValidateAndLog
9,197
def get_last_update_of_model(self, model, **kwargs):
        """
        Return the last time a given model's items were updated. Returns the
        epoch if the items were never updated.
        """
        qs = self.get_for_model(model)
        if kwargs:
            qs = qs.filter(**kwargs)
        try:
            return qs.order_by('-timestamp')[0].timestamp
        except __HOLE__:
            return datetime.datetime.fromtimestamp(0)
IndexError
dataset/ETHPy150Open jacobian-archive/jellyroll/src/jellyroll/managers.py/ItemManager.get_last_update_of_model
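The IndexError branch turns "no rows" into the Unix epoch, so callers can always compare timestamps without a None check. The same shape without Django, with a plain list standing in for the queryset:

import datetime

def last_update(timestamps):
    try:
        return sorted(timestamps, reverse=True)[0]  # IndexError when empty
    except IndexError:
        return datetime.datetime.fromtimestamp(0)   # the epoch

print(last_update([]))  # 1970-01-01 ...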
9,198
def __from__(path):
    try:
        _import = path.split('.')[-1]
        _from = u".".join(path.split('.')[:-1])
        return getattr(__import__(_from, fromlist=[_import]), _import)
    except __HOLE__:
        return object
TypeError
dataset/ETHPy150Open mining/mining/mining/utils/__init__.py/__from__
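`__from__` resolves a dotted path to an attribute of a module, degrading to `object` when the import machinery balks. A modern sketch of the happy path using `importlib` (unlike the original, this version raises on a bare, dotless name instead of falling back to `object`):

from importlib import import_module

def load_object(dotted_path):
    module_path, _, name = dotted_path.rpartition('.')
    return getattr(import_module(module_path), name)

OrderedDict = load_object('collections.OrderedDict')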
9,199
def check_auth(users, encrypt=None, realm=None):
    """If an authorization header contains credentials, return True, else False."""
    request = cherrypy.serving.request
    if 'authorization' in request.headers:
        # make sure the provided credentials are correctly set
        ah = httpauth.parseAuthorization(request.headers['authorization'])
        if ah is None:
            raise cherrypy.HTTPError(400, 'Bad Request')

        if not encrypt:
            encrypt = httpauth.DIGEST_AUTH_ENCODERS[httpauth.MD5]

        if hasattr(users, '__call__'):
            try:
                # backward compatibility
                users = users()  # expect it to return a dictionary

                if not isinstance(users, dict):
                    raise ValueError("Authentication users must be a dictionary")

                # fetch the user password
                password = users.get(ah["username"], None)
            except __HOLE__:
                # returns a password (encrypted or clear text)
                password = users(ah["username"])
        else:
            if not isinstance(users, dict):
                raise ValueError("Authentication users must be a dictionary")

            # fetch the user password
            password = users.get(ah["username"], None)

        # validate the authorization by re-computing it here
        # and compare it with what the user-agent provided
        if httpauth.checkResponse(ah, password, method=request.method,
                                  encrypt=encrypt, realm=realm):
            request.login = ah["username"]
            return True

        request.login = False
    return False
TypeError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/cherrypy/cherrypy/lib/auth.py/check_auth