Column schema (dataset preview):

    Unnamed: 0    int64             row index; values range 0 to 10k
    function      string            lengths 79 to 138k characters
    label         string (classes)  20 distinct values
    info          string            lengths 42 to 261 characters
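Each record below pairs a flattened Python function (the function column) with the exception class that has been masked out of it as __HOLE__ (the label column), plus the path of the snippet's origin in the ETHPy150Open corpus (the info column). A minimal sketch of how such a table might be loaded and the masks re-filled, assuming a hypothetical CSV export named exceptions.csv (the file name and the use of pandas are illustrative assumptions, not part of the dataset):

    import pandas as pd

    # Hypothetical export of the table below; column names match the schema above.
    df = pd.read_csv("exceptions.csv")

    for _, row in df.iterrows():
        # Substituting the label for the mask token reconstructs the original
        # source, e.g. "except __HOLE__:" becomes "except KeyError:".
        restored = row["function"].replace("__HOLE__", row["label"])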
6,800
def unregister_webapi_capabilities(capabilities_id):
    """Unregisters a previously registered set of web API capabilities."""
    try:
        del _registered_capabilities[capabilities_id]
    except __HOLE__:
        logging.error('Failed to unregister unknown web API capabilities '
                      '"%s".',
                      capabilities_id)
        raise KeyError('"%s" is not a registered web API capabilities set'
                       % capabilities_id)
KeyError
dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/webapi/server_info.py/unregister_webapi_capabilities
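As a quick sanity check on that convention, one might verify that filling a record's mask with its label yields syntactically valid code (a sketch; record stands for any row with the schema's keys, and note that several snippets in this corpus are Python 2, so they only parse under a Python 2 grammar):

    import ast

    snippet = record["function"].replace("__HOLE__", record["label"])
    ast.parse(snippet)  # raises SyntaxError if the reconstruction is malformed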
6,801
def unload(self):
    self.disable()
    self._remove_all_event_handlers()
    try:
        self.machine.game.player.uvars['logic_blocks'].remove(self)
    except __HOLE__:
        pass
KeyError
dataset/ETHPy150Open missionpinball/mpf/mpf/system/logic_blocks.py/LogicBlock.unload
6,802
def getName(self):
    try:
        return self.layouts[XML_TYPE]['name']
    except __HOLE__:
        pass
    return Object.getName(self)
KeyError
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/db/bin/xml_gen_objects.py/XMLObject.getName
6,803
def getNodeType(self):
    try:
        return self.layouts[XML_TYPE]['nodeType']
    except __HOLE__:
        pass
    return 'xs:element'
KeyError
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/db/bin/xml_gen_objects.py/XMLObject.getNodeType
6,804
def getName(self):
    try:
        return self.specs[XML_TYPE]['name']
    except __HOLE__:
        pass
    return Property.getName(self)
KeyError
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/db/bin/xml_gen_objects.py/XMLProperty.getName
6,805
def getNodeType(self):
    try:
        return self.specs[XML_TYPE]['nodeType']
    except __HOLE__:
        pass
    return 'xs:attribute'
KeyError
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/db/bin/xml_gen_objects.py/XMLProperty.getNodeType
6,806
def getAttributeType(self):
    try:
        return self.specs[XML_TYPE]['type']
    except __HOLE__:
        pass
    return 'xs:string'
KeyError
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/db/bin/xml_gen_objects.py/XMLProperty.getAttributeType
6,807
def getAttributeUse(self):
    try:
        return self.specs[XML_TYPE]['use']
    except __HOLE__:
        pass
    return None
KeyError
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/db/bin/xml_gen_objects.py/XMLProperty.getAttributeUse
6,808
def getChoice(self):
    try:
        return self.specs[XML_TYPE]['choice']
    except __HOLE__:
        pass
    return None
KeyError
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/db/bin/xml_gen_objects.py/XMLProperty.getChoice
6,809
def isInferred(self):
    try:
        return self.specs[XML_TYPE]['inferred'] == 'true'
    except __HOLE__:
        pass
    return False
KeyError
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/db/bin/xml_gen_objects.py/XMLProperty.isInferred
6,810
def __get__(self, instance, owner):
    arg = instance.arguments[self.name]
    try:
        value = instance.__dict__[arg.name]
    except __HOLE__:
        raise AttributeError(self.name)
    key = hash(instance)
    if data.instance_stack[key]:
        instance = data.instance_stack[key][-1]
    if arg.resolve_field and hasattr(value, 'resolve'):
        value = value.resolve(instance)
    elif hasattr(value, '__call__'):
        value = value(instance)
    return value
KeyError
dataset/ETHPy150Open gulopine/steel/steel/common/args.py/Argument.__get__
6,811
def attach_to_class(self, cls):
    # Remove the argument from the class
    try:
        del cls.arguments[self.name]
    except __HOLE__:
        raise TypeError("%r is not an argument, so it can't be removed" % self.name)
KeyError
dataset/ETHPy150Open gulopine/steel/steel/common/args.py/Removed.attach_to_class
6,812
def parse_args(self):
    """Convert fields and filters from strings to list, dicts"""
    if isinstance(self.fields, basestring):
        if self.fields == "*":
            self.fields = ["*"]
        else:
            try:
                self.fields = json.loads(self.fields)
            except __HOLE__:
                self.fields = [f.strip() for f in self.fields.split(",")]

    for filter_name in ["filters", "or_filters"]:
        filters = getattr(self, filter_name)
        if isinstance(filters, basestring):
            filters = json.loads(filters)
        if isinstance(filters, dict):
            fdict = filters
            filters = []
            for key, value in fdict.iteritems():
                filters.append(make_filter_tuple(self.doctype, key, value))
        setattr(self, filter_name, filters)
ValueError
dataset/ETHPy150Open frappe/frappe/frappe/model/db_query.py/DatabaseQuery.parse_args
6,813
def get_best_configuration(self):
    """
    The best configuration found so far. From the current tuning run only.
    """
    try:
        return self.search_driver.best_result.configuration.data
    except __HOLE__:
        return None
AttributeError
dataset/ETHPy150Open jansel/opentuner/opentuner/api.py/TuningRunManager.get_best_configuration
6,814
def start_plugin_future(self, chname, opname, future, alreadyOpenOk=True):
    try:
        pInfo = self.getPluginInfo(opname)
    except __HOLE__:
        self.fv.show_error("No plugin information for plugin '%s'" % (
            opname))
        return
    if chname is not None:
        # local plugin
        plname = chname.upper() + ': ' + pInfo.name
    else:
        # global plugin
        plname = pInfo.name
    lname = pInfo.name.lower()
    if lname in self.active:
        if alreadyOpenOk:
            self.set_focus(pInfo.name)
            return
        raise PluginManagerError("Plugin %s is already active." % (
            plname))

    # Raise tab with GUI
    pInfo.tabname = pInfo.spec.get('tab', plname)
    vbox = None
    had_error = False
    try:
        if hasattr(pInfo.obj, 'build_gui'):
            vbox = Widgets.VBox()
            in_ws = pInfo.spec.ws
            if in_ws.startswith('in:'):
                # TODO: how to set this size appropriately
                # Which plugins are actually using this attribute?
                vbox.size = (400, 900)
            else:
                # attach size of workspace to container so plugin
                # can plan for how to configure itself
                wd, ht = self.ds.get_ws_size(in_ws)
                vbox.size = (wd, ht)

            if future:
                pInfo.obj.build_gui(vbox, future=future)
            else:
                pInfo.obj.build_gui(vbox)

    except Exception as e:
        errstr = "Plugin UI failed to initialize: %s" % (
            str(e))
        self.logger.error(errstr)
        try:
            (type, value, tb) = sys.exc_info()
            tb_str = "".join(traceback.format_tb(tb))
            self.logger.error("Traceback:\n%s" % (tb_str))

        except Exception as e:
            tb_str = "Traceback information unavailable."
            self.logger.error(tb_str)

        self.plugin_build_error(vbox, errstr + '\n' + tb_str)
        #raise PluginManagerError(e)

    if not had_error:
        try:
            if future:
                pInfo.obj.start(future=future)
            else:
                pInfo.obj.start()

        except Exception as e:
            had_error = True
            errstr = "Plugin failed to start correctly: %s" % (
                str(e))
            self.logger.error(errstr)
            try:
                (type, value, tb) = sys.exc_info()
                tb_str = "".join(traceback.format_tb(tb))
                self.logger.error("Traceback:\n%s" % (tb_str))

            except Exception as e:
                tb_str = "Traceback information unavailable."
                self.logger.error(tb_str)

            self.plugin_build_error(vbox, errstr + '\n' + tb_str)
            #raise PluginManagerError(e)

    if vbox is not None:
        self.finish_gui(pInfo, vbox)
        self.activate(pInfo)
        self.set_focus(pInfo.name)
    else:
        # If this is a local plugin, raise the channel associated with the
        # plug in
        if pInfo.chinfo is not None:
            itab = pInfo.chinfo.name
            self.ds.raise_tab(itab)
KeyError
dataset/ETHPy150Open ejeschke/ginga/ginga/gw/PluginManager.py/PluginManager.start_plugin_future
6,815
def tab_switched_cb(self, tab_w, widget):
    # A tab in a workspace in which we started a plugin has been
    # raised. Check for this widget and focus the plugin
    title = widget.extdata.get('tab_title', None)
    if title is not None:
        # is this a local plugin tab?
        if ':' in title:
            chname, plname = title.split(':')
            plname = plname.strip()
        try:
            info = self.get_info(plname)
        except __HOLE__:
            # no
            return
        pInfo = info.pInfo
        # important: make sure channel matches ours!
        if pInfo.tabname == title:
            if self.is_active(pInfo.name):
                if not self.has_focus(pInfo.name):
                    self.set_focus(pInfo.name)
            elif pInfo.chinfo is not None:
                # raise the channel associated with the plugin
                itab = pInfo.chinfo.name
                self.ds.raise_tab(itab)
KeyError
dataset/ETHPy150Open ejeschke/ginga/ginga/gw/PluginManager.py/PluginManager.tab_switched_cb
6,816
def file_size(filename):
    '''
    Obtains the size of a given file.

    @filename - Path to the file.

    Returns the size of the file.
    '''
    # Using open/lseek works on both regular files and block devices
    fd = os.open(filename, os.O_RDONLY)
    try:
        return os.lseek(fd, 0, os.SEEK_END)
    except __HOLE__ as e:
        raise e
    except Exception as e:
        raise Exception("file_size failed to obtain the size of '%s': %s" % (filename, str(e)))
    finally:
        os.close(fd)
KeyboardInterrupt
dataset/ETHPy150Open devttys0/binwalk/src/binwalk/core/common.py/file_size
6,817
def get_quoted_strings(string):
    '''
    Returns a string comprised of all data in between double quotes.

    @string - String to get quoted data from.

    Returns a string of quoted data on success.
    Returns a blank string if no quoted data is present.
    '''
    try:
        # This regex grabs all quoted data from string.
        # Note that this gets everything in between the first and last double quote.
        # This is intentional, as printed (and quoted) strings from a target file may contain
        # double quotes, and this function should ignore those. However, it also means that any
        # data between two quoted strings (ex: '"quote 1" non-quoted data "quote 2"') will also be included.
        return re.findall(r'\"(.*)\"', string)[0]
    except __HOLE__ as e:
        raise e
    except Exception:
        return ''
KeyboardInterrupt
dataset/ETHPy150Open devttys0/binwalk/src/binwalk/core/common.py/get_quoted_strings
6,818
def __init__(self, expression):
    self.expression = expression
    self.value = None

    if expression:
        try:
            self.value = self.evaluate(self.expression)
        except __HOLE__ as e:
            raise e
        except Exception as e:
            pass
KeyboardInterrupt
dataset/ETHPy150Open devttys0/binwalk/src/binwalk/core/common.py/MathExpression.__init__
6,819
def BlockFile(fname, mode='r', subclass=io.FileIO, **kwargs):
    # Defining a class inside a function allows it to be dynamically subclassed
    class InternalBlockFile(subclass):
        '''
        Abstraction class for accessing binary files.

        This class overrides io.FileIO's read and write methods. This guarantees two things:

            1. All requested data will be read/written via the read and write methods.
            2. All reads return a str object and all writes can accept either a str or a
               bytes object, regardless of the Python interpreter version.

        However, the downside is that other io.FileIO methods won't work properly in Python 3,
        namely things that are wrappers around self.read (e.g., readline, readlines, etc).

        This class also provides a read_block method, which is used by binwalk to read in a
        block of data, plus some additional data (DEFAULT_BLOCK_PEEK_SIZE), but on the next
        block read pick up at the end of the previous data block (not the end of the additional
        data). This is necessary for scans where a signature may span a block boundary.

        The decision to force read to return a str object instead of a bytes object is
        questionable for Python 3, but it seemed the best way to abstract differences in
        Python 2/3 from the rest of the code (especially for people writing plugins) and to
        add Python 3 support with minimal code change.
        '''

        # The DEFAULT_BLOCK_PEEK_SIZE limits the amount of data available to a signature.
        # While most headers/signatures are far less than this value, some may reference
        # pointers in the header structure which may point well beyond the header itself.
        # Passing the entire remaining buffer to libmagic is resource intensive and will
        # significantly slow the scan; this value represents a reasonable buffer size to
        # pass to libmagic which will not drastically affect scan time.
        DEFAULT_BLOCK_PEEK_SIZE = 8 * 1024

        # Max number of bytes to process at one time. This needs to be large enough to
        # limit disk I/O, but small enough to limit the size of processed data blocks.
        DEFAULT_BLOCK_READ_SIZE = 1 * 1024 * 1024

        def __init__(self, fname, mode='r', length=0, offset=0,
                     block=DEFAULT_BLOCK_READ_SIZE, peek=DEFAULT_BLOCK_PEEK_SIZE, swap=0):
            '''
            Class constructor.

            @fname  - Path to the file to be opened.
            @mode   - Mode to open the file in (default: 'r').
            @length - Maximum number of bytes to read from the file via self.block_read().
            @offset - Offset at which to start reading from the file.
            @block  - Size of data block to read (excluding any trailing size),
            @peek   - Size of trailing data to append to the end of each block.
            @swap   - Swap every n bytes of data.

            Returns None.
            '''
            self.total_read = 0
            self.block_read_size = self.DEFAULT_BLOCK_READ_SIZE
            self.block_peek_size = self.DEFAULT_BLOCK_PEEK_SIZE

            # This is so that custom parent classes can access/modify arguments as necessary
            self.args = GenericContainer(fname=fname,
                                         mode=mode,
                                         length=length,
                                         offset=offset,
                                         block=block,
                                         peek=peek,
                                         swap=swap,
                                         size=0)

            # Python 2.6 doesn't like modes like 'rb' or 'wb'
            mode = self.args.mode.replace('b', '')

            super(self.__class__, self).__init__(fname, mode)

            self.swap_size = self.args.swap

            if self.args.size:
                self.size = self.args.size
            else:
                try:
                    self.size = file_size(self.args.fname)
                except KeyboardInterrupt as e:
                    raise e
                except Exception:
                    self.size = 0

            if self.args.offset < 0:
                self.offset = self.size + self.args.offset
            else:
                self.offset = self.args.offset

            if self.offset < 0:
                self.offset = 0
            elif self.offset > self.size:
                self.offset = self.size

            if self.args.offset < 0:
                self.length = self.args.offset * -1
            elif self.args.length:
                self.length = self.args.length
            else:
                self.length = self.size - self.args.offset

            if self.length < 0:
                self.length = 0
            elif self.length > self.size:
                self.length = self.size

            if self.args.block is not None:
                self.block_read_size = self.args.block
            self.base_block_size = self.block_read_size

            if self.args.peek is not None:
                self.block_peek_size = self.args.peek
            self.base_peek_size = self.block_peek_size

            # Work around for python 2.6 where FileIO._name is not defined
            try:
                self.name
            except __HOLE__:
                self._name = fname

            self.path = os.path.abspath(self.name)
            self.seek(self.offset)

        def _swap_data_block(self, block):
            '''
            Reverses every self.swap_size bytes inside the specified data block.
            Size of data block must be a multiple of self.swap_size.

            @block - The data block to swap.

            Returns a swapped string.
            '''
            i = 0
            data = ""

            if self.swap_size > 0:
                while i < len(block):
                    data += block[i:i+self.swap_size][::-1]
                    i += self.swap_size
            else:
                data = block

            return data

        def reset(self):
            self.set_block_size(block=self.base_block_size, peek=self.base_peek_size)
            self.seek(self.offset)

        def set_block_size(self, block=None, peek=None):
            if block is not None:
                self.block_read_size = block
            if peek is not None:
                self.block_peek_size = peek

        def write(self, data):
            '''
            Writes data to the opened file.

            io.FileIO.write does not guarantee that all data will be written;
            this method overrides io.FileIO.write and does guarantee that all data will be written.

            Returns the number of bytes written.
            '''
            n = 0
            l = len(data)
            data = str2bytes(data)

            while n < l:
                n += super(self.__class__, self).write(data[n:])

            return n

        def read(self, n=-1):
            '''
            Reads up to n bytes of data (or to EOF if n is not specified).
            Will not read more than self.length bytes.

            io.FileIO.read does not guarantee that all requested data will be read;
            this method overrides io.FileIO.read and does guarantee that all data will be read.

            Returns a str object containing the read data.
            '''
            l = 0
            data = b''

            if self.total_read < self.length:
                # Don't read more than self.length bytes from the file
                if (self.total_read + n) > self.length:
                    n = self.length - self.total_read

                while n < 0 or l < n:
                    tmp = super(self.__class__, self).read(n-l)
                    if tmp:
                        data += tmp
                        l += len(tmp)
                    else:
                        break

                self.total_read += len(data)

            return self._swap_data_block(bytes2str(data))

        def peek(self, n=-1):
            '''
            Peeks at data in file.
            '''
            pos = self.tell()
            data = self.read(n)
            self.seek(pos)
            return data

        def seek(self, n, whence=os.SEEK_SET):
            if whence == os.SEEK_SET:
                self.total_read = n - self.offset
            elif whence == os.SEEK_CUR:
                self.total_read += n
            elif whence == os.SEEK_END:
                self.total_read = self.size + n

            super(self.__class__, self).seek(n, whence)

        def read_block(self):
            '''
            Reads in a block of data from the target file.

            Returns a tuple of (str(file block data), block data length).
            '''
            data = self.read(self.block_read_size)
            dlen = len(data)
            data += self.peek(self.block_peek_size)

            return (data, dlen)

    return InternalBlockFile(fname, mode=mode, **kwargs)
AttributeError
dataset/ETHPy150Open devttys0/binwalk/src/binwalk/core/common.py/BlockFile
6,820
def _compute_move_for_insert(self, itag, ii1, ii2, ij1, ij2, imeta):
    # Store some state on the range we'll be working with inside this
    # insert group.
    #
    # i_move_cur is the current location inside the insert group
    # (from ij1 through ij2).
    #
    # i_move_range is the current range of consecutive lines that
    # we'll use for a move. Each line in this range has a
    # corresponding consecutive delete line.
    #
    # r_move_ranges represents deleted move ranges. The key is a
    # string in the form of "{i1}-{i2}-{j1}-{j2}", with those
    # positions taken from the remove group for the line. The value
    # is an instance of MoveRange. The values in MoveRange are used to
    # quickly locate deleted lines we've found that match the inserted
    # lines, so we can assemble ranges later.
    i_move_cur = ij1
    i_move_range = MoveRange(i_move_cur, i_move_cur)
    r_move_ranges = {}  # key -> (start, end, group)
    move_key = None

    is_replace = (itag == 'replace')

    # Loop through every location from ij1 through ij2 - 1 until we've
    # reached the end.
    while i_move_cur < ij2:
        try:
            iline = self.differ.b[i_move_cur].strip()
        except __HOLE__:
            iline = None

        updated_range = False

        if iline and iline in self.removes:
            # The inserted line at this location has a corresponding
            # removed line.
            #
            # If there's already some information on removed line ranges
            # for this particular move block we're processing then we'll
            # update the range.
            #
            # The way we do that is to find each removed line that matches
            # this inserted line, and for each of those find out if there's
            # an existing move range that the found removed line
            # immediately follows. If there is, we update the existing
            # range.
            #
            # If there isn't any move information for this line, we'll
            # simply add it to the move ranges.
            for ri, rgroup, rgroup_index in self.removes.get(iline, []):
                r_move_range = r_move_ranges.get(move_key)

                if not r_move_range or ri != r_move_range.end + 1:
                    # We either didn't have a previous range, or this
                    # group didn't immediately follow it, so we need
                    # to start a new one.
                    move_key = '%s-%s-%s-%s' % rgroup[1:5]
                    r_move_range = r_move_ranges.get(move_key)

                if r_move_range:
                    # If the remove information for the line is next in
                    # the sequence for this calculated move range...
                    if ri == r_move_range.end + 1:
                        # This is part of the current range, so update
                        # the end of the range to include it.
                        r_move_range.end = ri
                        r_move_range.add_group(rgroup, rgroup_index)
                        updated_range = True
                else:
                    # Check that this isn't a replace line that's just
                    # "replacing" itself (which would happen if it's just
                    # changing whitespace).
                    if not is_replace or i_move_cur - ij1 != ri - ii1:
                        # We don't have any move ranges yet, or we're done
                        # with the existing range, so it's time to build
                        # one based on any removed lines we find that
                        # match the inserted line.
                        r_move_ranges[move_key] = \
                            MoveRange(ri, ri, [(rgroup, rgroup_index)])
                        updated_range = True

            if not updated_range and r_move_ranges:
                # We didn't find a move range that this line is a part
                # of, but we do have some existing move ranges stored.
                #
                # Given that updated_range is set, we'll be processing
                # the known move ranges below. We'll actually want to
                # re-check this line afterward, so that we can start a
                # new move range after we've finished processing the
                # current ones.
                #
                # To do that, just move i_move_cur back by one. That
                # negates the increment below.
                i_move_cur -= 1
                move_key = None
        elif iline == '' and move_key:
            # This is a blank or whitespace-only line, which would not
            # be in the list of removed lines above. We also have been
            # working on a move range.
            #
            # At this point, the plan is to just attach this blank
            # line onto the end of the last range being operated on.
            #
            # This blank line will help tie together adjacent move
            # ranges. If it turns out to be a trailing line, it'll be
            # stripped later in _determine_move_range.
            r_move_range = r_move_ranges.get(move_key)

            if r_move_range:
                new_end_i = r_move_range.end + 1

                if (new_end_i < len(self.differ.a) and
                    self.differ.a[new_end_i].strip() == ''):
                    # There was a matching blank line on the other end
                    # of the range, so we should feel more confident about
                    # adding the blank line here.
                    r_move_range.end = new_end_i

                    # It's possible that this blank line is actually an
                    # "equal" line. Though technically it didn't move,
                    # we're trying to create a logical, seamless move
                    # range, so we need to try to find that group and
                    # add it to the list of groups in the range, if it's
                    # not already there.
                    last_group, last_group_index = r_move_range.last_group

                    if new_end_i >= last_group[2]:
                        # This is in the next group, which hasn't been
                        # added yet. So add it.
                        cur_group_index = r_move_range.last_group[1] + 1
                        r_move_range.add_group(
                            self.groups[cur_group_index],
                            cur_group_index)

                    updated_range = True

        i_move_cur += 1

        if not updated_range or i_move_cur == ij2:
            # We've reached the very end of the insert group. See if
            # we have anything that looks like a move.
            if r_move_ranges:
                r_move_range = self._find_longest_move_range(r_move_ranges)

                # If we have a move range, see if it's one we want to
                # include or filter out. Some moves are not impressive
                # enough to display. For example, a small portion of a
                # comment, or whitespace-only changes.
                r_move_range = self._determine_move_range(r_move_range)

                if r_move_range:
                    # Rebuild the insert and remove ranges based on where
                    # we are now and which range we won.
                    #
                    # The new ranges will be actual lists of positions,
                    # rather than a beginning and end. These will be
                    # provided to the renderer.
                    #
                    # The ranges expected by the renderers are 1-based,
                    # whereas our calculations for this algorithm are
                    # 0-based, so we add 1 to the numbers.
                    #
                    # The upper boundaries passed to the range() function
                    # must actually be one higher than the value we want.
                    # So, for r_move_range, we actually increment by 2. We
                    # only increment i_move_cur by one, because i_move_cur
                    # already factored in the + 1 by being at the end of
                    # the while loop.
                    i_range = range(i_move_range.start + 1,
                                    i_move_cur + 1)
                    r_range = range(r_move_range.start + 1,
                                    r_move_range.end + 2)

                    moved_to_ranges = dict(zip(r_range, i_range))

                    for group, group_index in r_move_range.groups:
                        rmeta = group[-1]
                        rmeta.setdefault('moved-to', {}).update(
                            moved_to_ranges)

                    imeta.setdefault('moved-from', {}).update(
                        dict(zip(i_range, r_range)))

            # Reset the state for the next range.
            move_key = None
            i_move_range = MoveRange(i_move_cur, i_move_cur)
            r_move_ranges = {}
IndexError
dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/diffviewer/opcode_generator.py/DiffOpcodeGenerator._compute_move_for_insert
6,821
def _get_detailed_endpoint_information(self, host, ip, from_cache='off'):
    url = '{api_url}/getEndpointData?host={host}&s={endpoint_ip}&fromCache={from_cache}'.format(
        api_url=self.API_URL,
        host=host,
        endpoint_ip=ip,
        from_cache=from_cache
    )
    while True:
        try:
            response = self._handle_api_error(requests.get(url)).json()
            print('[INFO] [{ip_address}] Progress: {progress}%, Status: {status}'.format(
                ip_address=response.get('ipAddress'),
                progress='{}'.format(response.get('progress')) if response.get('progress') > -1 else '0',
                status=response.get('statusDetailsMessage')
            ))
            if response.get('progress') == 100:
                return
            elif response.get('progress') < 0:
                time.sleep(10)
            else:
                time.sleep(5)
        except __HOLE__:
            return
        except Exception as e:
            if self.DEBUG:
                print(e)
            time.sleep(5)
            continue
KeyboardInterrupt
dataset/ETHPy150Open takeshixx/python-ssllabs/ssllabs.py/SSLLabsAssessment._get_detailed_endpoint_information
6,822
def analyze(self, host=None, publish='off', start_new='on',
            from_cache='off', max_age=5, return_all='done',
            ignore_mismatch='on', resume=False, *args, **kwargs):
    """Start the assessment process.

    This is basically a wrapper function for all the API
    communication which takes care of everything. Any non-default
    behaviour of assessment processes can be tweaked with arguments
    to this function.

    Providing a *host* containing the FQDN of the target system(s)
    is the only mandatory argument. All remaining arguments are
    optional.
    """
    if not self._check_api_info():
        return False

    if host:
        self.host = host
    elif not self.host:
        return False

    self.publish = publish
    self.start_new = start_new
    self.return_all = return_all
    self.from_cache = from_cache
    self.max_age = max_age
    self.ignore_mismatch = ignore_mismatch

    if not resume:
        if not self.QUIET:
            print('[INFO] Retrieving assessment for {}...'.format(self.host))
        if not self._trigger_new_assessment():
            return False
    else:
        if not self.QUIET:
            print('[INFO] Checking running assessment for {}'.format(self.host))

    while True:
        _status = self._poll_api()

        if _status.get('status') == 'IN_PROGRESS':
            if not self.QUIET and resume:
                print('[INFO] Assessment is still in progress')
            break
        elif _status.get('status') == 'READY':
            if not self.QUIET and resume:
                print(
                    '[INFO] No running assessment. Use --use-cache ' +
                    'to receive a cached assessment, or start a new one.'
                )
                return
            else:
                return self._get_all_results()
        elif _status.get('status') == 'ERROR':
            print('An error occurred: {}'.format(_status.errors))
            return
        else:
            continue

    if self.VERBOSE:
        print('[INFO] Testing {} host(s)'.format(len(_status.get('endpoints'))))

    self.manager = multiprocessing.Manager()
    self.endpoint_jobs = []

    try:
        if self.VERBOSE:
            for endpoint in _status.get('endpoints'):
                _process = multiprocessing.Process(
                    target=self._get_detailed_endpoint_information,
                    args=(self.host, endpoint.get('ipAddress'))
                )
                self.endpoint_jobs.append(_process)
                _process.start()

            for job in self.endpoint_jobs:
                job.join()

        while True:
            _status = self._poll_api()

            if not _status:
                break

            _host_status = _status.get('status')

            if _host_status == 'IN_PROGRESS':
                if not self.QUIET:
                    sys.stdout.write('.')
                    sys.stdout.flush()
                time.sleep(10)
            elif _host_status == 'READY':
                return self._get_all_results()
            elif _host_status == 'ERROR':
                print('[ERROR] An error occurred: {}'.format(_status.errors))
                return
            elif _host_status == 'DNS':
                if self.VERBOSE:
                    print('[INFO] Resolving hostname')
                time.sleep(4)
            else:
                print('[INFO] Unknown host status: {}'.format(_host_status))
    except __HOLE__:
        pass
    except:
        return
KeyboardInterrupt
dataset/ETHPy150Open takeshixx/python-ssllabs/ssllabs.py/SSLLabsAssessment.analyze
6,823
def _pg_run(args, env=None, stdout=PIPE):
    try:
        p = Popen(args, env=env, stdout=stdout, stderr=STDOUT)
        stdout, _ = p.communicate()
        if p.returncode != 0:
            raise Exception("command failed: %s\nOUTPUT:\n%s" % (' '.join(args), stdout))
    except __HOLE__ as exc:
        if exc.errno != errno.ENOENT:
            raise
        # carefully convert a common exception message into something more informative
        raise Exception("""Could not find the binary %s.
This is probably because postgresql executables are not on the path.
Try setting the PATH variable to include the postgresql executables.
On Debian Linux with PostgreSQL you could run
    PATH=/usr/lib/postgresql/X.Y/bin:$PATH""" % args[0])
    return stdout
OSError
dataset/ETHPy150Open jinty/van.pg/van/pg/_cluster.py/_pg_run
6,824
def createdb(self, template=None):
    assert template is None or template.startswith('test_db'), template
    assert template is None or int(template[7:]) <= self._db_counter, (template, self._db_counter)
    dbs = self._db_preload.get(template, [])
    try:
        dbname = dbs.pop()
    except __HOLE__:
        dbname = None
    if dbname is None:
        dbname = self._next_dbname()
        self._createdb(dbname, template)
    if len(dbs) <= self._max_prepared and template is not None and \
            (self._is_bg_thread is None or not self._is_bg_thread.isAlive()):
        # NOTE: we only prepare templated databases if we can
        if self._is_bg_thread is not None:
            self._is_bg_thread.join()  # make very sure the current bg thread is finished
        preload_dbname = self._next_dbname()
        if template not in self._db_preload:
            self._db_preload[template] = []
        self._is_bg_thread = threading.Thread(target=self._preload,
                                              args=(preload_dbname, template, ))
        self._is_bg_thread.start()
    return dbname
IndexError
dataset/ETHPy150Open jinty/van.pg/van/pg/_cluster.py/RunningCluster.createdb
6,825
def test_issue1825(self):
    os.remove(test_support.TESTFN)
    testfnu = unicode(test_support.TESTFN)
    try:
        os.open(testfnu, os.O_RDONLY)
    except __HOLE__, e:
        self.assertTrue(isinstance(e.filename, unicode))
        self.assertEqual(e.filename, testfnu)
    else:
        self.assertTrue(False)

    # XXX: currently fail
    #for fn in os.chdir, os.listdir, os.rmdir:
    for fn in (os.rmdir,):
        try:
            fn(testfnu)
        except OSError, e:
            self.assertTrue(isinstance(e.filename, unicode))
            self.assertEqual(e.filename, testfnu)
        else:
            self.assertTrue(False)
OSError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_os_jy.py/OSTestCase.test_issue1825
6,826
def irc_command_333(self, command, args, nick, user, host):
    """ 333 - Channel Topic (Extended)
    Syntax: 333 channel nickname time
    """
    chan, nickname, time = args[-3:]
    if chan in self._channels:
        self._channels[chan].topic_setter = nickname
        try:
            self._channels[chan].topic_time = int(time)
        except __HOLE__:
            pass
ValueError
dataset/ETHPy150Open ecdavis/pants/pants/contrib/irc.py/IRCClient.irc_command_333
6,827
def decode(data):
    for codec in CODECS:
        try:
            return data.decode(codec)
        except __HOLE__:
            continue
    return data.decode('utf-8', 'ignore')
UnicodeDecodeError
dataset/ETHPy150Open ecdavis/pants/pants/contrib/irc.py/decode
6,828
def get_data_hardread(self, ts_list, symbol_list, data_item, verbose=False, bIncDelist=False):
    '''
    Read data into a DataFrame no matter what.
    @param ts_list: List of timestamps for which the data values are needed. Timestamps must be sorted.
    @param symbol_list: The list of symbols for which the data values are needed
    @param data_item: The data_item needed. Like open, close, volume etc. May be a list, in which case a list of DataFrame is returned.
    @param bIncDelist: If true, delisted securities will be included.
    @note: If a symbol is not found then a message is printed. All the values in the column for that stock will be NaN. Execution then continues as usual. No errors are raised at the moment.
    '''

    ''' Now support lists of items, still support old string behaviour '''
    bStr = False
    if( isinstance( data_item, str) ):
        data_item = [data_item]
        bStr = True

    # init data struct - list of arrays, each member is an array corresponding to a different data type
    # arrays contain n rows for the timestamps and m columns for each stock
    all_stocks_data = []
    for i in range( len(data_item) ):
        all_stocks_data.append( np.zeros ((len(ts_list), len(symbol_list))) );
        all_stocks_data[i][:][:] = np.NAN

    list_index = []

    ''' For each item in the list, add to list_index (later used to delete non-used items) '''
    for sItem in data_item:
        if( self.source == DataSource.CUSTOM ) :
            ''' If custom just load what you can '''
            if (sItem == DataItem.CLOSE):
                list_index.append(1)
            elif (sItem == DataItem.ACTUAL_CLOSE):
                list_index.append(2)
        if( self.source == DataSource.COMPUSTAT ):
            ''' If compustat, look through list of features '''
            for i, sLabel in enumerate(DataItem.COMPUSTAT):
                if sItem == sLabel:
                    ''' First item is date index, labels start at 1 index '''
                    list_index.append(i+1)
                    break
            else:
                raise ValueError ("Incorrect value for data_item %s"%sItem)
        if( self.source == DataSource.NORGATE ):
            if (sItem == DataItem.OPEN):
                list_index.append(1)
            elif (sItem == DataItem.HIGH):
                list_index.append (2)
            elif (sItem == DataItem.LOW):
                list_index.append(3)
            elif (sItem == DataItem.CLOSE):
                list_index.append(4)
            elif (sItem == DataItem.VOL):
                list_index.append(5)
            elif (sItem == DataItem.ACTUAL_CLOSE):
                list_index.append(6)
            else:
                #incorrect value
                raise ValueError ("Incorrect value for data_item %s"%sItem)
        if( self.source == DataSource.MLT or self.source == DataSource.YAHOO):
            if (sItem == DataItem.OPEN):
                list_index.append(1)
            elif (sItem == DataItem.HIGH):
                list_index.append (2)
            elif (sItem == DataItem.LOW):
                list_index.append(3)
            elif (sItem == DataItem.ACTUAL_CLOSE):
                list_index.append(4)
            elif (sItem == DataItem.VOL):
                list_index.append(5)
            elif (sItem == DataItem.CLOSE):
                list_index.append(6)
            else:
                #incorrect value
                raise ValueError ("Incorrect value for data_item %s"%sItem)
        #end elif
    #end data_item loop

    #read in data for a stock
    symbol_ctr = -1
    for symbol in symbol_list:
        _file = None
        symbol_ctr = symbol_ctr + 1
        #print self.getPathOfFile(symbol)
        try:
            if (self.source == DataSource.CUSTOM) or (self.source == DataSource.MLT) or (self.source == DataSource.YAHOO):
                file_path = self.getPathOfCSVFile(symbol);
            else:
                file_path = self.getPathOfFile(symbol);

            ''' Get list of other files if we also want to include delisted '''
            if bIncDelist:
                lsDelPaths = self.getPathOfFile( symbol, True )
                if file_path == None and len(lsDelPaths) > 0:
                    print 'Found delisted paths:', lsDelPaths

            ''' If we don't have a file path continue... unless we have delisted paths '''
            if (type (file_path) != type ("random string")):
                if bIncDelist == False or len(lsDelPaths) == 0:
                    continue; #File not found

            if not file_path == None:
                _file = open(file_path, "rb")
        except __HOLE__:
            # If unable to read then continue. The value for this stock will be nan
            print _file
            continue;

        assert( not _file == None or bIncDelist == True )

        ''' Open the file only if we have a valid name, otherwise we need delisted data '''
        if _file != None:
            if (self.source==DataSource.CUSTOM) or (self.source==DataSource.YAHOO) or (self.source==DataSource.MLT):
                creader = csv.reader(_file)
                row = creader.next()
                row = creader.next()
                #row.pop(0)
                for i, item in enumerate(row):
                    if i == 0:
                        try:
                            date = dt.datetime.strptime(item, '%Y-%m-%d')
                            date = date.strftime('%Y%m%d')
                            row[i] = float(date)
                        except:
                            date = dt.datetime.strptime(item, '%m/%d/%y')
                            date = date.strftime('%Y%m%d')
                            row[i] = float(date)
                    else:
                        row[i] = float(item)
                naData = np.array(row)
                for row in creader:
                    for i, item in enumerate(row):
                        if i == 0:
                            try:
                                date = dt.datetime.strptime(item, '%Y-%m-%d')
                                date = date.strftime('%Y%m%d')
                                row[i] = float(date)
                            except:
                                date = dt.datetime.strptime(item, '%m/%d/%y')
                                date = date.strftime('%Y%m%d')
                                row[i] = float(date)
                        else:
                            row[i] = float(item)
                    naData = np.vstack([np.array(row), naData])
            else:
                naData = pkl.load (_file)
            _file.close()
        else:
            naData = None

        ''' If we have delisted data, prepend to the current data '''
        if bIncDelist == True and len(lsDelPaths) > 0 and naData == None:
            for sFile in lsDelPaths[-1:]:
                ''' Changed to only use NEWEST data since sometimes there is overlap (JAVA) '''
                inFile = open( sFile, "rb" )
                naPrepend = pkl.load( inFile )
                inFile.close()

                if naData == None:
                    naData = naPrepend
                else:
                    naData = np.vstack( (naPrepend, naData) )

        #now remove all the columns except the timestamps and one data column
        if verbose:
            print self.getPathOfFile(symbol)

        ''' Fix 1 row case by reshaping '''
        if( naData.ndim == 1 ):
            naData = naData.reshape(1,-1)

        #print naData
        #print list_index

        ''' We open the file once, for each data item we need, fill out the array in all_stocks_data '''
        for lLabelNum, lLabelIndex in enumerate(list_index):
            ts_ctr = 0
            b_skip = True

            ''' select timestamps and the data column we want '''
            temp_np = naData[:,(0,lLabelIndex)]
            #print temp_np

            num_rows = temp_np.shape[0]

            symbol_ts_list = range(num_rows)  # preallocate

            for i in range (0, num_rows):
                timebase = temp_np[i][0]
                timeyear = int(timebase/10000)

                # Quick hack to skip most of the data
                # Note if we skip ALL the data, we still need to calculate
                # last time, so we know nothing is valid later in the code
                if timeyear < ts_list[0].year and i != num_rows - 1:
                    continue
                elif b_skip == True:
                    ts_ctr = i
                    b_skip = False

                timemonth = int((timebase-timeyear*10000)/100)
                timeday = int((timebase-timeyear*10000-timemonth*100))
                timehour = 16

                #The earliest time it can generate a time for is platform dependent
                symbol_ts_list[i] = dt.datetime(timeyear,timemonth,timeday,timehour)  # To make the time 1600 hrs on the day previous to this midnight
            #for ends

            #now we have only timestamps and one data column

            #Skip data from file which is before the first timestamp in ts_list
            while (ts_ctr < temp_np.shape[0]) and (symbol_ts_list[ts_ctr] < ts_list[0]):
                ts_ctr = ts_ctr+1
                #print "skipping initial data"
            #while ends

            for time_stamp in ts_list:
                if (symbol_ts_list[-1] < time_stamp):
                    #The timestamp is after the last timestamp for which we have data. So we give up. Note that we don't have to fill in NaNs because that is
                    #the default value.
                    break;
                else:
                    while ((ts_ctr < temp_np.shape[0]) and (symbol_ts_list[ts_ctr] < time_stamp)):
                        ts_ctr = ts_ctr+1
                    #while ends
                #else ends

                #print "at time_stamp: " + str(time_stamp) + " and symbol_ts " + str(symbol_ts_list[ts_ctr])

                if (time_stamp == symbol_ts_list[ts_ctr]):
                    #Data is present for this timestamp. So add to numpy array.
                    #print " adding to numpy array"
                    if (temp_np.ndim > 1):
                        #This if is needed because if a stock has data for 1 day only then the numpy array is 1-D rather than 2-D
                        all_stocks_data[lLabelNum][ts_list.index(time_stamp)][symbol_ctr] = temp_np [ts_ctr][1]
                    else:
                        all_stocks_data[lLabelNum][ts_list.index(time_stamp)][symbol_ctr] = temp_np [1]
                    #if ends
                    ts_ctr = ts_ctr + 1
            #inner for ends
        #outer for ends

    #print all_stocks_data

    ldmReturn = []  # List of data matrixes to return
    for naDataLabel in all_stocks_data:
        ldmReturn.append( pa.DataFrame( naDataLabel, ts_list, symbol_list) )

    ''' Continue to support single return type as a non-list '''
    if bStr:
        return ldmReturn[0]
    else:
        return ldmReturn

#get_data_hardread ends
IOError
dataset/ETHPy150Open QuantSoftware/QuantSoftwareToolkit/QSTK/qstkutil/DataAccess.py/DataAccess.get_data_hardread
6,829
def get_data (self, ts_list, symbol_list, data_item, verbose=False, bIncDelist=False):
    '''
    Read data into a DataFrame, but check to see if it is in a cache first.
    @param ts_list: List of timestamps for which the data values are needed. Timestamps must be sorted.
    @param symbol_list: The list of symbols for which the data values are needed
    @param data_item: The data_item needed. Like open, close, volume etc. May be a list, in which case a list of DataFrame is returned.
    @param bIncDelist: If true, delisted securities will be included.
    @note: If a symbol is not found then a message is printed. All the values in the column for that stock will be NaN. Execution then continues as usual. No errors are raised at the moment.
    '''

    # Construct hash -- filename where data may be already
    #
    # The idea here is to create a filename from the arguments provided.
    # We then check to see if the filename exists already, meaning that
    # the data has already been created and we can just read that file.

    ls_syms_copy = copy.deepcopy(symbol_list)

    # Create the hash for the symbols
    hashsyms = 0
    for i in symbol_list:
        hashsyms = (hashsyms + hash(i)) % 10000000

    # Create the hash for the timestamps
    hashts = 0

    # print "test point 1: " + str(len(ts_list))
    # spyfile=os.environ['QSDATA'] + '/Processed/Norgate/Stocks/US/NYSE Arca/SPY.pkl'
    for i in ts_list:
        hashts = (hashts + hash(i)) % 10000000
    hashstr = 'qstk-' + str (self.source)+'-' +str(abs(hashsyms)) + '-' + str(abs(hashts)) \
        + '-' + str(hash(str(data_item)))
    # + '-' + str(hash(str(os.path.getctime(spyfile))))

    # get the directory for scratch files from environment
    # try:
    #     scratchdir = os.environ['QSSCRATCH']
    # except KeyError:
    #     #self.rootdir = "/hzr71/research/QSData"
    #     raise KeyError("Please be sure to set the value for QSSCRATCH in config.sh or local.sh")

    # final complete filename
    cachefilename = self.scratchdir + '/' + hashstr + '.pkl'
    if verbose:
        print "cachefilename is: " + cachefilename

    # now either read the pkl file, or do a hardread
    readfile = False  # indicate that we have not yet read the file

    #check if the cachestall variable is defined.
    # try:
    #     catchstall=dt.timedelta(hours=int(os.environ['CACHESTALLTIME']))
    # except:
    #     catchstall=dt.timedelta(hours=1)
    cachestall = dt.timedelta(hours=self.cachestalltime)

    # Check if the file is older than the cachestalltime
    if os.path.exists(cachefilename):
        if ((dt.datetime.now() - dt.datetime.fromtimestamp(os.path.getmtime(cachefilename))) < cachestall):
            if verbose:
                print "cache hit"
            try:
                cachefile = open(cachefilename, "rb")
                start = time.time()  # start timer
                retval = pkl.load(cachefile)
                elapsed = time.time() - start  # end timer
                readfile = True  # remember success
                cachefile.close()
            except __HOLE__:
                if verbose:
                    print "error reading cache: " + cachefilename
                    print "recovering..."
            except EOFError:
                if verbose:
                    print "error reading cache: " + cachefilename
                    print "recovering..."

    if (readfile != True):
        if verbose:
            print "cache miss"
            print "beginning hardread"
        start = time.time()  # start timer
        if verbose:
            print "data_item(s): " + str(data_item)
            print "symbols to read: " + str(symbol_list)
        retval = self.get_data_hardread(ts_list, symbol_list, data_item, verbose, bIncDelist)
        elapsed = time.time() - start  # end timer
        if verbose:
            print "end hardread"
            print "saving to cache"
        try:
            cachefile = open(cachefilename,"wb")
            pkl.dump(retval, cachefile, -1)
            os.chmod(cachefilename,0666)
        except IOError:
            print "error writing cache: " + cachefilename
        if verbose:
            print "end saving to cache"
        if verbose:
            print "reading took " + str(elapsed) + " seconds"

    if type(retval) == type([]):
        for i, df_single in enumerate(retval):
            retval[i] = df_single.reindex(columns=ls_syms_copy)
    else:
        retval = retval.reindex(columns=ls_syms_copy)
    return retval
IOError
dataset/ETHPy150Open QuantSoftware/QuantSoftwareToolkit/QSTK/qstkutil/DataAccess.py/DataAccess.get_data
6,830
def processkeys(self, cpu):
    keyptr = 0x9000
    for i in range(0, 16):
        if not cpu.memory[keyptr + i]:
            try:
                key = self.term.keys.pop()
            except __HOLE__:
                break
            cpu.memory[keyptr + i] = key
IndexError
dataset/ETHPy150Open jtauber/dcpu16py/plugins/terminalplugin.py/TerminalPlugin.processkeys
6,831
def __init__(self, args):
    """
    Create a terminal based on the term argument
    """
    if args.term == "null":
        self.loaded = False
        return
    BasePlugin.__init__(self)
    self.time = None
    sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "terminals")))
    try:
        terminal = importlib.import_module(args.term + "_terminal")
    except __HOLE__ as e:
        print("Terminal %s not available: %s" % (args.term, e))
        raise SystemExit
    self.debug = args.debug
    m = re.match(r"(\d+)x(\d+)", args.geometry)
    if m is None:
        print("Invalid geometry `%s`" % args.geometry)
        args.width, args.height = 80, 24
    else:
        args.width = int(m.group(1))
        args.height = int(m.group(2))
    self.term = terminal.Terminal(args)
    self.name += "-%s" % args.term
    self.term.show()
ImportError
dataset/ETHPy150Open jtauber/dcpu16py/plugins/terminalplugin.py/TerminalPlugin.__init__
6,832
def load(self, *args, **kwargs):
    for app in self.apps_list:
        app_class_name = self.apps_location_prefix + "." + app
        try:
            app_class = importutils.import_class(app_class_name)
            app = app_class(*args, **kwargs)
            self.apps.append(app)
        except __HOLE__ as e:
            LOG.exception(_LE("Error loading application by class, %s"), e)
            raise ImportError(_("Application class not found."))
ImportError
dataset/ETHPy150Open openstack/dragonflow/dragonflow/controller/dispatcher.py/AppDispatcher.load
6,833
def get_payment_model():
    '''
    Return the Payment model that is active in this project
    '''
    try:
        app_label, model_name = settings.PAYMENT_MODEL.split('.')
    except (ValueError, __HOLE__):
        raise ImproperlyConfigured('PAYMENT_MODEL must be of the form '
                                   '"app_label.model_name"')
    payment_model = get_model(app_label, model_name)
    if payment_model is None:
        msg = (
            'PAYMENT_MODEL refers to model "%s" that has not been installed' %
            settings.PAYMENT_MODEL)
        raise ImproperlyConfigured(msg)
    return payment_model
AttributeError
dataset/ETHPy150Open mirumee/django-payments/payments/__init__.py/get_payment_model
6,834
def detect_starbound_folder(self):
    known_locations = [
        'C:\Program Files\Steam\SteamApps\common\Starbound',
        'C:\Program Files (x86)\Steam\SteamApps\common\Starbound',
        os.path.expanduser("~/Library/Application Support/Steam/SteamApps/common/Starbound"),
        os.path.expanduser("~/.steam/root/SteamApps/common/Starbound"),
        os.path.expanduser("~/.steam/steam/SteamApps/common/Starbound")
    ]

    if platform.system() == "Windows":
        import winreg
        try:
            key = "Software\\Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Uninstall\\Steam App 211820"
            if platform.machine().endswith('86'):
                key = "Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\\Steam App 211820"
            starbound_uninstall = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, key)
            starbound_path = winreg.QueryValueEx(starbound_uninstall, "InstallLocation")[0]
            known_locations.append(os.path.normpath(starbound_path))
            starbound_uninstall.Close()
        except OSError:
            pass

        try:
            steam = winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Software\\Valve\\Steam")
            steam_path = os.path.normpath(winreg.QueryValueEx(steam, "SteamPath")[0])
            known_locations.append(os.path.join(steam_path, "SteamApps", "common", "Starbound"))
            steam.Close()
        except __HOLE__:
            pass

    for path in known_locations:
        if os.path.isdir(path) and os.path.isfile(os.path.join(path, "assets", "packed.pak")):
            return path

    return ""
OSError
dataset/ETHPy150Open wizzomafizzo/starcheat/starcheat/config.py/Config.detect_starbound_folder
6,835
def _isdst(self, dt):
    try:
        return super(LocalTimezone, self)._isdst(dt)
    except (OverflowError, __HOLE__) as exc:
        exc_type = type(exc)
        exc_value = exc_type(
            "Unsupported value: %r. You should install pytz." % dt)
        exc_value.__cause__ = exc
        six.reraise(exc_type, exc_value, sys.exc_info()[2])
ValueError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Django-1.6.10/django/utils/timezone.py/LocalTimezone._isdst
6,836
def _get_timezone_name(timezone):
    """
    Returns the name of ``timezone``.
    """
    try:
        # for pytz timezones
        return timezone.zone
    except __HOLE__:
        # for regular tzinfo objects
        return timezone.tzname(None)

# Timezone selection functions.
# These functions don't change os.environ['TZ'] and call time.tzset()
# because it isn't thread safe.
AttributeError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Django-1.6.10/django/utils/timezone.py/_get_timezone_name
6,837
def __enter__(self):
    try:
        from StringIO import StringIO
    except __HOLE__:
        from io import StringIO
    self.old_stderr = sys.stderr
    sys.stderr = f = StringIO()
    return f
ImportError
dataset/ETHPy150Open johncsnyder/SwiftKitten/cffi/testing/support.py/StdErrCapture.__enter__
6,838
def enumerate_dynamic_imports(tokens):
    """
    Returns a dictionary of all dynamically imported modules (those inside of
    classes or functions) in the form of {<func or class name>: [<modules>]}

    Example:
        >>> enumerate_dynamic_modules(tokens)
        {'myfunc': ['zlib', 'base64']}
    """
    imported_modules = []
    import_line = False
    for index, tok in enumerate(tokens):
        token_type = tok[0]
        token_string = tok[1]
        if token_type == tokenize.NEWLINE:
            import_line = False
        elif token_string == "import":
            try:
                if tokens[index-1][0] == tokenize.NEWLINE:
                    import_line = True
            except __HOLE__:
                import_line = True  # Just means this is the first line
        elif import_line:
            if token_type == tokenize.NAME and tokens[index+1][1] != 'as':
                if token_string not in reserved_words:
                    if token_string not in imported_modules:
                        imported_modules.append(token_string)
    return imported_modules
IndexError
dataset/ETHPy150Open b3mb4m/shellsploit-framework/shell/Session/encoders/pyminifier/analyze.py/enumerate_dynamic_imports
6,839
def enumerate_import_methods(tokens):
    """
    Returns a list of imported module methods (such as re.compile) inside
    *tokens*.
    """
    global_imports = enumerate_global_imports(tokens)
    out = []
    for item in global_imports:
        for index, tok in enumerate(tokens):
            try:
                next_tok = tokens[index+1]
                try:
                    next_next_tok = tokens[index+2]
                except __HOLE__:
                    # Pretend it is a newline
                    next_next_tok = (54, '\n', (1, 1), (1, 2), '#\n')
            except IndexError:
                # Last token, no biggie
                # Pretend it is a newline here too
                next_tok = (54, '\n', (1, 1), (1, 2), '#\n')
            token_type = tok[0]
            token_string = tok[1]
            if token_string == item:
                if next_tok[1] == '.':
                    # We're calling a method
                    module_method = "%s.%s" % (token_string, next_next_tok[1])
                    if module_method not in out:
                        out.append(module_method)
    return out
IndexError
dataset/ETHPy150Open b3mb4m/shellsploit-framework/shell/Session/encoders/pyminifier/analyze.py/enumerate_import_methods
6,840
def ListDirectory(self, pathspec, state, depth=0):
    """A Recursive generator of files."""

    # Limit recursion depth
    if depth >= self.request.max_depth:
        return

    try:
        fd = vfs.VFSOpen(pathspec, progress_callback=self.Progress)
        files = fd.ListFiles()
    except (IOError, OSError) as e:
        if depth == 0:
            # We failed to open the directory the server asked for because dir
            # doesn't exist or some other reason. So we set status and return
            # back to the caller ending the Iterator.
            self.SetStatus(rdf_flows.GrrStatus.ReturnedStatus.IOERROR, e)
        else:
            # Can't open the directory we're searching, ignore the directory.
            logging.info("Find failed to ListDirectory for %s. Err: %s",
                         pathspec, e)
        return

    # If we are not supposed to cross devices, and don't know yet
    # which device we are on, we need to find out.
    if not self.request.cross_devs and self.filesystem_id is None:
        dir_stat = fd.Stat()
        self.filesystem_id = dir_stat.st_dev

    # Recover the start point for this directory from the state dict so we can
    # resume.
    start = state.get(pathspec.CollapsePath(), 0)

    for i, file_stat in enumerate(files):
        # Skip the files we already did before
        if i < start:
            continue

        if stat.S_ISDIR(file_stat.st_mode):
            # Do not traverse directories in a different filesystem.
            if self.request.cross_devs or self.filesystem_id == file_stat.st_dev:
                for child_stat in self.ListDirectory(file_stat.pathspec,
                                                     state, depth + 1):
                    yield child_stat

        state[pathspec.CollapsePath()] = i + 1
        yield file_stat

    # Now remove this from the state dict to prevent it from getting too large
    try:
        del state[pathspec.CollapsePath()]
    except __HOLE__:
        pass
KeyError
dataset/ETHPy150Open google/grr/grr/client/client_actions/searching.py/Find.ListDirectory
6,841
def TestFileContent(self, file_stat):
    """Checks the file for the presence of the regular expression."""
    # Content regex check
    try:
        data = ""

        with vfs.VFSOpen(file_stat.pathspec,
                         progress_callback=self.Progress) as fd:
            # Only read this much data from the file.
            while fd.Tell() < self.request.max_data:
                data_read = fd.read(1024000)
                if not data_read:
                    break
                data += data_read

                # Got it.
                if self.request.data_regex.Search(data):
                    return True

                # Keep a bit of context from the last buffer to ensure we dont miss a
                # match broken by buffer. We do not expect regex's to match something
                # larger than about 100 chars.
                data = data[-100:]
    except (IOError, __HOLE__):
        pass

    return False
KeyError
dataset/ETHPy150Open google/grr/grr/client/client_actions/searching.py/Find.TestFileContent
6,842
def add_data_to_combo(self, data):
    """ Add a data object to the combo box, if not already present """
    if not self.client.can_image_data(data):
        return
    combo = self.ui.displayDataCombo
    try:
        pos = _find_combo_data(combo, data)
    except __HOLE__:
        combo.addItem(data.label, userData=data)
ValueError
dataset/ETHPy150Open glue-viz/glue/glue/viewers/image/qt/viewer_widget.py/ImageWidgetBase.add_data_to_combo
6,843
def adapt_old_url_format(self, url):
    warnings.warn("Using deprecated connection string format.",
                  DeprecationWarning)

    password = self.options.get("PASSWORD", None)
    try:
        host, port, db = url.split(":")
        port = port if host == "unix" else int(port)
        db = int(db)

        if host == "unix":
            if password:
                url = "unix://:{password}@{port}?db={db}"
            else:
                url = "unix://{port}?db={db}"
        else:
            if password:
                url = "redis://:{password}@{host}:{port}?db={db}"
            else:
                url = "redis://{host}:{port}?db={db}"

        return url.format(password=password, host=host,
                          port=port, db=db)
    except (ValueError, __HOLE__):
        raise ImproperlyConfigured("Incorrect format '%s'" % (url))
TypeError
dataset/ETHPy150Open niwinz/django-redis/django_redis/pool.py/ConnectionFactory.adapt_old_url_format
6,844
@cp.expose
def default(self, *path, **data):
    """ Resolves URL paths to handler functions and casts the return value.
    """
    # If there is an app.thread.db connection,
    # pass it as a keyword argument named "db".
    # If there is a query parameter named "db",
    # it is overwritten (the reverse is not safe).
    for k, v in g.items():
        data[k] = v
    # Call the handler function for the given path.
    # Call @app.error(404) if no handler is found.
    # Call @app.error(403) if rate limit forbidden (= no API key).
    # Call @app.error(429) if rate limit exceeded.
    # Call @app.error(503) if a database error occurs.
    try:
        v = self.router(path, **data)
    except RouteError:
        raise cp.HTTPError("404 Not Found")
    except RateLimitForbidden:
        raise cp.HTTPError("403 Forbidden")
    except RateLimitExceeded:
        raise cp.HTTPError("429 Too Many Requests")
    except DatabaseError as e:
        raise cp.HTTPError("503 Service Unavailable", message=str(e))
    except HTTPRedirect as e:
        raise cp.HTTPRedirect(e.url)
    except __HOLE__ as e:
        raise cp.HTTPError(e.status, message=e.message)
    v = self._cast(v)
    #print(self.elapsed)
    return v
HTTPError
dataset/ETHPy150Open clips/pattern/pattern/server/__init__.py/Application.default
6,845
def _run_exitfuncs():
    """run any registered exit functions

    _exithandlers is traversed in reverse order so functions are executed
    last in, first out.
    """

    exc_info = None
    while _exithandlers:
        func, targs, kargs = _exithandlers.pop()
        try:
            func(*targs, **kargs)
        except __HOLE__:
            exc_info = sys.exc_info()
        except:
            import traceback
            print >> sys.stderr, "Error in atexit._run_exitfuncs:"
            traceback.print_exc()
            exc_info = sys.exc_info()

    if exc_info is not None:
        raise exc_info[0], exc_info[1], exc_info[2]
SystemExit
dataset/ETHPy150Open babble/babble/include/jython/Lib/atexit.py/_run_exitfuncs
6,846
def custom_importer(name, globals=None, locals=None, fromlist=None, level=-1):
    """
    The web2py custom importer. Like the standard Python importer but it
    tries to transform import statements as something like
    "import applications.app_name.modules.x".
    If the import failed, fall back on naive_importer
    """

    globals = globals or {}
    locals = locals or {}
    fromlist = fromlist or []

    try:
        if current.request._custom_import_track_changes:
            base_importer = TRACK_IMPORTER
        else:
            base_importer = NATIVE_IMPORTER
    except:  # there is no current.request (should never happen)
        base_importer = NATIVE_IMPORTER

    # if not relative and not from applications:
    if hasattr(current, 'request') \
            and level <= 0 \
            and not name.partition('.')[0] in INVALID_MODULES \
            and isinstance(globals, dict):
        import_tb = None
        try:
            try:
                oname = name if not name.startswith('.') else '.'+name
                return NATIVE_IMPORTER(oname, globals, locals, fromlist, level)
            except ImportError:
                items = current.request.folder.split(os.path.sep)
                if not items[-1]:
                    items = items[:-1]
                modules_prefix = '.'.join(items[-2:]) + '.modules'
                if not fromlist:
                    # import like "import x" or "import x.y"
                    result = None
                    for itemname in name.split("."):
                        new_mod = base_importer(
                            modules_prefix, globals, locals, [itemname], level)
                        try:
                            result = result or new_mod.__dict__[itemname]
                        except KeyError, e:
                            raise ImportError, 'Cannot import module %s' % str(e)
                        modules_prefix += "." + itemname
                    return result
                else:
                    # import like "from x import a, b, ..."
                    pname = modules_prefix + "." + name
                    return base_importer(pname, globals, locals, fromlist, level)
        except __HOLE__, e1:
            import_tb = sys.exc_info()[2]
            try:
                return NATIVE_IMPORTER(name, globals, locals, fromlist, level)
            except ImportError, e3:
                raise ImportError, e1, import_tb  # there is an import error in the module
        except Exception, e2:
            raise e2  # there is an error in the module
        finally:
            if import_tb:
                import_tb = None

    return NATIVE_IMPORTER(name, globals, locals, fromlist, level)
ImportError
dataset/ETHPy150Open uwdata/termite-visualizations/web2py/gluon/custom_import.py/custom_importer
6,847
def handle(self, *storage_paths, **kwargs):
    verbosity = int(kwargs.get("verbosity", 1))
    for storage_path in storage_paths:
        if verbosity >= 1:
            self.stdout.write("Syncing meta for {}".format(storage_path))
        # Import the storage.
        try:
            storage = import_string(storage_path)
        except __HOLE__:
            raise CommandError("Could not import {}".format(storage_path))
        # Sync the meta.
        for path in storage.sync_meta_iter():
            if verbosity >= 1:
                self.stdout.write("  Synced meta for {}".format(path))
ImportError
dataset/ETHPy150Open etianen/django-s3-storage/django_s3_storage/management/commands/s3_sync_meta.py/Command.handle
6,848
@result.setter
def result(self, raw_result):
    # If check_error=False, run_vsctl can return None
    if not raw_result:
        self._result = None
        return

    try:
        json = jsonutils.loads(raw_result)
    except (__HOLE__, TypeError) as e:
        # This shouldn't happen, but if it does and we check_errors
        # log and raise.
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Could not parse: %(raw_result)s. "
                          "Exception: %(exception)s"),
                      {'raw_result': raw_result, 'exception': e})

    headings = json['headings']
    data = json['data']
    results = []
    for record in data:
        obj = {}
        for pos, heading in enumerate(headings):
            obj[heading] = ovsdb.val_to_py(record[pos])
        results.append(obj)
    self._result = results
ValueError
dataset/ETHPy150Open openstack/neutron/neutron/agent/ovsdb/impl_vsctl.py/DbCommand.result
6,849
def open(self):
    '''
    Build the base query object for this wrapper,
    then add all of the counters required for the query.

    Raise a QueryError if we can't complete the functions.

    If we are already open, then do nothing.
    '''
    if not self.active:  # to prevent having multiple open queries
        # curpaths are made accessible here because of the possibility of volatile paths
        # which may be dynamically altered by subclasses.
        self.curpaths = copy.copy(self.paths)
        try:
            base = win32pdh.OpenQuery()
            for path in self.paths:
                try:
                    self.counters.append(win32pdh.AddCounter(base, path))
                except win32api.error:  # we passed a bad path
                    self.counters.append(0)
                    pass
            self._base = base
            self.active = 1
            return 0  # open succeeded
        except:  # if we encounter any errors, kill the Query
            try:
                self.killbase(base)
            except __HOLE__:  # failed in creating query
                pass
            self.active = 0
            self.curpaths = []
            raise QueryError(self)
    return 1  # already open
NameError
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/site-packages/win32/lib/win32pdhquery.py/BaseQuery.open
6,850
def close(self): ''' Makes certain that the underlying query object has been closed, and that all counters have been removed from it. This is important for reference counting. You should only need to call close if you have previously called open. The collectdata methods all can handle opening and closing the query. Calling close multiple times is acceptable. ''' try: self.killbase(self._base) except __HOLE__: self.killbase()
AttributeError
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/site-packages/win32/lib/win32pdhquery.py/BaseQuery.close
6,851
def getinstpaths(self,object,counter,machine=None,objtype='Process',format = win32pdh.PDH_FMT_LONG): ''' ### Not an end-user function Calculate the paths for an instance object. Should alter to allow processing for lists of object-counter pairs. ''' items, instances = win32pdh.EnumObjectItems(None,None,objtype, -1) # find out how many instances of this element we have... instances.sort() try: cur = instances.index(object) except __HOLE__: return [] # no instances of this object temp = [object] try: while instances[cur+1] == object: temp.append(object) cur = cur+1 except IndexError: # if we went over the end pass paths = [] for ind in range(len(temp)): # can this raise an error? paths.append(win32pdh.MakeCounterPath( (machine,'Process',object,None,ind,counter) ) ) return paths # should also return the number of elements for naming purposes
ValueError
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/site-packages/win32/lib/win32pdhquery.py/Query.getinstpaths
6,852
def init_url_map(self, url_module): self.has_error_on_init_url_map = False mod = import_string(url_module) if hasattr(mod, 'view_groups'): base_rules = [] self.views = {} for view_group in getattr(mod, 'view_groups'): try: base_rules = base_rules + view_group.get_rules() self.views.update(view_group.get_views()) except Exception, e: logging.error("Failed to mount ViewGroup: %s", e) raise import copy self.url_map = Map(copy.deepcopy(base_rules)) else: make_url = getattr(mod, 'make_url') all_views = getattr(mod, 'all_views') self.views = all_views self.url_map = make_url() for app in self.get_installed_apps(): mountpoint = self.get_mount_point(app) if mountpoint is None: logging.debug("Mountpoint for app '%s' is set to None explicitly," " skipped." % app) continue try: url_mod = import_string("%s.%s" % (app, url_module)) except (ImportError, __HOLE__): try: url_mod = import_string("%s.urls" % app) except (ImportError, AttributeError): logging.error("Failed to import app '%s.urls'." % app) logging.debug("Reason:\n%s" % self._get_traceback(sys.exc_info())) raise rules = [] if hasattr(url_mod, 'view_groups'): for view_group in getattr(url_mod, 'view_groups'): try: endpoint_prefix = app.split(".")[-1] rules = rules + view_group.get_rules(endpoint_prefix) self.views.update(view_group.get_views(endpoint_prefix)) except Exception, e: logging.error("Failed to mount ViewGroup: %s", e) raise else: make_rules = getattr(url_mod, 'make_rules', None) if make_rules: rules = make_rules() all_views = getattr(url_mod, 'all_views', None) if all_views: self.views.update(all_views) self.url_map.add(Submount(mountpoint, rules)) # TODO move the block bellow to somewhere else if 'kay.auth.middleware.AuthenticationMiddleware' in \ self.app_settings.MIDDLEWARE_CLASSES: try: klass = import_string(self.app_settings.AUTH_USER_BACKEND) except (AttributeError, ImportError), e: raise exceptions.ImproperlyConfigured, \ 'Failed to import %s: "%s".' %\ (self.app_settings.AUTH_USER_BACKEND, e) self.auth_backend = klass()
AttributeError
dataset/ETHPy150Open IanLewis/kay/kay/app.py/KayApp.init_url_map
6,853
def init_jinja2_environ(self): """ Initialize the environment for jinja2. """ if os.environ.get("SERVER_NAME", None) == "localhost" or \ os.environ.get("SERVER_SOFTWARE", "").startswith("Dev"): from jinja2 import (FileSystemLoader, ChoiceLoader, PrefixLoader,) template_postfix = "" else: from kay.utils.jinja2utils.code_loaders import FileSystemCodeLoader as \ FileSystemLoader from kay.utils.jinja2utils.code_loaders import ChoiceCodeLoader as \ ChoiceLoader from kay.utils.jinja2utils.code_loaders import PrefixCodeLoader as \ PrefixLoader template_postfix = "_compiled" per_app_loaders = {} for app in self.get_installed_apps(): try: mod = import_string(app) except (ImportError, AttributeError): logging.warning("Failed to import app '%s', skipped." % app) continue try: app_key = getattr(mod, 'template_loader_key') except AttributeError: app_key = get_app_tailname(app) per_app_loaders[app_key] = FileSystemLoader( os.path.join(os.path.dirname(mod.__file__), "templates"+template_postfix)) loader = PrefixLoader(per_app_loaders) target_dirs = self.app_settings.TEMPLATE_DIRS+("kay/templates",) def replace_dirname(orig): if 'templates' in orig: return orig.replace('templates', 'templates'+template_postfix) else: return orig+template_postfix import kay base_loader = FileSystemLoader( [os.path.join(kay.PROJECT_DIR, replace_dirname(d)) for d in target_dirs]) loader = ChoiceLoader([base_loader, loader]) env_dict = {} env_dict.update(self.app_settings.JINJA2_ENVIRONMENT_KWARGS) jinja2_ext = [] for ext_str in self.app_settings.JINJA2_EXTENSIONS: try: ext = import_string(ext_str) except (__HOLE__, AttributeError), e: logging.warn('Failed to import jinja2 extension %s: "%s", skipped.' % (ext_str, e)) continue jinja2_ext.append(ext) env_dict.update(dict(loader = loader, undefined=NullUndefined, extensions=jinja2_ext)) self._jinja2_env = Environment(**env_dict) for key, filter_str in self.app_settings.JINJA2_FILTERS.iteritems(): try: func = import_string(filter_str) except (ImportError, AttributeError): logging.warn('Cannot import %s.' % filter_str) continue if self._jinja2_env.filters.has_key(key): logging.warn('Key "%s" has already defined, skipped.' % key) continue if not callable(func): logging.warn('%s is not a callable.' % filter_str) continue self._jinja2_env.filters[key] = func
ImportError
dataset/ETHPy150Open IanLewis/kay/kay/app.py/KayApp.init_jinja2_environ
6,854
def load_middleware(self): self._response_middleware = [] self._view_middleware = [] self._exception_middleware = [] request_middleware = [] for mw_path in self.app_settings.MIDDLEWARE_CLASSES: try: mw_class = import_string(mw_path) except (__HOLE__, AttributeError), e: raise exceptions.ImproperlyConfigured, \ '%s isn\'t a valid middleware module: "%s"' % (mw_path, e) try: mw_instance = mw_class() except exceptions.MiddlewareNotUsed: continue if hasattr(mw_instance, 'process_request'): request_middleware.append(mw_instance.process_request) if hasattr(mw_instance, 'process_view'): self._view_middleware.append(mw_instance.process_view) if hasattr(mw_instance, 'process_response'): self._response_middleware.insert(0, mw_instance.process_response) if hasattr(mw_instance, 'process_exception'): self._exception_middleware.insert(0, mw_instance.process_exception) # We only assign to this when initialization is complete as it is used # as a flag for initialization being complete. self._request_middleware = request_middleware
ImportError
dataset/ETHPy150Open IanLewis/kay/kay/app.py/KayApp.load_middleware
6,855
def get_response(self, request): global translations_cache if self.app_settings.USE_I18N: from kay.i18n import load_translations from kay.i18n import get_language_from_request lang = get_language_from_request(request) if not lang: lang = self.app_settings.DEFAULT_LANG translations = translations_cache.get("trans:%s:%s" % (self.app_settings.APP_NAME, lang), None) if translations is None: translations = load_translations(lang) translations_cache["trans:%s:%s" % (self.app_settings.APP_NAME, lang)] = translations self.active_translations = translations else: from kay.i18n import KayNullTranslations lang = None self.active_translations = KayNullTranslations() request.lang = lang if self._request_middleware is None: self.load_middleware() try: try: endpoint, values = local.url_adapter.match() except RequestRedirect, e: if request.args: e.new_url += '?' + url_encode(request.args) raise e if self.app_settings.IS_MARKETPLACE_APP: if values.has_key(settings.MARKETPLACE_DOMAIN_NAME_KEY): setattr(request, settings.MARKETPLACE_DOMAIN_NAME_KEY, values[settings.MARKETPLACE_DOMAIN_NAME_KEY]) # apply request middleware for mw_method in self._request_middleware: response = mw_method(request) if response: return response view_func = self.views.get(endpoint, None) try: if isinstance(view_func, tuple): view_classname, args, kwargs = view_func view_cls = import_string(view_classname) view_func = view_cls(*args, **kwargs) elif isinstance(view_func, basestring): view_func = import_string(view_func) assert(callable(view_func)) except StandardError, e: logging.error(self._get_traceback(sys.exc_info())) raise InternalServerError(e) for mw_method in self._view_middleware: response = mw_method(request, view_func, **values) if response: return response try: response = view_func(request, **values) except Exception, e: # If the view raised an exception, run it through exception # middleware, and if the exception middleware returns a # response, use that. Otherwise, reraise the exception. for middleware_method in self._exception_middleware: response = middleware_method(request, e) if response: return response raise except RequestRedirect, e: response = e.get_response(None) except HTTPException, e: logging.warning(e) response = render_error(e) except __HOLE__: # Allow sys.exit() to actually exit. raise except CapabilityDisabledError, e: from kay.i18n import gettext as _ logging.error(e) # Saving session will also fail. if hasattr(request, 'session'): del(request.session) return Response( render_to_string( "_internal/maintenance.html", {"message": _('Appengine might be under maintenance.')}), content_type="text/html; charset=utf-8", status=503) except Exception: # Handle everything else, including SuspiciousOperation, etc. # Get the exception info now, in case another exception is thrown later. exc_info = sys.exc_info() return self.handle_uncaught_exception(request, exc_info) return response
SystemExit
dataset/ETHPy150Open IanLewis/kay/kay/app.py/KayApp.get_response
6,856
def _get_traceback(self, exc_info): "Helper function to return the traceback as a string" import traceback ret = '\n'.join(traceback.format_exception(*(exc_info or sys.exc_info()))) try: return ret.decode('utf-8') except __HOLE__: return ret
UnicodeDecodeError
dataset/ETHPy150Open IanLewis/kay/kay/app.py/KayApp._get_traceback
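The helper above is the standard decode-or-give-up idiom for byte strings of unknown encoding, useful inside error reporting where a second failure must not mask the first. As a standalone sketch:

def safe_decode(data, encoding="utf-8"):
    # Return text when the bytes decode cleanly; otherwise hand back
    # the original object rather than raising inside error handling.
    try:
        return data.decode(encoding)
    except UnicodeDecodeError:
        return data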
6,857
def handle_module_upgrade_request(controller, module_id, pipeline): from vistrails.core.upgradeworkflow import UpgradeWorkflowHandler create_new_connection = UpgradeWorkflowHandler.create_new_connection reg = vistrails.core.modules.module_registry.get_module_registry() def find_module_in_upgrade_action(action): for op in action.operations: if isinstance(op, AddOp): if op.what == Module.vtType: return op.data return None def find_inputs(m): functions = {} for f in m.functions: if f.name not in functions: functions[f.name] = [f] else: functions[f.name].append(f) connections = {} for edge in pipeline.graph.iter_edges_to(m.id): c = pipeline.connections[edge[2]] if c.destination.name not in connections: connections[c.destination.name] = [c] else: connections[c.destination.name].append(c) return (functions, connections) def find_figure(m): for edge in pipeline.graph.iter_edges_from(m.id): to_m = pipeline.modules[edge[1]] if to_m.name == 'MplFigure': # !!! assume only a single down-stream MplFigure !!! # may have old or new module... if pipeline.connections[edge[2]].destination.name == 'addPlot': return (to_m, None) else: return (to_m, edge) return (None, None) def attach_inputs(new_module, inputs, selected_inputs): conns = [] for port_name in selected_inputs: if port_name in inputs[0]: for f in inputs[0][port_name]: if len(f.parameters) > 0: new_param_vals, aliases = zip(*[(p.strValue, p.alias) for p in f.parameters]) else: new_param_vals = [] aliases = [] new_f = controller.create_function(new_module, port_name, new_param_vals, aliases) new_module.add_function(new_f) if port_name in inputs[1]: for c in inputs[1][port_name]: source_module = pipeline.modules[c.source.id] new_conn = create_new_connection(controller, source_module, c.source, new_module, port_name) conns.append(new_conn) return conns module = pipeline.modules[module_id] to_properties = [] to_axes = [] old_figure = (None, None) props_name = None if module.name == 'MplScatterplot': props_name = 'MplPathCollectionProperties' props_input = 'pathCollectionProperties' to_properties = ['facecolor'] to_axes = ['title', 'xlabel', 'ylabel'] inputs = find_inputs(module) old_loc = module.location old_figure = find_figure(module) elif module.name == 'MplHistogram': props_name = 'MplRectangleProperties' props_input = 'rectangleProperties' to_properties = ['facecolor'] to_axes = ['title', 'xlabel', 'ylabel'] inputs = find_inputs(module) old_loc = module.location old_figure = find_figure(module) module_remap = {'MplPlot': [(None, '1.0.0', 'MplSource', {'dst_port_remap': {'source': 'source', 'Hide Toolbar': None}, 'src_port_remap': {'source': 'value', 'self': 'value'}})], 'MplFigure': [(None, '1.0.0', None, {'dst_port_remap': {'Script': 'addPlot'}, 'src_port_remap': {'FigureManager': 'figure', 'File': 'file'}}), ('1.0.0', '1.0.6', None, {'src_port_remap': {'self': 'figure'}})], 'MplFigureCell': [(None, '1.0.0', None, {'dst_port_remap': {'FigureManager': 'figure'}})], # we will delete parts of this but add back later 'MplScatterplot': [(None, '1.0.0', 'MplScatter', {'dst_port_remap': {'xData': 'x', 'yData': 'y', 'facecolor': None, 'title': None, 'xlabel': None, 'ylabel': None, 'self': 'value'}, 'src_port_remap': {'source': 'value'}})], 'MplHistogram': [(None, '1.0.0', 'MplHist', {'dst_port_remap': {'columnData': 'x', 'bins': 'bins', 'facecolor': None, 'title': None, 'xlabel': None, 'ylabel': None, 'self': 'value'}, 'src_port_remap': {'source': 'value'}})], } # '1.0.2' -> '1.0.3' changes 'self' output port to 'value' 
module_remap.setdefault('MplSource', []).append( (None, '1.0.3', None, { 'src_port_remap': {'self': 'value'}})) if module.name in (m.__name__ for m in _plot_modules + _artist_modules): module_remap.setdefault(module.name, []).append( (None, '1.0.3', None, { 'src_port_remap': {'self': 'value'}})) action_list = [] if old_figure[1] is not None and \ any(p in inputs[0] or p in inputs[1] for p in to_axes): # need to remove the edge between plot and figure pipeline.graph.delete_edge(*old_figure[1]) conn = pipeline.connections[old_figure[1][2]] action = vistrails.core.db.action.create_action([('delete', conn)]) action_list.append(action) try: from vistrails.packages.spreadsheet.init import upgrade_cell_to_output except __HOLE__: pass else: module_remap = upgrade_cell_to_output( module_remap, module_id, pipeline, 'MplFigureCell', 'MplFigureOutput', '1.0.5', 'figure') normal_actions = UpgradeWorkflowHandler.remap_module( controller, module_id, pipeline, module_remap) action_list.extend(normal_actions) more_ops = [] if any(p in inputs[0] or p in inputs[1] for p in to_properties): # create props module if props_name is None: raise RuntimeError("properties module needed for unknown module " "%s" % module.name) desc = reg.get_descriptor_by_name(identifier, props_name) props_module = \ controller.create_module_from_descriptor(desc, old_loc.x + 100, old_loc.y + 100) more_ops.append(('add', props_module)) # attach functions/connections conns = attach_inputs(props_module, inputs, to_properties) more_ops.extend([('add', c) for c in conns]) # attach into pipeline new_plot_module = find_module_in_upgrade_action(normal_actions[0]) assert new_plot_module is not None new_conn = create_new_connection(controller, props_module, 'self', new_plot_module, props_input) more_ops.append(('add', new_conn)) if any(p in inputs[0] or p in inputs[1] for p in to_axes): # create axes module desc = reg.get_descriptor_by_name(identifier, "MplAxesProperties") if old_figure[0] is not None: old_loc = old_figure[0].location axes_module = \ controller.create_module_from_descriptor(desc, old_loc.x + 100, old_loc.y + 100) more_ops.append(('add', axes_module)) # attach functions/connections conns = attach_inputs(axes_module, inputs, to_axes) more_ops.extend([('add', c) for c in conns]) # attach axes properties to new figure if old_figure[0] is not None and old_figure[1] is not None: # remap figure fig_action = UpgradeWorkflowHandler.remap_module(controller, old_figure[0].id, pipeline, module_remap) fig_module = find_module_in_upgrade_action(fig_action[0]) assert fig_module is not None # add the removed edge back in pipeline.graph.add_edge(*old_figure[1]) action_list.extend(fig_action) new_plot_module = find_module_in_upgrade_action(normal_actions[0]) assert new_plot_module is not None conn = create_new_connection(controller, new_plot_module, 'self', fig_module, 'addPlot') action = vistrails.core.db.action.create_action([('add', conn)]) action_list.append(action) else: fig_module = old_figure[0] new_conn = create_new_connection(controller, axes_module, 'self', fig_module, 'axesProperties') more_ops.append(('add', new_conn)) return action_list
ImportError
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/packages/matplotlib/init.py/handle_module_upgrade_request
6,858
def re_parts(regex_list, text): """ An iterator that returns the entire text, but split by which regex it matched, or none at all. If it did, the first value of the returned tuple is the index into the regex list, otherwise -1. >>> first_re = re.compile('asdf') >>> second_re = re.compile('an') >>> list(re_parts([first_re, second_re], 'This is an asdf test.')) [(-1, 'This is '), (1, 'an'), (-1, ' '), (0, 'asdf'), (-1, ' test.')] >>> list(re_parts([first_re, second_re], 'asdfasdfasdf')) [(0, 'asdf'), (0, 'asdf'), (0, 'asdf')] >>> list(re_parts([], 'This is an asdf test.')) [(-1, 'This is an asdf test.')] >>> third_re = re.compile('sdf') >>> list(re_parts([first_re, second_re, third_re], 'This is an asdf test.')) [(-1, 'This is '), (1, 'an'), (-1, ' '), (0, 'asdf'), (-1, ' test.')] """ def match_compare(x, y): return x.start() - y.start() prev_end = 0 iter_dict = dict((r, r.finditer(text)) for r in regex_list) # a heapq containing matches matches = [] # bootstrap the search with the first hit for each iterator for regex, iterator in iter_dict.items(): try: match = iterator.next() heappush(matches, (match.start(), match)) except StopIteration: iter_dict.pop(regex) # process matches, revisiting each iterator from which a match is used while matches: # get the earliest match start, match = heappop(matches) end = match.end() if start > prev_end: # yield the text from current location to start of match yield (-1, text[prev_end:start]) # yield the match yield (regex_list.index(match.re), text[start:end]) # get the next match from the iterator for this match if match.re in iter_dict: try: newmatch = iter_dict[match.re].next() heappush(matches, (newmatch.start(), newmatch)) except __HOLE__: iter_dict.pop(match.re) prev_end = end # yield text from end of last match to end of text last_bit = text[prev_end:] if len(last_bit) > 0: yield (-1, last_bit)
StopIteration
dataset/ETHPy150Open ericflo/django-oembed/oembed/core.py/re_parts
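re_parts above drives several finditer iterators through a heap and drops each one when it raises StopIteration. A self-contained Python 3 sketch of that merge; the counter breaks ties so the heap never has to compare the iterator objects themselves. For example, list(merge_sorted(iter([1, 4]), iter([2, 3]))) yields [1, 2, 3, 4]:

import heapq
import itertools

def merge_sorted(*iterators):
    counter = itertools.count()
    heap = []
    for it in iterators:
        try:
            # Seed the heap with the first value of each iterator.
            heapq.heappush(heap, (next(it), next(counter), it))
        except StopIteration:
            pass
    while heap:
        value, _, it = heapq.heappop(heap)
        yield value
        try:
            # Refill from the iterator that just produced a value;
            # exhausted iterators simply drop out of the heap.
            heapq.heappush(heap, (next(it), next(counter), it))
        except StopIteration:
            pass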
6,859
def replace(text, max_width=MAX_WIDTH, max_height=MAX_HEIGHT): """ Scans a block of text, replacing anything matched by a ``ProviderRule`` pattern with an OEmbed html snippet, if possible. Templates should be stored at oembed/{format}.html, so for example: oembed/video.html These templates are passed a context variable, ``response``, which is a dictionary representation of the response. """ rules = list(ProviderRule.objects.all()) patterns = [re.compile(r.regex) for r in rules] # Compiled patterns from the rules parts = [] # The parts that we will assemble into the final return value. indices = [] # List of indices of parts that need to be replaced with OEmbed stuff. indices_rules = [] # List of indices into the rules in order for which index was gotten by. urls = set() # A set of URLs to try to lookup from the database. stored = {} # A mapping of URLs to StoredOEmbed objects. index = 0 # First we pass through the text, populating our data structures. for i, part in re_parts(patterns, text): if i == -1: parts.append(part) index += 1 else: to_append = "" # If the link ends with one of our overrides, build a list while part[-1] in END_OVERRIDES: to_append += part[-1] part = part[:-1] indices.append(index) urls.add(part) indices_rules.append(i) parts.append(part) index += 1 if to_append: parts.append(to_append) index += 1 # Now we fetch a list of all stored patterns, and put it in a dictionary # mapping the URL to to the stored model instance. for stored_embed in StoredOEmbed.objects.filter(match__in=urls, max_width=max_width, max_height = max_height): stored[stored_embed.match] = stored_embed # Now we're going to do the actual replacement of URL to embed. for i, id_to_replace in enumerate(indices): rule = rules[indices_rules[i]] part = parts[id_to_replace] try: # Try to grab the stored model instance from our dictionary, and # use the stored HTML fragment as a replacement. parts[id_to_replace] = stored[part].html except KeyError: try: # Build the URL based on the properties defined in the OEmbed spec. url = u"%s?url=%s&maxwidth=%s&maxheight=%s&format=%s" % ( rule.endpoint, part, max_width, max_height, FORMAT ) # Fetch the link and parse the JSON. resp = simplejson.loads(fetch(url)) # link types that don't have html elements aren't dealt with right now. if resp['type'] == 'link' and 'html' not in resp: raise ValueError # Depending on the embed type, grab the associated template and # pass it the parsed JSON response as context. replacement = render_to_string('oembed/%s.html' % resp['type'], {'response': resp}) if replacement: stored_embed = StoredOEmbed.objects.create( match = part, max_width = max_width, max_height = max_height, html = replacement, ) stored[stored_embed.match] = stored_embed parts[id_to_replace] = replacement else: raise ValueError except __HOLE__: parts[id_to_replace] = part except KeyError: parts[id_to_replace] = part except urllib2.HTTPError: parts[id_to_replace] = part # Combine the list into one string and return it. return mark_safe(u''.join(parts))
ValueError
dataset/ETHPy150Open ericflo/django-oembed/oembed/core.py/replace
6,860
def request(self, endpoint, headers = None, params = None, post = False): s = requests.Session() if headers is None: headers = {} headers.update({ "X-Casper-API-Key": self.apiKey, "X-Casper-Signature": self.generateRequestSignature(params), "User-Agent": self.USER_AGENT }) s.headers = headers requestURL = "{0}{1}".format(self.URL, endpoint) if post: res = s.post(requestURL, data = params, timeout = 10, verify = False) else: res = s.get(requestURL, timeout = 10, verify = False) try: rJSON = res.json() except __HOLE__: raise CasperException("Failed to decode response!") if res.status_code != 200: if "code" in rJSON and "message" in rJSON: raise CasperException("API Response: [{0}] {1}".format(rJSON["code"], rJSON["message"])) else: raise CasperException("API Response: [{0}] Unknown Error Message".format(res.status_code)) return rJSON
ValueError
dataset/ETHPy150Open rxw/snapy/snapy/Agent.py/CasperAgent.request
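requests' Response.json() raises ValueError when the body is not JSON (newer releases raise a JSONDecodeError that subclasses it), which the entry above converts into a library-specific exception. A hedged sketch of that conversion; the URL and RuntimeError are placeholders, not part of the sample's API:

import requests

def fetch_json(url):
    res = requests.get(url, timeout=10)
    try:
        return res.json()
    except ValueError:
        # Body was not valid JSON; surface a clearer application error.
        raise RuntimeError("Failed to decode response from %s" % url)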
6,861
def GetValue(self): try: if self.qid: return self.qid else: return None except __HOLE__: return None
AttributeError
dataset/ETHPy150Open ODM2/ODMToolsPython/odmtools/gui/frmFlagValues.py/frmFlagValues.GetValue
6,862
def _close_shelve_and_unlock(self): try: if self.storage: self.storage.close() except __HOLE__: pass finally: self.storage = None if self.locker and self.locked: portalocker.unlock(self.locker) self.locker.close() self.locked = False
ValueError
dataset/ETHPy150Open uwdata/termite-visualizations/web2py/gluon/cache.py/CacheOnDisk._close_shelve_and_unlock
6,863
def __init__(self, request): """ Parameters ---------- request: the global request object """ # GAE will have a special caching if have_settings and settings.global_settings.web2py_runtime_gae: from gluon.contrib.gae_memcache import MemcacheClient self.ram = self.disk = MemcacheClient(request) else: # Otherwise use ram (and try also disk) self.ram = CacheInRam(request) try: self.disk = CacheOnDisk(request) except __HOLE__: logger.warning('no cache.disk (IOError)') except AttributeError: # normally not expected anymore, as GAE has already # been accounted for logger.warning('no cache.disk (AttributeError)')
IOError
dataset/ETHPy150Open uwdata/termite-visualizations/web2py/gluon/cache.py/Cache.__init__
6,864
def take(self, key, path=None): """Remove a value from the shared stash and return it. :param key: A UUID to use as the data's key. :param path: The path that has access to read the data (by default the current request path)""" internal_key = self._wrap_key(key, path) value = self.data.get(internal_key, None) if not value is None: try: self.data.pop(internal_key) except __HOLE__: # Silently continue when pop error occurs. pass return value
KeyError
dataset/ETHPy150Open w3c/wptserve/wptserve/stash.py/Stash.take
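take() above wraps dict.pop in try/except KeyError to tolerate concurrent removal; pop's default argument expresses the same intent in a single call:

def take(data, key):
    # Returns the stored value if present, else None; never raises.
    return data.pop(key, None)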
6,865
@property def root(self): try: if sys.version_info[:2] >= (3, 0): return self.iter_roots().__next__() else: return self.iter_roots().next() except __HOLE__: return None
StopIteration
dataset/ETHPy150Open cidles/graf-python/src/graf/graphs.py/Graph.root
6,866
@property def parent(self): try: if sys.version_info[:2] >= (3, 0): return self.iter_parents().__next__() else: return self.iter_parents().next() except __HOLE__: raise AttributeError('%r has no parents' % self)
StopIteration
dataset/ETHPy150Open cidles/graf-python/src/graf/graphs.py/Node.parent
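Both properties above take the first element of an iterator and translate exhaustion into a sentinel or a domain-specific error; Python 3's builtin next() removes the version split the samples work around. A sketch against the same iter_parents() interface as the sample:

def first_parent(node):
    try:
        return next(node.iter_parents())
    except StopIteration:
        # An empty iterator means the node has no parents.
        raise AttributeError("%r has no parents" % node)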
6,867
def config_from_file(filename, config=None): """Small configuration file management function.""" if config: # We're writing configuration try: with open(filename, 'w') as fdesc: fdesc.write(json.dumps(config)) except __HOLE__ as error: _LOGGER.error('Saving config file failed: %s', error) return False return True else: # We're reading config if os.path.isfile(filename): try: with open(filename, 'r') as fdesc: return json.loads(fdesc.read()) except IOError as error: _LOGGER.error('Reading config file failed: %s', error) # This won't work yet return False else: return {} # pylint: disable=abstract-method
IOError
dataset/ETHPy150Open home-assistant/home-assistant/homeassistant/components/media_player/plex.py/config_from_file
6,868
def decode_TEXT(value): r"""Decode :rfc:`2047` TEXT (e.g. "=?utf-8?q?f=C3=BCr?=" -> "f\xfcr").""" try: # Python 3 from email.header import decode_header except __HOLE__: from email.Header import decode_header atoms = decode_header(value) decodedvalue = "" for atom, charset in atoms: if charset is not None: atom = atom.decode(charset) decodedvalue += atom return decodedvalue
ImportError
dataset/ETHPy150Open clips/pattern/pattern/server/cherrypy/cherrypy/lib/httputil.py/decode_TEXT
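decode_TEXT above uses the customary try/except ImportError shim for a module that moved between Python versions. The same idiom for another well-known relocation:

try:
    from urllib.parse import urlparse  # Python 3 location
except ImportError:
    from urlparse import urlparse      # Python 2 location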
6,869
def valid_status(status): """Return legal HTTP status Code, Reason-phrase and Message. The status arg must be an int, or a str that begins with an int. If status is an int, or a str and no reason-phrase is supplied, a default reason-phrase will be provided. """ if not status: status = 200 status = str(status) parts = status.split(" ", 1) if len(parts) == 1: # No reason supplied. code, = parts reason = None else: code, reason = parts reason = reason.strip() try: code = int(code) except __HOLE__: raise ValueError("Illegal response status from server " "(%s is non-numeric)." % repr(code)) if code < 100 or code > 599: raise ValueError("Illegal response status from server " "(%s is out of range)." % repr(code)) if code not in response_codes: # code is unknown but not illegal default_reason, message = "", "" else: default_reason, message = response_codes[code] if reason is None: reason = default_reason return code, reason, message # NOTE: the parse_qs functions that follow are modified version of those # in the python3.0 source - we need to pass through an encoding to the unquote # method, but the default parse_qs function doesn't allow us to. These do.
ValueError
dataset/ETHPy150Open clips/pattern/pattern/server/cherrypy/cherrypy/lib/httputil.py/valid_status
6,870
def setdefault(self, key, x=None): key = str(key).title() try: return self[key] except __HOLE__: self[key] = x return x
KeyError
dataset/ETHPy150Open clips/pattern/pattern/server/cherrypy/cherrypy/lib/httputil.py/CaseInsensitiveDict.setdefault
6,871
def apply(self, pcoll): # Since the PTransform will be implemented entirely as a function # (once called), we need to pass through any type-hinting information that # may have been annotated via the .with_input_types() and # .with_output_types() methods. kwargs = dict(self._kwargs) args = tuple(self._args) try: if 'type_hints' in inspect.getargspec(self.fn).args: args = (self.get_type_hints(),) + args except __HOLE__: # Might not be a function. pass return self.fn(self.label, pcoll, *args, **kwargs)
TypeError
dataset/ETHPy150Open GoogleCloudPlatform/DataflowPythonSDK/google/cloud/dataflow/transforms/ptransform.py/CallablePTransform.apply
6,872
def test_weighted_dns_load_balancer_with_different_domains(monkeypatch): senza.traffic.DNS_ZONE_CACHE = {} def my_client(rtype, *args): if rtype == 'route53': route53 = MagicMock() route53.list_hosted_zones.return_value = {'HostedZones': [{'Id': '/hostedzone/123456', 'Name': 'zo.ne.dev.', 'ResourceRecordSetCount': 23}, {'Id': '/hostedzone/123457', 'Name': 'zo.ne.com.', 'ResourceRecordSetCount': 23}], 'IsTruncated': False, 'MaxItems': '100'} return route53 return MagicMock() monkeypatch.setattr('boto3.client', my_client) configuration = { "Name": "test_lb", "SecurityGroups": "", "HTTPPort": "9999", 'MainDomain': 'great.api.zo.ne.com', 'VersionDomain': 'version.api.zo.ne.dev' } info = {'StackName': 'foobar', 'StackVersion': '0.1'} definition = {"Resources": {}} args = MagicMock() args.region = "foo" mock_string_result = MagicMock() mock_string_result.return_value = "foo" monkeypatch.setattr('senza.components.elastic_load_balancer.find_ssl_certificate_arn', mock_string_result) monkeypatch.setattr('senza.components.elastic_load_balancer.resolve_security_groups', mock_string_result) result = component_weighted_dns_elastic_load_balancer(definition, configuration, args, info, False, AccountArguments('dummyregion')) assert 'zo.ne.com.' == result["Resources"]["test_lbMainDomain"]["Properties"]['HostedZoneName'] assert 'zo.ne.dev.' == result["Resources"]["test_lbVersionDomain"]["Properties"]['HostedZoneName'] configuration = { "Name": "test_lb", "SecurityGroups": "", "HTTPPort": "9999", 'MainDomain': 'this.does.not.exists.com', 'VersionDomain': 'this.does.not.exists.com' } senza.traffic.DNS_ZONE_CACHE = {} try: result = component_weighted_dns_elastic_load_balancer(definition, configuration, args, info, False, AccountArguments('dummyregion')) except __HOLE__: pass except: assert False, 'raise unknown exception' else: print(result) print(configuration) assert False, 'doesn\'t raise a ValueError exception'
ValueError
dataset/ETHPy150Open zalando-stups/senza/tests/test_components.py/test_weighted_dns_load_balancer_with_different_domains
6,873
def check_build(): build_dirs = [ 'build', 'build/doctrees', 'build/html', 'build/latex', 'build/plots', 'build/_static', 'build/_templates'] for d in build_dirs: try: os.mkdir(d) except __HOLE__: pass
OSError
dataset/ETHPy150Open pydata/pandas/doc/make.py/check_build
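check_build above swallows OSError so pre-existing directories are not fatal; on Python 3.2+ the same intent can be spelled directly:

import os

def ensure_dirs(paths):
    for d in paths:
        # exist_ok tolerates directories that are already present.
        os.makedirs(d, exist_ok=True)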
6,874
def auto_dev_build(debug=False): msg = '' try: step = 'clean' clean() step = 'html' html() step = 'upload dev' upload_dev() if not debug: sendmail(step) step = 'latex' latex() step = 'upload pdf' upload_dev_pdf() if not debug: sendmail(step) except (Exception, __HOLE__) as inst: msg = str(inst) + '\n' sendmail(step, '[ERROR] ' + msg)
SystemExit
dataset/ETHPy150Open pydata/pandas/doc/make.py/auto_dev_build
6,875
def list_reactors(self, tag): ''' Take in the tag from an event and return a list of the reactors to process ''' log.debug('Gathering reactors for tag {0}'.format(tag)) reactors = [] if isinstance(self.opts['reactor'], string_types): try: with salt.utils.fopen(self.opts['reactor']) as fp_: react_map = yaml.safe_load(fp_.read()) except (OSError, __HOLE__): log.error( 'Failed to read reactor map: "{0}"'.format( self.opts['reactor'] ) ) except Exception: log.error( 'Failed to parse YAML in reactor map: "{0}"'.format( self.opts['reactor'] ) ) else: react_map = self.opts['reactor'] for ropt in react_map: if not isinstance(ropt, dict): continue if len(ropt) != 1: continue key = next(iterkeys(ropt)) val = ropt[key] if fnmatch.fnmatch(tag, key): if isinstance(val, string_types): reactors.append(val) elif isinstance(val, list): reactors.extend(val) return reactors
IOError
dataset/ETHPy150Open saltstack/salt/salt/utils/reactor.py/Reactor.list_reactors
6,876
def list_all(self): ''' Return a list of the reactors ''' if isinstance(self.minion.opts['reactor'], string_types): log.debug('Reading reactors from yaml {0}'.format(self.opts['reactor'])) try: with salt.utils.fopen(self.opts['reactor']) as fp_: react_map = yaml.safe_load(fp_.read()) except (OSError, __HOLE__): log.error( 'Failed to read reactor map: "{0}"'.format( self.opts['reactor'] ) ) except Exception: log.error( 'Failed to parse YAML in reactor map: "{0}"'.format( self.opts['reactor'] ) ) else: log.debug('Not reading reactors from yaml') react_map = self.minion.opts['reactor'] return react_map
IOError
dataset/ETHPy150Open saltstack/salt/salt/utils/reactor.py/Reactor.list_all
6,877
def run(self): ''' Enter into the server loop ''' salt.utils.appendproctitle(self.__class__.__name__) # instantiate some classes inside our new process self.event = salt.utils.event.get_event( self.opts['__role'], self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=True) self.wrap = ReactWrap(self.opts) for data in self.event.iter_events(full=True): # skip all events fired by ourselves if data['data'].get('user') == self.wrap.event_user: continue if data['tag'].endswith('salt/reactors/manage/add'): _data = data['data'] res = self.add_reactor(_data['event'], _data['reactors']) self.event.fire_event({'reactors': self.list_all(), 'result': res}, 'salt/reactors/manage/add-complete') elif data['tag'].endswith('salt/reactors/manage/delete'): _data = data['data'] res = self.delete_reactor(_data['event']) self.event.fire_event({'reactors': self.list_all(), 'result': res}, 'salt/reactors/manage/delete-complete') elif data['tag'].endswith('salt/reactors/manage/list'): self.event.fire_event({'reactors': self.list_all()}, 'salt/reactors/manage/list-results') else: reactors = self.list_reactors(data['tag']) if not reactors: continue chunks = self.reactions(data['tag'], data['data'], reactors) if chunks: try: self.call_reactions(chunks) except __HOLE__: log.warning('Exit ignored by reactor')
SystemExit
dataset/ETHPy150Open saltstack/salt/salt/utils/reactor.py/Reactor.run
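Reactor.run above, and the ReactWrap helpers in the entries that follow, all catch SystemExit so a reaction that calls sys.exit() cannot take down the event loop. The daemon idiom in isolation; events and handle are hypothetical stand-ins:

import logging

def serve_forever(events, handle):
    for event in events:
        try:
            handle(event)
        except SystemExit:
            # sys.exit() raises SystemExit; swallow it so one bad
            # handler cannot stop the whole service.
            logging.warning("Exit ignored by event loop")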
6,878
def local(self, *args, **kwargs): ''' Wrap LocalClient for running :ref:`execution modules <all-salt.modules>` ''' if 'local' not in self.client_cache: self.client_cache['local'] = salt.client.LocalClient(self.opts['conf_file']) try: self.client_cache['local'].cmd_async(*args, **kwargs) except __HOLE__: log.warning('Attempt to exit reactor. Ignored.') except Exception as exc: log.warning('Exception caught by reactor: {0}'.format(exc))
SystemExit
dataset/ETHPy150Open saltstack/salt/salt/utils/reactor.py/ReactWrap.local
6,879
def runner(self, fun, **kwargs): ''' Wrap RunnerClient for executing :ref:`runner modules <all-salt.runners>` ''' if 'runner' not in self.client_cache: self.client_cache['runner'] = salt.runner.RunnerClient(self.opts) try: self.pool.fire_async(self.client_cache['runner'].low, args=(fun, kwargs)) except __HOLE__: log.warning('Attempt to exit in reactor by runner. Ignored') except Exception as exc: log.warning('Exception caught by reactor: {0}'.format(exc))
SystemExit
dataset/ETHPy150Open saltstack/salt/salt/utils/reactor.py/ReactWrap.runner
6,880
def wheel(self, fun, **kwargs):
    '''
    Wrap Wheel to enable executing :ref:`wheel modules <all-salt.wheel>`
    '''
    if 'wheel' not in self.client_cache:
        self.client_cache['wheel'] = salt.wheel.Wheel(self.opts)
    try:
        self.pool.fire_async(self.client_cache['wheel'].low, args=(fun, kwargs))
    except __HOLE__:
        log.warning('Attempt to exit in reactor by wheel. Ignored.')
    except Exception as exc:
        log.warning('Exception caught by reactor: {0}'.format(exc))
SystemExit
dataset/ETHPy150Open saltstack/salt/salt/utils/reactor.py/ReactWrap.wheel
6,881
def caller(self, fun, *args, **kwargs): ''' Wrap Caller to enable executing :ref:`caller modules <all-salt.caller>` ''' log.debug("in caller with fun {0} args {1} kwargs {2}".format(fun, args, kwargs)) args = kwargs['args'] if 'caller' not in self.client_cache: self.client_cache['caller'] = salt.client.Caller(self.opts['conf_file']) try: self.client_cache['caller'].function(fun, *args) except __HOLE__: log.warning('Attempt to exit reactor. Ignored.') except Exception as exc: log.warning('Exception caught by reactor: {0}'.format(exc))
SystemExit
dataset/ETHPy150Open saltstack/salt/salt/utils/reactor.py/ReactWrap.caller
6,882
def __init__(self, settings): self._pool = HTTPConnectionPool(reactor, persistent=True) self._pool.maxPersistentPerHost = settings.getint('CONCURRENT_REQUESTS_PER_DOMAIN') self._pool._factory.noisy = False self._sslMethod = openssl_methods[settings.get('DOWNLOADER_CLIENT_TLS_METHOD')] self._contextFactoryClass = load_object(settings['DOWNLOADER_CLIENTCONTEXTFACTORY']) # try method-aware context factory try: self._contextFactory = self._contextFactoryClass(method=self._sslMethod) except __HOLE__: # use context factory defaults self._contextFactory = self._contextFactoryClass() msg = """ '%s' does not accept `method` argument (type OpenSSL.SSL method,\ e.g. OpenSSL.SSL.SSLv23_METHOD).\ Please upgrade your context factory class to handle it or ignore it.""" % ( settings['DOWNLOADER_CLIENTCONTEXTFACTORY'],) warnings.warn(msg) self._default_maxsize = settings.getint('DOWNLOAD_MAXSIZE') self._default_warnsize = settings.getint('DOWNLOAD_WARNSIZE') self._disconnect_timeout = 1
TypeError
dataset/ETHPy150Open scrapy/scrapy/scrapy/core/downloader/handlers/http11.py/HTTP11DownloadHandler.__init__
6,883
def processProxyResponse(self, bytes): """Processes the response from the proxy. If the tunnel is successfully created, notifies the client that we are ready to send requests. If not raises a TunnelError. """ self._protocol.dataReceived = self._protocolDataReceived if TunnelingTCP4ClientEndpoint._responseMatcher.match(bytes): try: # this sets proper Server Name Indication extension # but is only available for Twisted>=14.0 sslOptions = self._contextFactory.creatorForNetloc( self._tunneledHost, self._tunneledPort) except __HOLE__: # fall back to non-SNI SSL context factory sslOptions = self._contextFactory self._protocol.transport.startTLS(sslOptions, self._protocolFactory) self._tunnelReadyDeferred.callback(self._protocol) else: self._tunnelReadyDeferred.errback( TunnelError('Could not open CONNECT tunnel with proxy %s:%s' % ( self._host, self._port)))
AttributeError
dataset/ETHPy150Open scrapy/scrapy/scrapy/core/downloader/handlers/http11.py/TunnelingTCP4ClientEndpoint.processProxyResponse
6,884
def OpenIDFinish(request, default_success_url='/'): response = django.http.HttpResponse() if request.method not in ('GET', 'POST'): return django.http.HttpResponseNotAllowed(['GET', 'POST']) else: args = args_to_dict(request.GET) assert type(args) is dict if request.method == 'POST': args.update(args_to_dict(request.POST)) url = 'http://'+request.META['HTTP_HOST']+django.core.urlresolvers.reverse('openidgae.views.OpenIDFinish') session = openidgae.get_session(request, response) s = {} if session.openid_stuff: try: import pickle s = pickle.loads(str(session.openid_stuff)) except: session.openid_stuff = None session.put() c = Consumer(s, get_store()) auth_response = c.complete(args, url) sreg_response = {} ax_items = {} if auth_response.status == openid.consumer.consumer.SUCCESS: from openid.extensions import sreg sreg_response = sreg.SRegResponse.fromSuccessResponse(auth_response) sreg_response = dict(sreg_response.iteritems()) logging.debug("sreg_response: %r" % sreg_response) from openid.extensions import ax ax_response = ax.FetchResponse.fromSuccessResponse(auth_response) logging.debug("ax_response: %r" % ax_response) if ax_response: SHORTNAMES = ( ('email', 'http://schema.openid.net/contact/email'), ('firstname', 'http://axschema.org/namePerson/first'), ('lastname', 'http://axschema.org/namePerson/last'), ('language', 'http://axschema.org/pref/language'), ('country', 'http://axschema.org/contact/country/home'), ) for short, long in SHORTNAMES: try: ax_items[short] = ax_response.get(long) except __HOLE__, e: pass logging.debug("ax_items: %r" % ax_items) openid_url = auth_response.getDisplayIdentifier() import models persons = models.Person.gql('WHERE openid = :1', openid_url) if persons.count() == 0: p = models.Person() p.openid = openid_url p.ax_dict().update(ax_items) p.sreg_dict().update(sreg_response) p.put() else: p = persons[0] changed = False for key in sreg_response: if not p.sreg_dict().has_key(key) or \ p.sreg_dict()[key] != sreg_response[key]: logging.debug("Setting sreg %s" % key) p.sreg_dict()[key] = sreg_response[key] changed = True for key in ax_items: if not p.ax_dict().has_key(key) or \ p.ax_dict()[key] != ax_items[key]: logging.info("Setting ax %s" % key) p.ax_dict()[key] = ax_items[key] changed = True if changed: p.put() s = openidgae.get_session(request, response) s.person = p.key() request.openidgae_logged_in_person = p s.put() continueUrl = get_continue_url(request, default_success_url) return django.http.HttpResponseRedirect(continueUrl) else: return show_main_page(request, 'OpenID verification failed :(')
KeyError
dataset/ETHPy150Open CollabQ/CollabQ/openidgae/views.py/OpenIDFinish
6,885
def get_access_token(): # Get a request token flickr.get_request_token(oauth_callback="oob") print("""In order to use Flickr with Onitu, you're going to need to let \ the Onitu Flickr app gain access to your Flickr account. To do so, the script is going to open a window in your web browser to the \ Flickr website where you'll have to copy/paste a verification code back \ here in the terminal. """) raw_input("If you're ready, press Enter.") # Open a browser at the authentication URL. Do this however # you want, as long as the user visits that URL. authorize_url = flickr.auth_url(perms='delete') webbrowser.open(authorize_url) # Get the verifier code from the user. Do this however you # want, as long as the user gives the application the code. verifier = raw_input("Paste the code here: ") try: verifier = unicode(verifier) except __HOLE__: # in python 3 # No unicode type, but that's ok, that's what the next function wants pass # Trade the request token for an access token flickr.get_access_token(verifier)
NameError
dataset/ETHPy150Open onitu/onitu/drivers/flickr/get_tokens.py/get_access_token
6,886
def _has_sqlite(self): from sqlalchemy import create_engine try: create_engine('sqlite://') return True except __HOLE__: return False
ImportError
dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/sqlalchemy/testing/requirements.py/SuiteRequirements._has_sqlite
6,887
def _has_cextensions(self): try: from sqlalchemy import cresultproxy, cprocessors return True except __HOLE__: return False
ImportError
dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/sqlalchemy/testing/requirements.py/SuiteRequirements._has_cextensions
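The two requirement checks above define a capability as "the import succeeds"; the probe generalizes to any module name:

def has_module(name):
    # True when the module is importable in this environment.
    try:
        __import__(name)
        return True
    except ImportError:
        return False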
6,888
def Remove(self): """Remove the file backing the block device. @rtype: boolean @return: True if the removal was successful """ try: os.remove(self.path) return True except __HOLE__ as err: if err.errno != errno.ENOENT: base.ThrowError("%s: can't remove: %s", self.path, err) return False
OSError
dataset/ETHPy150Open ganeti/ganeti/lib/storage/filestorage.py/FileDeviceHelper.Remove
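Remove() above shows the errno discipline for filesystem cleanup: ENOENT ("no such file") counts as already done, anything else is a real failure. Standalone sketch; Python 3 code could catch FileNotFoundError instead:

import errno
import os

def remove_if_exists(path):
    try:
        os.remove(path)
        return True
    except OSError as err:
        if err.errno != errno.ENOENT:
            raise  # permission problems etc. still propagate
        return False  # the file was already absent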
6,889
def Size(self): """Return the actual disk size in bytes. @rtype: int @return: The file size in bytes. """ self.Exists(assert_exists=True) try: return os.stat(self.path).st_size except __HOLE__ as err: base.ThrowError("%s: can't stat: %s", self.path, err)
OSError
dataset/ETHPy150Open ganeti/ganeti/lib/storage/filestorage.py/FileDeviceHelper.Size
6,890
def Move(self, new_path): """Move file to a location inside the file storage dir. """ # Check that the file exists self.Exists(assert_exists=True) self.file_path_acceptance_fn(new_path) try: os.rename(self.path, new_path) self.path = new_path except __HOLE__, err: base.ThrowError("%s: can't rename to %s: ", str(err), new_path)
OSError
dataset/ETHPy150Open ganeti/ganeti/lib/storage/filestorage.py/FileDeviceHelper.Move
6,891
def GetFileStorageSpaceInfo(path): """Retrieves the free and total space of the device where the file is located. @type path: string @param path: Path of the file whose embracing device's capacity is reported. @return: a dictionary containing 'vg_size' and 'vg_free' given in MebiBytes """ try: result = os.statvfs(path) free = (result.f_frsize * result.f_bavail) / (1024 * 1024) size = (result.f_frsize * result.f_blocks) / (1024 * 1024) return {"type": constants.ST_FILE, "name": path, "storage_size": size, "storage_free": free} except __HOLE__, e: raise errors.CommandError("Failed to retrieve file system information about" " path: %s - %s" % (path, e.strerror))
OSError
dataset/ETHPy150Open ganeti/ganeti/lib/storage/filestorage.py/GetFileStorageSpaceInfo
6,892
def run_only_if_psycopg2_is_available(func): try: import psycopg2 except __HOLE__: psycopg2 = None pred = lambda: psycopg2 is not None return run_only(func, pred)
ImportError
dataset/ETHPy150Open Yelp/fullerite/src/diamond/collectors/slony/test/testslony.py/run_only_if_psycopg2_is_available
6,893
def run(self, end_time): # <1> """Schedule and display events until time is up""" # schedule the first event for each cab for _, proc in sorted(self.procs.items()): # <2> first_event = next(proc) # <3> self.events.put(first_event) # <4> # main loop of the simulation time = 0 while time < end_time: # <5> if self.events.empty(): # <6> print('*** end of events ***') break # get and display current event current_event = self.events.get() # <7> print('taxi:', current_event.proc, # <8> current_event.proc * ' ', current_event) # schedule next action for current proc time = current_event.time # <9> proc = self.procs[current_event.proc] # <10> try: next_event = proc.send(time) # <11> except __HOLE__: del self.procs[current_event.proc] # <12> else: self.events.put(next_event) # <13> else: # <14> msg = '*** end of simulation time: {} events pending ***' print(msg.format(self.events.qsize())) # END TAXI_SIMULATOR
StopIteration
dataset/ETHPy150Open fluentpython/example-code/16-coroutine/taxi_sim0.py/Simulator.run
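Simulator.run above advances each taxi coroutine with send() and retires it when send() raises StopIteration. The core of that driving protocol in isolation:

def drive(gen, inputs):
    # Prime the generator, then feed it values until it finishes.
    outputs = []
    try:
        outputs.append(next(gen))
        for value in inputs:
            outputs.append(gen.send(value))
    except StopIteration:
        pass  # the generator returned: nothing more to schedule
    return outputs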
6,894
def qstat(qconn=None, delay=None): def compare_tubes(last, cur): _cur = cur.copy() for _k, _v in last.items(): try: change = int(_cur[_k]) - int(_v) if change > 0: change = "+%s" % change elif change == 0: change = "" _cur[_k] = "%-5s %s" % (_cur[_k], change) except __HOLE__: pass # Not an number return _cur def compare_numbers(last, cur): _cur = cur change = int(_cur) - int(last) if change > -1: change = "+%s" % change _cur = "%s %s" % (_cur, change) return _cur if not qconn: qconn = _get_qconnection(QHOST, QPORT) tubes_stats_last = {} while True: LINE="%-24s %-10s %-10s %-10s %-10s %-10s" print LINE % ('tube', 'watching', 'buried', 'ready', 'delayed', 'reserved') tubes = qconn.tubes() tubes.sort() for tube in tubes: if tube: name = str(tube) tube_stats_cur = qconn.stats_tube(tube) if tubes_stats_last.get(name): tube_stats = compare_tubes(tubes_stats_last[name], tube_stats_cur) else: tube_stats = tube_stats_cur #tube_stats = qconn.stats_tube(tube) print LINE % (name, tube_stats.get('current-watching'), tube_stats.get('current-jobs-buried'), tube_stats.get('current-jobs-ready'), tube_stats.get('current-jobs-delayed'), tube_stats.get('current-jobs-reserved')) tubes_stats_last[name] = tube_stats_cur if delay: time.sleep(delay) else: return None
ValueError
dataset/ETHPy150Open chexov/queueit/beanstalkd/queueit/__init__.py/qstat
6,895
def main(): try: COMMAND = os.path.basename(sys.argv[0]) args = sys.argv[1:] if COMMAND == 'queueit': if len(sys.argv) == 1: print "Usage:" print "%s q-get" % COMMAND print "%s q-put" % COMMAND print "%s q-kick" % COMMAND print "%s q-stat" % COMMAND print "%s q-wrapper" % COMMAND print "%s q-wrapper-batch" % COMMAND print "%s q-wrapper-with-stats" % COMMAND print "%s q-peek" % COMMAND print "%s q-peek-ready" % COMMAND print "%s q-peek-delayed" % COMMAND print "%s q-peek-buried" % COMMAND sys.exit(1) else: COMMAND = os.path.basename(sys.argv[1]) args = sys.argv[2:] if COMMAND == 'q-get': if not len(args) == 1: print "Usage: %s <queue>" % (COMMAND) sys.exit(1) qget(args[0]) elif COMMAND == 'q-put': if len(args) == 1: qput(args[0], [sys.stdin.read(),]) elif len(args) > 1: qput(args[0], args[1:]) else: print "Usage: %s <queue> [<message>, <message>, ...]\n Message body could be sent trough STDIN" % (COMMAND ) sys.exit(1) elif COMMAND == 'q-kick': if len(args) < 1 or len(args) > 2: print "Usage: %s <queue> [<count>]" % COMMAND sys.exit(1) count = 1 if len(args) == 2: try: count = int(args[1]) except ValueError: print "Wrong count value '%s'. Using default %s" % (args[1], count) qkick(args[0], count) elif COMMAND == 'q-stat': if len(args) == 1: qstat(delay=int(args[0])) else: qstat() elif COMMAND == 'q-wrapper': if len(args) >= 3: qwrapper(args[0], args[1], worker_cmd=args[2:]) else: print "Usage: %s <queue-in> <queue-out> [<cmd>]\n <cmd> could be sent trough STDIN" % (COMMAND) print sys.exit(1) elif COMMAND == 'q-wrapper-with-stats': if len(args) == 4: qwrapperwithstats(stats_queue_name, job_id, command) else: print "Usage: %s <statistics-queue> <job_id> <cmd>" % (COMMAND) print sys.exit(1) elif COMMAND == 'q-wrapper-batch': if len(args) >= 4: qwrapperbatch(args[0], args[1], worker_cmd=args[3:], batch_size=int(args[2])) else: print "Usage: %s <queue-in> <queue-out> <batch-size> <cmd>" % (COMMAND) print sys.exit(1) elif COMMAND == 'q-cleanup': if len(args) == 1: qcleanup(args[0]) else: print "Usage: %s <queue>" % (COMMAND) print sys.exit(1) elif COMMAND == 'q-peek': if len(args) == 1: qpeekjob(args[0]) else: print "Usage: %s <job_id>" % (COMMAND) print sys.exit(1) elif COMMAND == 'q-peek-ready': if len(args) == 1: qpeeknext(args[0], peek_type="ready") else: print "Usage: %s <queue>" % (COMMAND) print sys.exit(1) elif COMMAND == 'q-peek-delayed': if len(args) == 1: qpeeknext(args[0], peek_type="delayed") else: print "Usage: %s <queue>" % (COMMAND) print sys.exit(1) elif COMMAND == 'q-peek-buried': if len(args) == 1: qpeeknext(args[0], peek_type="buried") else: print "Usage: %s <queue>" % (COMMAND) print sys.exit(1) else: print "Unknown command '%s'" % COMMAND except __HOLE__: print "Keyboard Interrupt. Bye-bye"
KeyboardInterrupt
dataset/ETHPy150Open chexov/queueit/beanstalkd/queueit/__init__.py/main
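The CLI entry point above ends with the customary top-level Ctrl-C guard so users get a short message instead of a traceback. A compact version; exit status 130 is the conventional SIGINT code and an assumption here, not something the sample sets:

import sys

def run_cli(main):
    try:
        main()
    except KeyboardInterrupt:
        print("Keyboard Interrupt. Bye-bye")
        sys.exit(130)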
6,896
def __init__(self, *args, **kwds): '''Initialize an ordered dictionary. Signature is the same as for regular dictionaries, but keyword arguments are not recommended because their insertion order is arbitrary. ''' if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) try: self.__root except __HOLE__: self.__root = root = [None, None, None] # sentinel node PREV = 0 NEXT = 1 root[PREV] = root[NEXT] = root self.__map = {} self.update(*args, **kwds)
AttributeError
dataset/ETHPy150Open baseblack/ReproWeb/3rdParty/python/ordereddict.py/OrderedDict.__init__
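OrderedDict.__init__ above probes self.__root with try/except AttributeError so that repeated __init__ calls (via pickle or cooperative subclassing) build the sentinel node only once. The guard in isolation:

class Linked(object):
    def _ensure_root(self):
        # The first call creates the sentinel; later calls find it.
        try:
            return self._root
        except AttributeError:
            self._root = root = []
            root[:] = [root, root, None]
            return root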
6,897
def clear(self): 'od.clear() -> None. Remove all items from od.' try: for node in self.__map.itervalues(): del node[:] self.__root[:] = [self.__root, self.__root, None] self.__map.clear() except __HOLE__: pass dict.clear(self)
AttributeError
dataset/ETHPy150Open baseblack/ReproWeb/3rdParty/python/ordereddict.py/OrderedDict.clear
6,898
def deactivate(self): """Remove this objective from the dependency graph and remove its pseudocomp from the scoping object. """ if self._pseudo is not None: scope = self.scope try: getattr(scope, self._pseudo.name) except __HOLE__: pass else: scope.remove(self._pseudo.name)
AttributeError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/hasobjective.py/Objective.deactivate
6,899
def evaluate(self, scope=None): """Use the value in the u vector if it exists instead of pulling the value from scope. """ if self.pcomp_name: scope = self._get_updated_scope(scope) try: system = getattr(scope, self.pcomp_name)._system vname = self.pcomp_name + '.out0' if scope._var_meta[vname].get('scalar'): return system.vec['u'][scope.name2collapsed[vname]][0] else: return system.vec['u'][scope.name2collapsed[vname]] except (__HOLE__, AttributeError): pass return super(Objective, self).evaluate(scope)
KeyError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/hasobjective.py/Objective.evaluate