Dataset columns (reconstructed from the viewer's column-summary residue):

Unnamed: 0 (int64): integer row index, ranging from 0 to about 10k.
function (string, lengths 79 to 138k): Python function source in which one exception name is masked with the placeholder token __HOLE__.
label (string, 20 distinct classes): the exception class name that fills the __HOLE__ slot.
info (string, lengths 42 to 261): provenance of the function, as a dataset/ETHPy150Open repository/file/function path.
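A minimal sketch of how one might load and inspect rows of this dump with pandas. The file name exceptions.parquet is a hypothetical placeholder (substitute however the dataset is actually exported); the column names follow the schema summary above.

import pandas as pd

# Hypothetical export file; replace with the actual path to the dump.
df = pd.read_parquet("exceptions.parquet")

# Each row pairs a function body containing the masked token __HOLE__
# with the exception class name that belongs in that slot.
row = df.iloc[0]
print(row["label"])           # e.g. "ImportError"
print(row["function"][:200])  # first 200 characters of the masked source
print(row["info"])            # provenance path within ETHPy150Open

# Distribution of the 20 exception-class labels:
print(df["label"].value_counts())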
7,900
def search(dataset, node, lat=None, lng=None, distance=100, ll=None, ur=None, start_date=None, end_date=None, where=None, max_results=50000, starting_number=1, sort_order="DESC", api_key=None):
    """
    :param dataset:
    :param node:
    :param lat:
    :param lng:
    :param ll:
    :param distance:
    :param ur:
    :param start_date:
    :param end_date:
    :param where: Specify additional search criteria
    :param max_results:
    :param starting_number:
    :param sort_order:
    :param api_key: API key is not required.
    """
    root, body = create_root_request()
    el = create_request_type(body, "search")
    create_dataset_element(el, dataset)
    create_node_element(el, node)
    # Latitude and longitude take precedence over ll and ur
    if lat and lng:
        try:
            import pyproj
            from shapely import geometry
        except __HOLE__:
            raise USGSDependencyRequired("Shapely and PyProj are required for spatial searches.")
        prj = pyproj.Proj(proj='aeqd', lat_0=lat, lon_0=lng)
        half_distance = 0.5 * distance
        box = geometry.box(-half_distance, -half_distance, half_distance, half_distance)
        lngs, lats = prj(*box.exterior.xy, inverse=True)
        ll = {
            "longitude": min(*lngs),
            "latitude": min(*lats)
        }
        ur = {
            "longitude": max(*lngs),
            "latitude": max(*lats)
        }
    if ll and ur:
        create_service_class_coordinate(el, "lowerLeft", latitude=ll["latitude"], longitude=ll["longitude"])
        create_service_class_coordinate(el, "upperRight", latitude=ur["latitude"], longitude=ur["longitude"])
    if start_date:
        start_date_el = SubElement(el, "startDate")
        start_date_el.set("xsi:type", "xsd:string")
        start_date_el.text = start_date
    if end_date:
        end_date_el = SubElement(el, "endDate")
        end_date_el.set("xsi:type", "xsd:string")
        end_date_el.text = end_date
    if where:
        # TODO: Support more than AND key/value equality queries
        additional_criteria_el = SubElement(el, "additionalCriteria")
        filter_type_el = SubElement(additional_criteria_el, "filterType")
        filter_type_el.text = "and"
        child_filters_el = SubElement(additional_criteria_el, "childFilters")
        for field_id, value in where.iteritems():
            child_item_el = SubElement(child_filters_el, "item")
            field_id_el = SubElement(child_item_el, "fieldId")
            field_id_el.text = str(field_id)
            item_filter_type_el = SubElement(child_item_el, "filterType")
            item_filter_type_el.text = "value"
            operand_el = SubElement(child_item_el, "operand")
            operand_el.text = "="
            value_el = SubElement(child_item_el, "value")
            value_el.text = str(value)
    if max_results:
        max_results_el = SubElement(el, "maxResults")
        max_results_el.set("xsi:type", "xsd:int")
        max_results_el.text = str(max_results)
    if starting_number:
        starting_number_el = SubElement(el, "startingNumber")
        starting_number_el.set("xsi:type", "xsd:int")
        starting_number_el.text = str(starting_number)
    if sort_order:
        sort_order_el = SubElement(el, "sortOrder")
        sort_order_el.set("xsi:type", "xsd:string")
        sort_order_el.text = sort_order
    if api_key:
        create_api_key_element(el, api_key)
    return tostring(root)
ImportError
dataset/ETHPy150Open mapbox/usgs/usgs/soap.py/search
7,901
def __init__(self, fp, seekable = 1):
    """Initialize the class instance and read the headers."""
    if seekable == 1:
        # Exercise tell() to make sure it works
        # (and then assume seek() works, too)
        try:
            fp.tell()
        except (__HOLE__, IOError):
            seekable = 0
    self.fp = fp
    self.seekable = seekable
    self.startofheaders = None
    self.startofbody = None
    #
    if self.seekable:
        try:
            self.startofheaders = self.fp.tell()
        except IOError:
            self.seekable = 0
    #
    self.readheaders()
    #
    if self.seekable:
        try:
            self.startofbody = self.fp.tell()
        except IOError:
            self.seekable = 0
AttributeError
dataset/ETHPy150Open ofermend/medicare-demo/socialite/jython/Lib/rfc822.py/Message.__init__
7,902
def readheaders(self):
    """Read header lines.

    Read header lines up to the entirely blank line that terminates them.
    The (normally blank) line that ends the headers is skipped, but not
    included in the returned list.  If a non-header line ends the headers,
    (which is an error), an attempt is made to backspace over it; it is
    never included in the returned list.

    The variable self.status is set to the empty string if all went well,
    otherwise it is an error message.  The variable self.headers is a
    completely uninterpreted list of lines contained in the header (so
    printing them will reproduce the header exactly as it appears in the
    file).
    """
    self.dict = {}
    self.unixfrom = ''
    self.headers = lst = []
    self.status = ''
    headerseen = ""
    firstline = 1
    startofline = unread = tell = None
    if hasattr(self.fp, 'unread'):
        unread = self.fp.unread
    elif self.seekable:
        tell = self.fp.tell
    while 1:
        if tell:
            try:
                startofline = tell()
            except __HOLE__:
                startofline = tell = None
                self.seekable = 0
        line = self.fp.readline()
        if not line:
            self.status = 'EOF in headers'
            break
        # Skip unix From name time lines
        if firstline and line.startswith('From '):
            self.unixfrom = self.unixfrom + line
            continue
        firstline = 0
        if headerseen and line[0] in ' \t':
            # It's a continuation line.
            lst.append(line)
            x = (self.dict[headerseen] + "\n " + line.strip())
            self.dict[headerseen] = x.strip()
            continue
        elif self.iscomment(line):
            # It's a comment.  Ignore it.
            continue
        elif self.islast(line):
            # Note! No pushback here!  The delimiter line gets eaten.
            break
        headerseen = self.isheader(line)
        if headerseen:
            # It's a legal header line, save it.
            lst.append(line)
            self.dict[headerseen] = line[len(headerseen)+1:].strip()
            continue
        else:
            # It's not a header line; throw it back and stop here.
            if not self.dict:
                self.status = 'No headers'
            else:
                self.status = 'Non-header line where header expected'
            # Try to undo the read.
            if unread:
                unread(line)
            elif tell:
                self.fp.seek(startofline)
            else:
                self.status = self.status + '; bad seek'
            break
IOError
dataset/ETHPy150Open ofermend/medicare-demo/socialite/jython/Lib/rfc822.py/Message.readheaders
7,903
def getdate(self, name):
    """Retrieve a date field from a header.

    Retrieves a date field from the named header, returning a tuple
    compatible with time.mktime().
    """
    try:
        data = self[name]
    except __HOLE__:
        return None
    return parsedate(data)
KeyError
dataset/ETHPy150Open ofermend/medicare-demo/socialite/jython/Lib/rfc822.py/Message.getdate
7,904
def getdate_tz(self, name):
    """Retrieve a date field from a header as a 10-tuple.

    The first 9 elements make up a tuple compatible with time.mktime(),
    and the 10th is the offset of the poster's time zone from GMT/UTC.
    """
    try:
        data = self[name]
    except __HOLE__:
        return None
    return parsedate_tz(data)


# Access as a dictionary (only finds *last* header of each type):
KeyError
dataset/ETHPy150Open ofermend/medicare-demo/socialite/jython/Lib/rfc822.py/Message.getdate_tz
7,905
def parsedate_tz(data):
    """Convert a date string to a time tuple.

    Accounts for military timezones.
    """
    if not data:
        return None
    data = data.split()
    if data[0][-1] in (',', '.') or data[0].lower() in _daynames:
        # There's a dayname here. Skip it
        del data[0]
    else:
        # no space after the "weekday,"?
        i = data[0].rfind(',')
        if i >= 0:
            data[0] = data[0][i+1:]
    if len(data) == 3:  # RFC 850 date, deprecated
        stuff = data[0].split('-')
        if len(stuff) == 3:
            data = stuff + data[1:]
    if len(data) == 4:
        s = data[3]
        i = s.find('+')
        if i > 0:
            data[3:] = [s[:i], s[i+1:]]
        else:
            data.append('')  # Dummy tz
    if len(data) < 5:
        return None
    data = data[:5]
    [dd, mm, yy, tm, tz] = data
    mm = mm.lower()
    if not mm in _monthnames:
        dd, mm = mm, dd.lower()
        if not mm in _monthnames:
            return None
    mm = _monthnames.index(mm)+1
    if mm > 12:
        mm = mm - 12
    if dd[-1] == ',':
        dd = dd[:-1]
    i = yy.find(':')
    if i > 0:
        yy, tm = tm, yy
    if yy[-1] == ',':
        yy = yy[:-1]
    if not yy[0].isdigit():
        yy, tz = tz, yy
    if tm[-1] == ',':
        tm = tm[:-1]
    tm = tm.split(':')
    if len(tm) == 2:
        [thh, tmm] = tm
        tss = '0'
    elif len(tm) == 3:
        [thh, tmm, tss] = tm
    else:
        return None
    try:
        yy = int(yy)
        dd = int(dd)
        thh = int(thh)
        tmm = int(tmm)
        tss = int(tss)
    except __HOLE__:
        return None
    tzoffset = None
    tz = tz.upper()
    if tz in _timezones:
        tzoffset = _timezones[tz]
    else:
        try:
            tzoffset = int(tz)
        except ValueError:
            pass
    # Convert a timezone offset into seconds ; -0500 -> -18000
    if tzoffset:
        if tzoffset < 0:
            tzsign = -1
            tzoffset = -tzoffset
        else:
            tzsign = 1
        tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60)
    return (yy, mm, dd, thh, tmm, tss, 0, 1, 0, tzoffset)
ValueError
dataset/ETHPy150Open ofermend/medicare-demo/socialite/jython/Lib/rfc822.py/parsedate_tz
7,906
def _set_status_code(self, code):
    self._status_code = code
    try:
        self._status = '%d %s' % (code, HTTP_STATUS_CODES[code].upper())
    except __HOLE__:
        self._status = '%d UNKNOWN' % code
KeyError
dataset/ETHPy150Open Eforcers/gae-flask-todo/lib/werkzeug/wrappers.py/BaseResponse._set_status_code
7,907
def _set_status(self, value):
    self._status = to_native(value)
    try:
        self._status_code = int(self._status.split(None, 1)[0])
    except __HOLE__:
        self._status_code = 0
        self._status = '0 %s' % self._status
ValueError
dataset/ETHPy150Open Eforcers/gae-flask-todo/lib/werkzeug/wrappers.py/BaseResponse._set_status
7,908
def calculate_content_length(self):
    """Returns the content length if available or `None` otherwise."""
    try:
        self._ensure_sequence()
    except __HOLE__:
        return None
    return sum(len(x) for x in self.response)
RuntimeError
dataset/ETHPy150Open Eforcers/gae-flask-todo/lib/werkzeug/wrappers.py/BaseResponse.calculate_content_length
7,909
@property
def is_streamed(self):
    """If the response is streamed (the response is not an iterable with
    a length information) this property is `True`.  In this case streamed
    means that there is no information about the number of iterations.
    This is usually `True` if a generator is passed to the response object.

    This is useful for checking before applying some sort of post
    filtering that should not take place for streamed responses.
    """
    try:
        len(self.response)
    except (__HOLE__, AttributeError):
        return True
    return False
TypeError
dataset/ETHPy150Open Eforcers/gae-flask-todo/lib/werkzeug/wrappers.py/BaseResponse.is_streamed
7,910
def is_json(response):
    """
    Check if the response parameter is a valid JSON string
    :param response:
    :return:
    """
    try:
        json.loads(response)
    except __HOLE__:
        return False
    return True
ValueError
dataset/ETHPy150Open farshield/pathfinder/src/pathfinder/model/tripwire.py/is_json
7,911
def convert_to_int(s):
    """
    Convert string to integer
    :param s: Input string
    :return: Interpreted value if successful, 0 otherwise
    """
    try:
        nr = int(s)
    except (ValueError, __HOLE__):
        nr = 0
    return nr
TypeError
dataset/ETHPy150Open farshield/pathfinder/src/pathfinder/model/tripwire.py/convert_to_int
7,912
def test_disks(self):
    ps_parts = psutil.disk_partitions(all=True)
    wmi_parts = wmi.WMI().Win32_LogicalDisk()
    for ps_part in ps_parts:
        for wmi_part in wmi_parts:
            if ps_part.device.replace('\\', '') == wmi_part.DeviceID:
                if not ps_part.mountpoint:
                    # this is usually a CD-ROM with no disk inserted
                    break
                try:
                    usage = psutil.disk_usage(ps_part.mountpoint)
                except __HOLE__:
                    err = sys.exc_info()[1]
                    if err.errno == errno.ENOENT:
                        # usually this is the floppy
                        break
                    else:
                        raise
                self.assertEqual(usage.total, int(wmi_part.Size))
                wmi_free = int(wmi_part.FreeSpace)
                self.assertEqual(usage.free, wmi_free)
                # 10 MB tollerance
                if abs(usage.free - wmi_free) > 10 * 1024 * 1024:
                    self.fail("psutil=%s, wmi=%s" % usage.free, wmi_free)
                break
        else:
            self.fail("can't find partition %s" % repr(ps_part))
OSError
dataset/ETHPy150Open packages/psutil/test/_windows.py/WindowsSpecificTestCase.test_disks
7,913
def wrap_exceptions(callable):
    def wrapper(self, *args, **kwargs):
        try:
            return callable(self, *args, **kwargs)
        except __HOLE__:
            err = sys.exc_info()[1]
            if err.errno in ACCESS_DENIED_SET:
                raise psutil.AccessDenied(None, None)
            if err.errno == errno.ESRCH:
                raise psutil.NoSuchProcess(None, None)
            raise
    return wrapper
OSError
dataset/ETHPy150Open packages/psutil/test/_windows.py/wrap_exceptions
7,914
def test_compare_values(self):
    # Certain APIs on Windows have 2 internal implementations, one
    # based on documented Windows APIs, another one based
    # NtQuerySystemInformation() which gets called as fallback in
    # case the first fails because of limited permission error.
    # Here we test that the two methods return the exact same value,
    # see:
    # http://code.google.com/p/psutil/issues/detail?id=304
    def assert_ge_0(obj):
        if isinstance(obj, tuple):
            for value in obj:
                assert value >= 0, value
        elif isinstance(obj, (int, long, float)):
            assert obj >= 0, obj
        else:
            assert 0  # case not handled which needs to be fixed

    def compare_with_tolerance(ret1, ret2, tolerance):
        if ret1 == ret2:
            return
        else:
            if isinstance(ret2, (int, long, float)):
                diff = abs(ret1 - ret2)
                assert diff <= tolerance, diff
            elif isinstance(ret2, tuple):
                for a, b in zip(ret1, ret2):
                    diff = abs(a - b)
                    assert diff <= tolerance, diff

    failures = []
    for name, tolerance in self.fun_names:
        meth1 = wrap_exceptions(getattr(_psutil_mswindows, name))
        meth2 = wrap_exceptions(getattr(_psutil_mswindows, name + '_2'))
        for p in psutil.process_iter():
            #
            try:
                ret1 = meth1(p.pid)
            except psutil.NoSuchProcess:
                continue
            except psutil.AccessDenied:
                ret1 = None
            #
            try:
                ret2 = meth2(p.pid)
            except psutil.NoSuchProcess:
                # this is supposed to fail only in case of zombie process
                # never for permission error
                continue
            # compare values
            try:
                if ret1 is None:
                    assert_ge_0(ret2)
                else:
                    compare_with_tolerance(ret1, ret2, tolerance)
                    assert_ge_0(ret1)
                    assert_ge_0(ret2)
            except __HOLE__:
                err = sys.exc_info()[1]
                trace = traceback.format_exc()
                msg = '%s\npid=%s, method=%r, ret_1=%r, ret_2=%r' \
                      % (trace, p.pid, name, ret1, ret2)
                failures.append(msg)
                break
    if failures:
        self.fail('\n\n'.join(failures))
AssertionError
dataset/ETHPy150Open packages/psutil/test/_windows.py/TestDualProcessImplementation.test_compare_values
7,915
def validate(modelDocument, schemaElement, targetNamespace):
    modelXbrl = modelDocument.modelXbrl
    modelManager = modelXbrl.modelManager
    """
    if not hasattr(modelManager, "xmlSchemaSchema"):
        if getattr(modelManager, "modelXmlSchemaIsLoading", False):
            return
        startedAt = time.time()
        modelManager.modelXmlSchemaIsLoading = True
        priorValidateDisclosureSystem = modelManager.validateDisclosureSystem
        modelManager.validateDisclosureSystem = False
        modelManager.xmlSchemaSchema = ModelXbrl.load(modelManager, XMLSchemaURI, _("validate schema"))
        modelManager.validateDisclosureSystem = priorValidateDisclosureSystem
        '''
        filePath = modelManager.cntlr.webCache.getfilename(XMLSchemaURI)
        modelManager.showStatus(_("lxml compiling XML Schema for Schemas"))
        modelManager.xmlSchemaSchema = etree.XMLSchema(file=filePath)
        '''
        modelXbrl.info("info:xmlSchemaValidator",
                       format_string(modelXbrl.modelManager.locale,
                                     _("schema for XML schemas loaded into lxml %.3f secs"),
                                     time.time() - startedAt),
                       modelDocument=XMLSchemaURI)
        modelManager.showStatus("")
        del modelManager.modelXmlSchemaIsLoading
    '''
    #startedAt = time.time()
    #validationSuccess = modelManager.xmlSchemaSchema.validate(schemaElement)
    #modelXbrl.info("info:xmlSchemaValidator", format_string(modelXbrl.modelManager.locale,
    #    _("schema validated in %.3f secs"),
    #    time.time() - startedAt),
    #    modelDocument=modelDocument)
    if not validationSuccess:
        for error in modelManager.xmlSchemaSchema.error_log:
            modelXbrl.error("xmlSchema:syntax",
                            _("%(error)s, %(fileName)s, line %(line)s, column %(column)s, %(sourceAction)s source element"),
                            modelObject=modelDocument, fileName=modelDocument.basename,
                            error=error.message, line=error.line, column=error.column,
                            sourceAction=("xml schema"))
        modelManager.xmlSchemaSchema._clear_error_log()
    '''
    """
    #XmlValidate.validate(modelXbrl, schemaElement) # use arelle schema validation
    declaredNamespaces = set(doc.targetNamespace
                             for doc, docRef in modelDocument.referencesDocument.items()
                             if docRef.referenceType in ("include", "import"))
    if targetNamespace:
        declaredNamespaces.add(targetNamespace)
    if targetNamespace in ("http://www.w3.org/2001/XMLSchema",
                           "http://www.w3.org/XML/1998/namespace", ):
        # or (targetNamespace and targetNamespace.startswith("http://www.w3.org/1999/xhtml")):
        return  # don't validate w3c schemas
    # check schema semantics
    def resolvedQnames(elt, qnDefs):
        for attrName, attrType, mdlObjects, isQualifiedForm in qnDefs:
            attr = elt.get(attrName)
            if attr is not None:
                try:
                    qnValue = elt.schemaNameQname(attr,
                                                  isQualifiedForm=isQualifiedForm or elt.isQualifiedForm,
                                                  prefixException=ValueError)
                    if qnValue.namespaceURI == XbrlConst.xsd:
                        if attrType != ModelType:
                            raise ValueError("{0} can not have xml schema namespace".format(attrName))
                        if qnValue.localName not in {
                            "anySimpleType", "anyType", "string", "boolean", "float", "double", "decimal",
                            "duration", "dateTime", "time", "date", "gYearMonth", "gYear", "gMonthDay",
                            "gDay", "gMonth", "hexBinary", "base64Binary", "anyURI", "QName", "NOTATION",
                            "normalizedString", "token", "language", "IDREFS", "ENTITIES", "NMTOKEN",
                            "NMTOKENS", "NCName", "ID", "IDREF", "integer", "nonPositiveInteger",
                            "negativeInteger", "long", "int", "short", "byte", "nonNegativeInteger",
                            "unsignedLong", "unsignedInt", "unsignedShort", "unsignedByte", "positiveInteger"
                        }:
                            raise ValueError("{0} qname {1} not recognized".format(attrName, attr))
                    # qname must be defined in an imported or included schema
                    elif qnValue.namespaceURI and qnValue.namespaceURI not in declaredNamespaces:
                        raise ValueError("Namespace is not defined by an import or include element")
                    elif qnValue not in mdlObjects:
                        raise ValueError("{0} is not defined".format(attrName))
                    elif not isinstance(mdlObjects[qnValue], attrType):
                        raise ValueError("{0} not resolved to expected object type".format(attrName))
                except __HOLE__ as err:
                    modelXbrl.error("xmlSchema:valueError",
                                    _("Element attribute %(typeName)s value error: %(value)s, %(error)s"),
                                    modelObject=elt, typeName=attrName, value=attr, error=err)
    def checkSchemaElements(parentElement):
        for elt in parentElement.iterchildren():
            if isinstance(elt, ModelObject) and elt.namespaceURI == XbrlConst.xsd:
                ln = elt.localName
                if ln == "element":
                    resolvedQnames(elt, (("ref", ModelConcept, modelXbrl.qnameConcepts, False),
                                         ("substitutionGroup", ModelConcept, modelXbrl.qnameConcepts, True),
                                         ("type", ModelType, modelXbrl.qnameTypes, True)))
                elif ln == "attribute":
                    resolvedQnames(elt, (("ref", ModelAttribute, modelXbrl.qnameAttributes, False),
                                         ("type", ModelType, modelXbrl.qnameTypes, True)))
                checkSchemaElements(elt)
    checkSchemaElements(schemaElement)
ValueError
dataset/ETHPy150Open Arelle/Arelle/arelle/XmlValidateSchema.py/validate
7,916
@tornado.gen.coroutine
def interpolate_text(self, text):
    """Evaluate a bunch of (already-looked-up) interpolation markup.
    """
    self.task.tick()
    nodls = twcommon.interp.parse(text)

    # While trawling through nodls, we may encounter $if/$end
    # nodes. This keeps track of them. Specifically: a 0 value
    # means "within a true conditional"; 1 means "within a
    # false conditional"; 2 means "in an else/elif after a true
    # conditional."
    suppstack = []
    # We suppress output if any value in suppstack is nonzero.
    # It's easiest to track sum(suppstack), so that's what this is.
    suppressed = 0

    for nod in nodls:
        if not (isinstance(nod, InterpNode)):
            # String.
            if nod and not suppressed:
                self.accum_append(nod, raw=True)
            continue

        nodkey = nod.classname
        # This switch statement might be better off as a method
        # lookup table. But only if it gets long.

        if nodkey == 'If':
            if suppressed:
                # Can't get any more suppressed.
                suppstack.append(0)
                continue
            try:
                ifval = yield self.evalobj(nod.expr, evaltype=EVALTYPE_CODE)
            except LookupError:  # includes SymbolError
                ifval = None
            except __HOLE__:
                ifval = None
            if ifval:
                suppstack.append(0)
            else:
                suppstack.append(1)
                suppressed += 1
            continue

        if nodkey == 'ElIf':
            if len(suppstack) == 0:
                self.accum_append('[$elif without matching $if]')
                continue
            if not suppressed:
                # We follow a successful "if". Suppress now.
                suppstack[-1] = 2
                suppressed = sum(suppstack)
                continue
            if suppstack[-1] == 2:
                # We had a successful "if" earlier, so no change.
                continue
            # We follow an unsuccessful "if". Maybe suppress.
            try:
                ifval = yield self.evalobj(nod.expr, evaltype=EVALTYPE_CODE)
            except LookupError:  # includes SymbolError
                ifval = None
            except AttributeError:
                ifval = None
            if ifval:
                suppstack[-1] = 0
            else:
                suppstack[-1] = 1
            suppressed = sum(suppstack)
            continue

        if nodkey == 'End':
            if len(suppstack) == 0:
                self.accum_append('[$end without matching $if]')
                continue
            suppstack.pop()
            suppressed = sum(suppstack)
            continue

        if nodkey == 'Else':
            if len(suppstack) == 0:
                self.accum_append('[$else without matching $if]')
                continue
            val = suppstack[-1]
            if val == 1:
                val = 0
            else:
                val = 2
            suppstack[-1] = val
            suppressed = sum(suppstack)
            continue

        # The rest of these nodes cannot affect the suppression
        # state.
        if suppressed:
            continue

        if nodkey == 'Link':
            # Non-printing element, append directly
            if not nod.external:
                ackey = EvalPropContext.build_action_key()
                self.linktargets[ackey] = nod.target
                self.accum.append( ['link', ackey] )
            else:
                self.accum.append( ['exlink', nod.target] )
            continue

        if nodkey == 'Interpolate':
            try:
                subres = yield self.evalobj(nod.expr, evaltype=EVALTYPE_CODE)
            except LookupError:  # includes SymbolError:
                continue
            except AttributeError:
                continue
            # {text} objects have already added their contents to
            # the accum array.
            if not (subres is None or subres == ''):
                # Anything not a {text} object gets interpolated as
                # a string.
                self.accum_append(str(subres), raw=True)
            continue

        if nodkey == 'PlayerRef':
            if nod.expr:
                uid = yield self.evalobj(nod.expr, evaltype=EVALTYPE_CODE)
                if isinstance(uid, two.execute.PlayerProxy):
                    uid = uid.uid
                else:
                    uid = ObjectId(uid)
            else:
                uid = self.uid
            player = yield motor.Op(self.app.mongodb.players.find_one,
                                    {'_id':uid},
                                    {'name':1, 'pronoun':1})
            if not player:
                self.accum.append('[No such player]')
                continue
            if nod.key == 'name':
                self.accum_append(player['name'], raw=True)
            else:
                self.accum_append(two.grammar.resolve_pronoun(player, nod.key), raw=True)
            continue

        if nodkey == 'OpenBracket':
            self.accum_append('[', raw=True)
            continue

        if nodkey == 'CloseBracket':
            self.accum_append(']', raw=True)
            continue

        # Otherwise...
        # Non-printing element, append directly
        self.accum.append(nod.describe())

    # End of nodls interaction.
    if len(suppstack) > 0:
        self.accum.append('[$if without matching $end]')

    # We used raw mode, but if the ctx is in cooked mode, we'll fake
    # in WordNode state at the end.
    if self.cooked and self.textstate is twcommon.gentext.RunOnNode:
        self.textstate = twcommon.gentext.WordNode
AttributeError
dataset/ETHPy150Open erkyrath/tworld/lib/two/evalctx.py/EvalPropContext.interpolate_text
7,917
def profile(self):
    """
    Runs the test specified by the test file given to the constructor, and
    returns a list of HAR files (one for each navigation)
    """
    # list of list of har files: [[hars from run 1], [hars from run 2], ...]
    iteration_hars = []
    for x in range(0, self._iterations):
        hars = []
        self.clear_http_cache()
        self.clear_cookies()

        # Navigate to about:blank, to reset memory, etc.
        self._page_event_handler = PageEventHandler()
        self.start_page_event_monitoring(self._page_event_handler.process_event)
        self.navigate_to('about:blank')
        wait_until(lambda: self._page_event_handler.page_loaded)
        self.stop_page_event_monitoring()

        for navigation in self._test['navigations']:
            assert len(navigation) > 0, 'Each navigation must have at least one action'

            # do all the actions except the last one, because the last action causes the actual page navigation
            for i in range(0, len(navigation) - 1):
                self.process_action(navigation[i])

            self._network_event_handler = NetworkEventHandler()
            try:
                wait_for_page_load_event = navigation[-1]['wait-for-page-load']
            except __HOLE__:
                wait_for_page_load_event = True
            try:
                self._page_load_notifier = PageLoadNotifier(wait_for_page_load_event, navigation[-1]['network-timeout'])
            except KeyError:
                self._page_load_notifier = PageLoadNotifier(wait_for_page_load_event)

            self._timeline_event_handler = TimelineEventHandler()
            self.start_network_monitoring(self._network_event_handler.process_event)
            self.start_timeline_monitoring(self._timeline_event_handler.process_event)
            self.start_page_event_monitoring(self._page_load_notifier.process_page_event)
            self._communicator.add_domain_callback('Network', 'page_load_notifier', self._page_load_notifier.process_network_event)
            self._communicator.add_domain_callback('Timeline', 'page_load_notifier', self._page_load_notifier.process_timeline_event)
            self.start_css_selector_profiling()

            self.process_action(navigation[-1])
            wait_until(lambda: self._page_load_notifier.page_loaded())

            self.stop_page_event_monitoring()
            self.stop_timeline_monitoring()
            self.stop_network_monitoring()
            self._css_profiler_handler = CSSProfileParser(self.stop_css_selector_profiling())
            hars.append(self.make_har(navigation[-1]['page-name']))
        iteration_hars.append(hars)
    return iteration_hars
KeyError
dataset/ETHPy150Open linkedin/mobster/src/linkedin/mobster/har/flowprofiler.py/FlowProfiler.profile
7,918
def prepare_files(self, finder, force_root_egg_info=False, bundle=False):
    """Prepare process. Create temp directories, download and/or unpack files."""
    unnamed = list(self.unnamed_requirements)
    reqs = list(self.requirements.values())
    while reqs or unnamed:
        if unnamed:
            req_to_install = unnamed.pop(0)
        else:
            req_to_install = reqs.pop(0)
        install = True
        best_installed = False
        not_found = None
        if not self.ignore_installed and not req_to_install.editable:
            req_to_install.check_if_exists()
            if req_to_install.satisfied_by:
                if self.upgrade:
                    if not self.force_reinstall and not req_to_install.url:
                        try:
                            url = finder.find_requirement(
                                req_to_install, self.upgrade)
                        except BestVersionAlreadyInstalled:
                            best_installed = True
                            install = False
                        except DistributionNotFound:
                            not_found = sys.exc_info()[1]
                        else:
                            # Avoid the need to call find_requirement again
                            req_to_install.url = url.url
                    if not best_installed:
                        #don't uninstall conflict if user install and conflict is not user install
                        if not (self.use_user_site and not dist_in_usersite(req_to_install.satisfied_by)):
                            req_to_install.conflicts_with = req_to_install.satisfied_by
                        req_to_install.satisfied_by = None
                else:
                    install = False
            if req_to_install.satisfied_by:
                if best_installed:
                    logger.notify('Requirement already up-to-date: %s'
                                  % req_to_install)
                else:
                    logger.notify('Requirement already satisfied '
                                  '(use --upgrade to upgrade): %s'
                                  % req_to_install)
        if req_to_install.editable:
            logger.notify('Obtaining %s' % req_to_install)
        elif install:
            if req_to_install.url and req_to_install.url.lower().startswith('file:'):
                logger.notify('Unpacking %s' % display_path(url_to_path(req_to_install.url)))
            else:
                logger.notify('Downloading/unpacking %s' % req_to_install)
        logger.indent += 2
        try:
            is_bundle = False
            if req_to_install.editable:
                if req_to_install.source_dir is None:
                    location = req_to_install.build_location(self.src_dir)
                    req_to_install.source_dir = location
                else:
                    location = req_to_install.source_dir
                if not os.path.exists(self.build_dir):
                    _make_build_dir(self.build_dir)
                req_to_install.update_editable(not self.is_download)
                if self.is_download:
                    req_to_install.run_egg_info()
                    req_to_install.archive(self.download_dir)
                else:
                    req_to_install.run_egg_info()
            elif install:
                ##@@ if filesystem packages are not marked
                ##editable in a req, a non deterministic error
                ##occurs when the script attempts to unpack the
                ##build directory
                # NB: This call can result in the creation of a temporary build directory
                location = req_to_install.build_location(self.build_dir, not self.is_download)
                ## FIXME: is the existance of the checkout good enough to use it?  I don't think so.
                unpack = True
                url = None
                if not os.path.exists(os.path.join(location, 'setup.py')):
                    ## FIXME: this won't upgrade when there's an existing package unpacked in `location`
                    if req_to_install.url is None:
                        if not_found:
                            raise not_found
                        url = finder.find_requirement(req_to_install, upgrade=self.upgrade)
                    else:
                        ## FIXME: should req_to_install.url already be a link?
                        url = Link(req_to_install.url)
                        assert url
                    if url:
                        try:
                            self.unpack_url(url, location, self.is_download)
                        except __HOLE__:
                            e = sys.exc_info()[1]
                            logger.fatal('Could not install requirement %s because of error %s'
                                         % (req_to_install, e))
                            raise InstallationError(
                                'Could not install requirement %s because of HTTP error %s for URL %s'
                                % (req_to_install, e, url))
                    else:
                        unpack = False
                if unpack:
                    is_bundle = req_to_install.is_bundle
                    if is_bundle:
                        req_to_install.move_bundle_files(self.build_dir, self.src_dir)
                        for subreq in req_to_install.bundle_requirements():
                            reqs.append(subreq)
                            self.add_requirement(subreq)
                    elif self.is_download:
                        req_to_install.source_dir = location
                        req_to_install.run_egg_info()
                        if url and url.scheme in vcs.all_schemes:
                            req_to_install.archive(self.download_dir)
                    else:
                        req_to_install.source_dir = location
                        req_to_install.run_egg_info()
                        if force_root_egg_info:
                            # We need to run this to make sure that the .egg-info/
                            # directory is created for packing in the bundle
                            req_to_install.run_egg_info(force_root_egg_info=True)
                        req_to_install.assert_source_matches_version()
                        #@@ sketchy way of identifying packages not grabbed from an index
                        if bundle and req_to_install.url:
                            self.copy_to_build_dir(req_to_install)
                            install = False
                    # req_to_install.req is only avail after unpack for URL pkgs
                    # repeat check_if_exists to uninstall-on-upgrade (#14)
                    req_to_install.check_if_exists()
                    if req_to_install.satisfied_by:
                        if self.upgrade or self.ignore_installed:
                            #don't uninstall conflict if user install and and conflict is not user install
                            if not (self.use_user_site and not dist_in_usersite(req_to_install.satisfied_by)):
                                req_to_install.conflicts_with = req_to_install.satisfied_by
                            req_to_install.satisfied_by = None
                        else:
                            install = False
            if not is_bundle:
                ## FIXME: shouldn't be globally added:
                finder.add_dependency_links(req_to_install.dependency_links)
                if (req_to_install.extras):
                    logger.notify("Installing extra requirements: %r" % ','.join(req_to_install.extras))
                if not self.ignore_dependencies:
                    for req in req_to_install.requirements(req_to_install.extras):
                        try:
                            name = pkg_resources.Requirement.parse(req).project_name
                        except ValueError:
                            e = sys.exc_info()[1]
                            ## FIXME: proper warning
                            logger.error('Invalid requirement: %r (%s) in requirement %s'
                                         % (req, e, req_to_install))
                            continue
                        if self.has_requirement(name):
                            ## FIXME: check for conflict
                            continue
                        subreq = InstallRequirement(req, req_to_install)
                        reqs.append(subreq)
                        self.add_requirement(subreq)
                if not self.has_requirement(req_to_install.name):
                    #'unnamed' requirements will get added here
                    self.add_requirement(req_to_install)
            if self.is_download or req_to_install._temp_build_dir is not None:
                self.reqs_to_cleanup.append(req_to_install)
            else:
                self.reqs_to_cleanup.append(req_to_install)
            if install:
                self.successfully_downloaded.append(req_to_install)
                if bundle and (req_to_install.url and req_to_install.url.startswith('file:///')):
                    self.copy_to_build_dir(req_to_install)
        finally:
            logger.indent -= 2
HTTPError
dataset/ETHPy150Open balanced/status.balancedpayments.com/venv/lib/python2.7/site-packages/pip-1.3.1-py2.7.egg/pip/req.py/RequirementSet.prepare_files
7,919
def remove(self):
    logger.info('Removing pth entries from %s:' % self.file)
    fh = open(self.file, 'rb')
    # windows uses '\r\n' with py3k, but uses '\n' with py2.x
    lines = fh.readlines()
    self._saved_lines = lines
    fh.close()
    if any(b('\r\n') in line for line in lines):
        endline = '\r\n'
    else:
        endline = '\n'
    for entry in self.entries:
        try:
            logger.info('Removing entry: %s' % entry)
            lines.remove(b(entry + endline))
        except __HOLE__:
            pass
    fh = open(self.file, 'wb')
    fh.writelines(lines)
    fh.close()
ValueError
dataset/ETHPy150Open balanced/status.balancedpayments.com/venv/lib/python2.7/site-packages/pip-1.3.1-py2.7.egg/pip/req.py/UninstallPthEntries.remove
7,920
def readline(self):
    try:
        try:
            return next(self._gen)
        except NameError:
            return self._gen.next()
    except __HOLE__:
        return ''
StopIteration
dataset/ETHPy150Open balanced/status.balancedpayments.com/venv/lib/python2.7/site-packages/pip-1.3.1-py2.7.egg/pip/req.py/FakeFile.readline
7,921
def __getattr__(self, attr):
    if attr in self._particles:
        particle = self._particles[attr]
        i, j = self._match[particle]
        self._check_valid_indexes(i, j, attr)
        match = Match(self._match, self._words, i, j)
        return particle.interpret(match)
    try:
        i, j = self._match[attr]
    except __HOLE__:
        message = "'{}' object has no attribute '{}'"
        raise AttributeError(message.format(self.__class__.__name__, attr))
    self._check_valid_indexes(i, j, attr)
    return WordList(self._words[i:j])
KeyError
dataset/ETHPy150Open machinalis/quepy/quepy/parsing.py/Match.__getattr__
7,922
def get_interpretation(self, words):
    rulename = self.__class__.__name__
    logger.debug("Trying to match with regex: {}".format(rulename))
    match = refo.match(self.regex + Literal(_EOL), words + [_EOL])
    if not match:
        logger.debug("No match")
        return None, None
    try:
        match = Match(match, words)
        result = self.interpret(match)
    except BadSemantic as error:
        logger.debug(str(error))
        return None, None
    try:
        expression, userdata = result
    except __HOLE__:
        expression, userdata = result, None
    expression.rule_used = rulename
    return expression, userdata
TypeError
dataset/ETHPy150Open machinalis/quepy/quepy/parsing.py/QuestionTemplate.get_interpretation
7,923
def _get_datastore(self, datastore_name=None):
    """
    Returns the couch datastore instance and datastore name.
    This caches the datastore instance to avoid an explicit lookup to save on http request.
    The consequence is that if another process deletes the datastore in the meantime, we will fail later.
    """
    datastore_name = self._get_datastore_name(datastore_name)
    if datastore_name in self._datastore_cache:
        return self._datastore_cache[datastore_name], datastore_name
    try:
        ds = self.server[datastore_name]  # Note: causes http lookup
        self._datastore_cache[datastore_name] = ds
        return ds, datastore_name
    except ResourceNotFound:
        raise NotFound("Datastore '%s' does not exist" % datastore_name)
    except __HOLE__:
        raise BadRequest("Datastore name '%s' invalid" % datastore_name)
    except ServerError as se:
        raise BadRequest("Data store name %s invalid" % datastore_name)
ValueError
dataset/ETHPy150Open ooici/pyon/pyon/datastore/couchdb/base_store.py/CouchDataStore._get_datastore
7,924
def _create_datastore(self, datastore_name):
    try:
        self.server.create(datastore_name)
    except PreconditionFailed:
        raise BadRequest("Datastore with name %s already exists" % datastore_name)
    except __HOLE__:
        raise BadRequest("Datastore name %s invalid" % datastore_name)
    except ServerError as se:
        if se.message[1][0] == 'illegal_database_name':
            raise BadRequest("Data store name %s invalid" % datastore_name)
        else:
            raise
ValueError
dataset/ETHPy150Open ooici/pyon/pyon/datastore/couchdb/base_store.py/CouchDataStore._create_datastore
7,925
def delete_datastore(self, datastore_name=None):
    try:
        super(CouchDataStore, self).delete_datastore(datastore_name)
    except ResourceNotFound:
        raise NotFound('Datastore %s does not exist' % datastore_name)
    except __HOLE__:
        raise BadRequest("Datastore name %s invalid" % datastore_name)
    except ServerError as se:
        if se.message[1][0] == 'illegal_database_name':
            raise BadRequest("Data store name %s invalid" % datastore_name)
        else:
            raise
ValueError
dataset/ETHPy150Open ooici/pyon/pyon/datastore/couchdb/base_store.py/CouchDataStore.delete_datastore
7,926
def test_fetch_rcv1():
    try:
        data1 = fetch_rcv1(shuffle=False, download_if_missing=False)
    except __HOLE__ as e:
        if e.errno == errno.ENOENT:
            raise SkipTest("Download RCV1 dataset to run this test.")

    X1, Y1 = data1.data, data1.target
    cat_list, s1 = data1.target_names.tolist(), data1.sample_id

    # test sparsity
    assert_true(sp.issparse(X1))
    assert_true(sp.issparse(Y1))
    assert_equal(60915113, X1.data.size)
    assert_equal(2606875, Y1.data.size)

    # test shapes
    assert_equal((804414, 47236), X1.shape)
    assert_equal((804414, 103), Y1.shape)
    assert_equal((804414,), s1.shape)
    assert_equal(103, len(cat_list))

    # test ordering of categories
    first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151']
    assert_array_equal(first_categories, cat_list[:6])

    # test number of sample for some categories
    some_categories = ('GMIL', 'E143', 'CCAT')
    number_non_zero_in_cat = (5, 1206, 381327)
    for num, cat in zip(number_non_zero_in_cat, some_categories):
        j = cat_list.index(cat)
        assert_equal(num, Y1[:, j].data.size)

    # test shuffling and subset
    data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77,
                       download_if_missing=False)
    X2, Y2 = data2.data, data2.target
    s2 = data2.sample_id

    # The first 23149 samples are the training samples
    assert_array_equal(np.sort(s1[:23149]), np.sort(s2))

    # test some precise values
    some_sample_ids = (2286, 3274, 14042)
    for sample_id in some_sample_ids:
        idx1 = s1.tolist().index(sample_id)
        idx2 = s2.tolist().index(sample_id)

        feature_values_1 = X1[idx1, :].toarray()
        feature_values_2 = X2[idx2, :].toarray()
        assert_almost_equal(feature_values_1, feature_values_2)

        target_values_1 = Y1[idx1, :].toarray()
        target_values_2 = Y2[idx2, :].toarray()
        assert_almost_equal(target_values_1, target_values_2)
IOError
dataset/ETHPy150Open scikit-learn/scikit-learn/sklearn/datasets/tests/test_rcv1.py/test_fetch_rcv1
7,927
def import_string(dotted_path, error_prefix=''):
    try:
        module_path, class_name = dotted_path.rsplit('.', 1)
    except ValueError:
        raise ImproperlyConfigured("%s%s doesn't look like a module path" % (
            error_prefix, dotted_path))
    try:
        module = import_module(module_path)
    except __HOLE__ as e:
        msg = '%sError importing module %s: "%s"' % (
            error_prefix, module_path, e)
        six.reraise(ImproperlyConfigured, ImproperlyConfigured(msg),
                    sys.exc_info()[2])
    try:
        attr = getattr(module, class_name)
    except AttributeError:
        raise ImproperlyConfigured('%sModule "%s" does not define a "%s" attribute/class' % (
            error_prefix, module_path, class_name))
    return attr
ImportError
dataset/ETHPy150Open mwarkentin/django-watchman/watchman/utils.py/import_string
7,928
def collect(self):
    """Collect statistics from a Nagios perfdata directory.
    """
    perfdata_dir = self.config['perfdata_dir']
    try:
        filenames = os.listdir(perfdata_dir)
    except __HOLE__:
        self.log.error("Cannot read directory `{dir}'".format(
            dir=perfdata_dir))
        return
    for filename in filenames:
        self._process_file(os.path.join(perfdata_dir, filename))
OSError
dataset/ETHPy150Open BrightcoveOS/Diamond/src/collectors/nagiosperfdata/nagiosperfdata.py/NagiosPerfdataCollector.collect
7,929
def _parse_perfdata(self, s):
    """Parse performance data from a perfdata string
    """
    metrics = []
    counters = re.findall(self.TOKENIZER_RE, s)
    if counters is None:
        self.log.warning("Failed to parse performance data: {s}".format(
            s=s))
        return metrics
    for (key, value, uom, warn, crit, min, max) in counters:
        try:
            norm_value = self._normalize_to_unit(float(value), uom)
            metrics.append((key, norm_value))
        except __HOLE__:
            self.log.warning(
                "Couldn't convert value '{value}' to float".format(
                    value=value))
    return metrics
ValueError
dataset/ETHPy150Open BrightcoveOS/Diamond/src/collectors/nagiosperfdata/nagiosperfdata.py/NagiosPerfdataCollector._parse_perfdata
7,930
def _process_file(self, path):
    """Parse and submit the metrics from a file
    """
    try:
        f = open(path)
        for line in f:
            self._process_line(line)
        os.remove(path)
    except __HOLE__, ex:
        self.log.error("Could not open file `{path}': {error}".format(
            path=path, error=ex.strerror))
IOError
dataset/ETHPy150Open BrightcoveOS/Diamond/src/collectors/nagiosperfdata/nagiosperfdata.py/NagiosPerfdataCollector._process_file
7,931
@must_be_signed
@must_have_addon('osfstorage', 'node')
def osfstorage_update_metadata(node_addon, payload, **kwargs):
    try:
        version_id = payload['version']
        metadata = payload['metadata']
    except __HOLE__:
        raise HTTPError(httplib.BAD_REQUEST)

    version = models.FileVersion.load(version_id)

    if version is None:
        raise HTTPError(httplib.NOT_FOUND)

    version.update_metadata(metadata)

    return {'status': 'success'}
KeyError
dataset/ETHPy150Open CenterForOpenScience/osf.io/website/addons/osfstorage/views.py/osfstorage_update_metadata
7,932
@must_be_signed
@decorators.autoload_filenode(default_root=True)
def osfstorage_get_metadata(file_node, **kwargs):
    try:
        # TODO This should change to version as its internal it can be changed anytime
        version = int(request.args.get('revision'))
    except (ValueError, __HOLE__):  # If its not a number
        version = None
    return file_node.serialize(version=version, include_full=True)
TypeError
dataset/ETHPy150Open CenterForOpenScience/osf.io/website/addons/osfstorage/views.py/osfstorage_get_metadata
7,933
@must_be_signed
@must_not_be_registration
@decorators.autoload_filenode(must_be='folder')
def osfstorage_create_child(file_node, payload, node_addon, **kwargs):
    parent = file_node  # Just for clarity
    name = payload.get('name')
    user = User.load(payload.get('user'))
    is_folder = payload.get('kind') == 'folder'

    if not (name or user) or '/' in name:
        raise HTTPError(httplib.BAD_REQUEST)

    try:
        if is_folder:
            created, file_node = True, parent.append_folder(name)
        else:
            created, file_node = True, parent.append_file(name)
    except KeyExistsException:
        created, file_node = False, parent.find_child_by_name(name, kind=int(not is_folder))

    if not created and is_folder:
        raise HTTPError(httplib.CONFLICT, data={
            'message': 'Cannot create folder "{name}" because a file or folder already exists at path "{path}"'.format(
                name=file_node.name,
                path=file_node.materialized_path,
            )
        })

    if not is_folder:
        try:
            if file_node.checkout is None or file_node.checkout._id == user._id:
                version = file_node.create_version(
                    user,
                    dict(payload['settings'], **dict(
                        payload['worker'], **{
                            'object': payload['metadata']['name'],
                            'service': payload['metadata']['provider'],
                        })
                    ),
                    dict(payload['metadata'], **payload['hashes'])
                )
                version_id = version._id
                archive_exists = version.archive is not None
            else:
                raise HTTPError(httplib.FORBIDDEN, data={
                    'message_long': 'File cannot be updated due to checkout status.'
                })
        except __HOLE__:
            raise HTTPError(httplib.BAD_REQUEST)
    else:
        version_id = None
        archive_exists = False

    return {
        'status': 'success',
        'archive': not archive_exists,  # Should waterbutler also archive this file
        'data': file_node.serialize(),
        'version': version_id,
    }, httplib.CREATED if created else httplib.OK
KeyError
dataset/ETHPy150Open CenterForOpenScience/osf.io/website/addons/osfstorage/views.py/osfstorage_create_child
7,934
@must_be_signed
@decorators.autoload_filenode(must_be='file')
def osfstorage_download(file_node, payload, node_addon, **kwargs):
    if not request.args.get('version'):
        version_id = None
    else:
        try:
            version_id = int(request.args['version'])
        except __HOLE__:
            raise make_error(httplib.BAD_REQUEST, message_short='Version must be an integer if not specified')

    version = file_node.get_version(version_id, required=True)

    if request.args.get('mode') not in ('render', ):
        utils.update_analytics(node_addon.owner, file_node._id, int(version.identifier) - 1)

    return {
        'data': {
            'name': file_node.name,
            'path': version.location_hash,
        },
        'settings': {
            osf_storage_settings.WATERBUTLER_RESOURCE: version.location[osf_storage_settings.WATERBUTLER_RESOURCE],
        },
    }
ValueError
dataset/ETHPy150Open CenterForOpenScience/osf.io/website/addons/osfstorage/views.py/osfstorage_download
7,935
def getSubtitleJSON(PMS_address, path, options):
    """
    # double check aTV UDID, redo from client IP if needed/possible
    if not 'PlexConnectUDID' in options:
        UDID = getATVFromIP(options['aTVAddress'])
        if UDID:
            options['PlexConnectUDID'] = UDID
    """
    path = path + '?' if not '?' in path else '&'
    path = path + 'encoding=utf-8'

    if not 'PlexConnectUDID' in options:  # aTV unidentified, UDID not known
        return False
    UDID = options['PlexConnectUDID']

    # determine PMS_uuid, PMSBaseURL from IP (PMS_mark)
    xargs = {}
    PMS_uuid = PlexAPI.getPMSFromAddress(UDID, PMS_address)
    PMS_baseURL = PlexAPI.getPMSProperty(UDID, PMS_uuid, 'baseURL')
    xargs['X-Plex-Token'] = PlexAPI.getPMSProperty(UDID, PMS_uuid, 'accesstoken')

    dprint(__name__, 1, "subtitle URL: {0}{1}", PMS_baseURL, path)
    dprint(__name__, 1, "xargs: {0}", xargs)

    request = urllib2.Request(PMS_baseURL+path, None, xargs)
    try:
        response = urllib2.urlopen(request, timeout=20)
    except urllib2.URLError as e:
        dprint(__name__, 0, 'No Response from Plex Media Server')
        if hasattr(e, 'reason'):
            dprint(__name__, 0, "We failed to reach a server. Reason: {0}", e.reason)
        elif hasattr(e, 'code'):
            dprint(__name__, 0, "The server couldn't fulfill the request. Error code: {0}", e.code)
        return False
    except __HOLE__:
        dprint(__name__, 0, 'Error loading response XML from Plex Media Server')
        return False

    # Todo: Deal with ANSI files. How to select used "codepage"?
    subtitleFile = response.read()
    print response.headers

    dprint(__name__, 1, "====== received Subtitle ======")
    dprint(__name__, 1, "{0} [...]", subtitleFile[:255])
    dprint(__name__, 1, "====== Subtitle finished ======")

    if options['PlexConnectSubtitleFormat']=='srt':
        subtitle = parseSRT(subtitleFile)
    else:
        return False

    JSON = json.dumps(subtitle)
    dprint(__name__, 1, "====== generated subtitle aTV subtitle JSON ======")
    dprint(__name__, 1, "{0} [...]", JSON[:255])
    dprint(__name__, 1, "====== aTV subtitle JSON finished ======")
    return(JSON)
IOError
dataset/ETHPy150Open iBaa/PlexConnect/Subtitle.py/getSubtitleJSON
7,936
def __call__(self, parser, args, values, option_string=None):
    if values is None:
        values = '1'
    try:
        verbosity = int(values)
    except __HOLE__:
        # The default is 1, so one -v should be 2
        verbosity = values.count('v') + 1
    if verbosity > 3:
        verbosity = 3
    setattr(args, self.dest, verbosity)
ValueError
dataset/ETHPy150Open winhamwr/neckbeard/neckbeard/bin/neckbeard.py/VerboseAction.__call__
7,937
def wait_for(self, function_with_assertion, timeout=5):
    start_time = time.time()
    while time.time() - start_time < timeout:
        try:
            return function_with_assertion()
        except (__HOLE__, WebDriverException):
            time.sleep(0.1)
    # one more try, which will raise any errors if they are outstanding
    return function_with_assertion()
AssertionError
dataset/ETHPy150Open chrisdev/wagtail-cookiecutter-foundation/{{cookiecutter.repo_name}}/functional_tests/test_events.py/EventsPageTests.wait_for
7,938
def setUp(self):
    """Run before each test method to initialize test environment."""
    super(TestCase, self).setUp()
    test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
    try:
        test_timeout = int(test_timeout)
    except __HOLE__:
        # If timeout value is invalid do not set a timeout.
        test_timeout = 0
    if test_timeout > 0:
        self.useFixture(fixtures.Timeout(test_timeout, gentle=True))

    self.useFixture(fixtures.NestedTempfile())
    self.useFixture(fixtures.TempHomeDir())

    if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES:
        stdout = self.useFixture(fixtures.StringStream('stdout')).stream
        self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
    if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES:
        stderr = self.useFixture(fixtures.StringStream('stderr')).stream
        self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))

    self.log_fixture = self.useFixture(fixtures.FakeLogger())
ValueError
dataset/ETHPy150Open openstack/python-congressclient/congressclient/tests/base.py/TestCase.setUp
7,939
def forwards(self, orm):
    "Write your forwards methods here."
    # Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
    User = orm[user_orm_label]
    try:
        user = User.objects.all()[0]
        for article in orm.Article.objects.all():
            article.author = user
            article.save()
    except __HOLE__:
        pass
IndexError
dataset/ETHPy150Open gkmngrgn/radpress/radpress/south_migrations/0004_radpress_article_authors.py/Migration.forwards
7,940
def do_command(self, e, cmd):
    self.activated = True
    logger.debug('Received command: %s', cmd)
    nickname = e.source.nick
    if nickname not in self.limit_access_to:
        self.send_user_message(
            nickname,
            "I'm sorry, %s, you are not allowed to give commands "
            "to this debugger." % (
                nickname,
            )
        )
        return
    if cmd.startswith("!allow"):
        allows = cmd.split(' ')
        usernames = allows[1:]
        if not self.limit_access_to:
            self.limit_access_to.append(nickname)
        self.limit_access_to.extend(usernames)
        self.send_channel_message(
            "The following users have been granted access to the debugger:"
            " %s." % (
                ', '.join(usernames)
            )
        )
    elif cmd.startswith("!forbid"):
        forbids = cmd.split(' ')
        usernames = forbids[1:]
        try:
            for username in usernames:
                self.limit_access_to.remove(username)
            self.send_channel_message(
                "The following users have been forbidden access to the "
                "debugger: %s." % (
                    ', '.join(usernames)
                )
            )
        except ValueError:
            self.send_channel_message(
                "The users %s are not in the 'allows' list. You must "
                "have a defined 'allows' list to remove users from it." % (
                    ', '.join(usernames)
                )
            )
        if not self.limit_access_to:
            self.send_channel_message(
                "No users are allowed to interact with the debugger; "
                "continuing from breakpoint."
            )
            self.queue.put('continue')
    elif cmd.startswith("!set_dpaste_minimum_response_length"):
        value = cmd.split(' ')
        try:
            self.dpaste_minimum_response_length = int(value[1])
            self.send_channel_message(
                "Messages longer than %s lines will now be posted "
                "to dpaste if possible." % (
                    self.dpaste_minimum_response_length
                )
            )
        except (TypeError, IndexError, __HOLE__):
            self.send_channel_message(
                "An error was encountered while setting the "
                "dpaste_minimum_response_length setting. %s"
            )
    elif cmd.startswith("!set_message_wait_seconds"):
        value = cmd.split(' ')
        try:
            self.message_wait_seconds = float(value[1])
            self.send_channel_message(
                "There will be a delay of %s seconds between "
                "sending each message." % (
                    self.message_wait_seconds
                )
            )
        except (TypeError, IndexError, ValueError):
            self.send_channel_message(
                "An error was encountered while setting the "
                "message_wait_seconds setting."
            )
    elif cmd.startswith("!help"):
        available_commands = textwrap.dedent("""
            Available Commands:

            * !!allow NICKNAME
              Add NICKNAME to the list of users that are allowed to interact
              with the debugger.
              Current value: {limit_access_to}.
            * !!forbid NICKNAME
              Remove NICKNAME from the list of users that are allowed to
              interact with the debugger.
            * !!set_dpaste_minimum_response_length INTEGER
              Try to send messages this length or longer in lines to dpaste
              rather than sending them to IRC directly.
              Current value: {dpaste_minimum_response_length}.
            * !!set_message_wait_seconds FLOAT
              Set the number of seconds to wait between sending messages
              (this is a measure used to prevent being kicked from Freenode
              and other IRC servers that enforce limits on the number of
              messages a client an send in a given period of time.
              Current value: {message_wait_seconds}.
        """.format(
            limit_access_to=self.limit_access_to,
            dpaste_minimum_response_length=(
                self.dpaste_minimum_response_length
            ),
            message_wait_seconds=self.message_wait_seconds,
        ))
        self.send_channel_message(
            available_commands,
            dpaste=True,
        )
    else:
        self.queue.put(cmd.strip())
ValueError
dataset/ETHPy150Open coddingtonbear/ircpdb/ircpdb/bot.py/IrcpdbBot.do_command
7,941
def process_forever(self, inhandle, outhandle, timeout=0.1):
    self._connect()

    # Let's mark out inhandle as non-blocking
    fcntl.fcntl(inhandle, fcntl.F_SETFL, os.O_NONBLOCK)

    # Used for keeping track of when the bot was started
    # so we can disconnect for inactivity.
    started = time.time()
    # Keeps track of whether or not we're in the process of
    # disconnecting (queued the 'q' command)
    disconnecting = False

    while True:
        try:
            messages = inhandle.read()
        except IOError:
            messages = None
        if messages:
            for message in messages.split('(Pdb)'):
                stripped = message.strip()
                if stripped:
                    logger.debug('>> %s', stripped)
                    self.send_channel_message(stripped)
            self.send_prompt()
        try:
            self.reactor.process_once(timeout)
        except __HOLE__:
            # This just *happens* -- I think these are coming from
            # maybe MOTD messages?  It isn't clear.
            logger.warning(
                'UnicodeDecodeError raised while processing messages.'
            )
        while True:
            if self.queue.empty():
                break
            message = self.queue.get(block=False)
            logger.debug('<< %s', message)
            outhandle.write(u'%s\n' % message)
            outhandle.flush()
        if (
            time.time() > started + self.activation_timeout
            and not self.activated
            and not disconnecting
        ):
            self.send_channel_message(
                [
                    "No response received within %s seconds; "
                    "disconnecting due to inactivity." % (
                        self.activation_timeout
                    )
                ],
                dpaste=False,
            )
            disconnecting = True
            self.queue.put('c')
UnicodeDecodeError
dataset/ETHPy150Open coddingtonbear/ircpdb/ircpdb/bot.py/IrcpdbBot.process_forever
7,942
def get_attribute(self, name):
    try:
        xmlattr = self.__class__.ATTRIBUTES[name]
    except KeyError:
        try:
            xmlattr = self.__class__.KWATTRIBUTES[name]
        except KeyError:
            return None
    try:
        return self.__dict__["_attribs"][xmlattr.name].value
    except __HOLE__:
        # might be implied, fixed, or enum...
        if xmlattr.a_decl in (FIXED, IMPLIED, DEFAULT):
            return xmlattr.default or ""
        return None
KeyError
dataset/ETHPy150Open kdart/pycopia/XML/pycopia/XML/POM.py/ElementNode.get_attribute
7,943
def set_attribute(self, name, val, ns=""):
    """set_attribute(name, value)
    Set the element node attribute "name" to "value".
    The name can be the shorthand identifier, or the real name.
    """
    if ":" in name:
        self._attribs[name] = POMAttribute(name, val)
        return True
    try:
        xmlattr = self.__class__.ATTRIBUTES[name]
    except KeyError:
        try:
            xmlattr = self.__class__.KWATTRIBUTES[name]
        except __HOLE__:
            return False
    self._attribs[xmlattr.name] = POMAttribute(xmlattr.name, val, ns)
    return True
KeyError
dataset/ETHPy150Open kdart/pycopia/XML/pycopia/XML/POM.py/ElementNode.set_attribute
7,944
def __delattr__(self, name):
    try:
        xmlattr = self.__class__.ATTRIBUTES[name]
    except KeyError:
        try:
            xmlattr = self.__class__.KWATTRIBUTES[name]
        except __HOLE__:
            try:
                del self.__dict__[key]
            except KeyError:
                raise AttributeError("%r has no attribute %r " % (self.__class__, name))
    try:
        del self._attribs[xmlattr.name]
    except KeyError:
        pass
KeyError
dataset/ETHPy150Open kdart/pycopia/XML/pycopia/XML/POM.py/ElementNode.__delattr__
7,945
def detach(self):
    """Detach this node from the tree. It becomes the root of another tree."""
    if self._parent:
        try:
            i = self._parent.index(self)
            del self._parent[i]
        except __HOLE__:
            pass
    self._parent = None
    return self
ValueError
dataset/ETHPy150Open kdart/pycopia/XML/pycopia/XML/POM.py/ElementNode.detach
7,946
def validate(self, encoding=DEFAULT_ENCODING):
    ff = FakeFile(None)
    # Will raise a ValidationError if not valid.
    # Don't need full backtrace for this type of error.
    try:
        self.emit(ff, encoding)
    except __HOLE__ as err:
        raise ValidationError(err)
ValidationError
dataset/ETHPy150Open kdart/pycopia/XML/pycopia/XML/POM.py/ElementNode.validate
7,947
def _attribs_match(self, obj, attribdict):
    for tname, tval in attribdict.items():
        try:
            if getattr(obj, tname) != tval:
                return 0
        except __HOLE__:
            return 0
    return 1
AttributeError
dataset/ETHPy150Open kdart/pycopia/XML/pycopia/XML/POM.py/ElementNode._attribs_match
7,948
def next(self):
    while 1:
        try:
            n = self.node[self.i]
        except __HOLE__:
            raise StopIteration
        self.i += 1
        if isinstance(n, elclass):
            break
    return n
IndexError
dataset/ETHPy150Open kdart/pycopia/XML/pycopia/XML/POM.py/NodeIterator.next
7,949
def __init__(self, dtd=None, doctype=None, lang=None, encoding=DEFAULT_ENCODING):
    if doctype is None and dtd is None:
        raise ValueError("POMDocument: Need one of doctype or dtd parameter.")
    self.dtds = []
    self.root = None
    self.lang = lang
    self.dirty = 0
    self._idmap = {}
    self._COUNTERS = {}
    if doctype:  # implies new document
        self.set_doctype(doctype)
    elif dtd:
        self.set_dtd(dtd)
        try:
            root = self.dtd._Root()
        except __HOLE__:
            print("Document warning: unknown root element.", file=sys.stderr)
        else:
            self.set_root(root)
    self.set_encoding(encoding)
    self.set_language(lang)
AttributeError
dataset/ETHPy150Open kdart/pycopia/XML/pycopia/XML/POM.py/POMDocument.__init__
7,950
def get_identity(self, name):
    for dtd in self.dtds:
        try:
            return dtd.GENERAL_ENTITIES[name]
        except __HOLE__:
            pass
    return None
KeyError
dataset/ETHPy150Open kdart/pycopia/XML/pycopia/XML/POM.py/POMDocument.get_identity
7,951
def _find_element(elname, modules):
    for mod in modules:
        try:
            return getattr(mod, elname)
        except __HOLE__:
            continue
    return None
AttributeError
dataset/ETHPy150Open kdart/pycopia/XML/pycopia/XML/POM.py/_find_element
7,952
def _free_slave_executor(self, slave):
    num_executors_in_use = slave.free_executor()
    if num_executors_in_use == 0:
        try:
            self._slaves_allocated.remove(slave)
        except __HOLE__:
            pass  # We have already deallocated this slave, no need to teardown
        else:
            slave.teardown()
ValueError
dataset/ETHPy150Open box/ClusterRunner/app/master/build_scheduler.py/BuildScheduler._free_slave_executor
7,953
def _traverse(node):
    """ Generator to traverse a tree accessing the nodes' children
    attribute.

    **Example**

    Here is a simple example printing the names of all the objects in the
    pipeline::

        for obj in mlab.pipeline.traverse(mlab.gcf()):
            print obj.name
    """
    try:
        for leaf in node.children:
            for leaflet in _traverse(leaf):
                yield leaflet
    except __HOLE__:
        pass
    yield node
AttributeError
dataset/ETHPy150Open enthought/mayavi/mayavi/tools/tools.py/_traverse
7,954
def _find_module_manager(object=None, data_type=None):
    """If an object is specified, returns its module_manager, elsewhere finds
    the first module_manager in the scene.
    """
    if object is None:
        for object in _traverse(gcf()):
            if isinstance(object, ModuleManager):
                if data_type == 'scalar':
                    if not _has_scalar_data(object):
                        continue
                    try:
                        if not object.actor.mapper.scalar_visibility:
                            continue
                    except __HOLE__:
                        pass
                if data_type == 'vector' and not _has_vector_data(object):
                    continue
                if data_type == 'tensor' and not _has_tensor_data(object):
                    continue
                return object
    else:
        if hasattr(object, 'module_manager'):
            if ((data_type == 'scalar' and _has_scalar_data(object))
                    or (data_type == 'vector' and _has_vector_data(object))
                    or (data_type == 'tensor' and _has_tensor_data(object))
                    or data_type is None):
                return object.module_manager
            else:
                print("This object has no %s data" % data_type)
        else:
            print("This object has no color map")
    return None
AttributeError
dataset/ETHPy150Open enthought/mayavi/mayavi/tools/tools.py/_find_module_manager
7,955
def test_should_not_be_able_to_set_generated_attributes(self):
        container = self.manager.create()

        # Verify that generated attributes cannot be set.
        attributes = [
            "container_ref", "created", "updated", "status", "consumers"
        ]
        for attr in attributes:
            try:
                setattr(container, attr, "test")
                self.fail("didn't raise an AttributeError exception")
            except __HOLE__:
                pass
AttributeError
dataset/ETHPy150Open openstack/python-barbicanclient/barbicanclient/tests/test_containers.py/WhenTestingContainers.test_should_not_be_able_to_set_generated_attributes
7,956
def __negotiatehttp(self, destaddr, destport):
        """__negotiatehttp(self,destaddr,destport)
        Negotiates a connection through an HTTP server.
        """
        # If we need to resolve locally, we do this now
        if not self.__proxy[3]:
            addr = socket.gethostbyname(destaddr)
        else:
            addr = destaddr
        headers = ["CONNECT ", addr, ":", str(destport), " HTTP/1.1\r\n"]
        headers += ["Host: ", destaddr, "\r\n"]
        if (self.__proxy[4] != None and self.__proxy[5] != None):
            headers += [self.__getauthheader(), "\r\n"]
        headers.append("\r\n")
        self.sendall("".join(headers).encode())
        # We read the response until we get the string "\r\n\r\n"
        resp = self.recv(1)
        while resp.find("\r\n\r\n".encode()) == -1:
            resp = resp + self.recv(1)
        # We just need the first line to check if the connection
        # was successful
        statusline = resp.splitlines()[0].split(" ".encode(), 2)
        if statusline[0] not in ("HTTP/1.0".encode(), "HTTP/1.1".encode()):
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        try:
            statuscode = int(statusline[1])
        except __HOLE__:
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        if statuscode != 200:
            self.close()
            raise HTTPError((statuscode, statusline[2]))
        self.__proxysockname = ("0.0.0.0", 0)
        self.__proxypeername = (addr, destport)
ValueError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/httplib2/httplib2/socks.py/socksocket.__negotiatehttp
7,957
def import_validator(validator):
    try:
        import_name, function_name = validator.rsplit('.', 1)
    except __HOLE__:
        # no dot; treat it as a global
        func = globals().get(validator, None)
        if not func:
            # we use ImportError to keep error handling for callers simple
            raise ImportError
        return validator
    else:
        # The below __import__() call is from python docs, and is equivalent to:
        #
        #   from import_name import function_name
        #
        import_module = __import__(import_name, globals(), locals(),
                                   [function_name])
        return getattr(import_module, function_name)
ValueError
dataset/ETHPy150Open dokterbob/satchmo/satchmo/apps/product/utils.py/import_validator
7,958
def autoclean(input_dataframe, drop_nans=False, copy=False,
              encoder=None, encoder_kwargs=None, ignore_update_check=False):
    """Performs a series of automated data cleaning transformations on the provided data set

    Parameters
    ----------
    input_dataframe: pandas.DataFrame
        Data set to clean
    drop_nans: bool
        Drop all rows that have a NaN in any column (default: False)
    copy: bool
        Make a copy of the data set (default: False)
    encoder: category_encoders transformer
        The a valid category_encoders transformer which is passed an inferred cols list. Default (None: LabelEncoder)
    encoder_kwargs: category_encoders
        The a valid sklearn transformer to encode categorical features. Default (None)
    ignore_update_check: bool
        Do not check for the latest version of datacleaner

    Returns
    ----------
    output_dataframe: pandas.DataFrame
        Cleaned data set
    """
    global update_checked
    if ignore_update_check:
        update_checked = True

    if not update_checked:
        update_check('datacleaner', __version__)
        update_checked = True

    if copy:
        input_dataframe = input_dataframe.copy()

    if drop_nans:
        input_dataframe.dropna(inplace=True)

    if encoder_kwargs is None:
        encoder_kwargs = {}

    for column in input_dataframe.columns.values:
        # Replace NaNs with the median or mode of the column depending on the column type
        try:
            input_dataframe[column].fillna(input_dataframe[column].median(), inplace=True)
        except __HOLE__:
            input_dataframe[column].fillna(input_dataframe[column].mode()[0], inplace=True)

        # Encode all strings with numerical equivalents
        if str(input_dataframe[column].values.dtype) == 'object':
            if encoder is not None:
                column_encoder = encoder(**encoder_kwargs).fit(input_dataframe[column].values)
            else:
                column_encoder = LabelEncoder().fit(input_dataframe[column].values)

            input_dataframe[column] = column_encoder.transform(input_dataframe[column].values)

    return input_dataframe
TypeError
dataset/ETHPy150Open rhiever/datacleaner/datacleaner/datacleaner.py/autoclean
7,959
def autoclean_cv(training_dataframe, testing_dataframe, drop_nans=False, copy=False,
                 encoder=None, encoder_kwargs=None, ignore_update_check=False):
    """Performs a series of automated data cleaning transformations on the provided training and testing data sets

    Unlike `autoclean()`, this function takes cross-validation into account by learning
    the data transformations from only the training set, then applying those transformations
    to both the training and testing set. By doing so, this function will prevent information
    leak from the training set into the testing set.

    Parameters
    ----------
    training_dataframe: pandas.DataFrame
        Training data set
    testing_dataframe: pandas.DataFrame
        Testing data set
    drop_nans: bool
        Drop all rows that have a NaN in any column (default: False)
    copy: bool
        Make a copy of the data set (default: False)
    encoder: category_encoders transformer
        The a valid category_encoders transformer which is passed an inferred cols list. Default (None: LabelEncoder)
    encoder_kwargs: category_encoders
        The a valid sklearn transformer to encode categorical features. Default (None)
    ignore_update_check: bool
        Do not check for the latest version of datacleaner

    Returns
    ----------
    output_training_dataframe: pandas.DataFrame
        Cleaned training data set
    output_testing_dataframe: pandas.DataFrame
        Cleaned testing data set
    """
    global update_checked
    if ignore_update_check:
        update_checked = True

    if not update_checked:
        update_check('datacleaner', __version__)
        update_checked = True

    if set(training_dataframe.columns.values) != set(testing_dataframe.columns.values):
        raise ValueError('The training and testing DataFrames do not have the same columns. '
                         'Make sure that you are providing the same columns.')

    if copy:
        training_dataframe = training_dataframe.copy()
        testing_dataframe = testing_dataframe.copy()

    if drop_nans:
        training_dataframe.dropna(inplace=True)
        testing_dataframe.dropna(inplace=True)

    if encoder_kwargs is None:
        encoder_kwargs = {}

    for column in training_dataframe.columns.values:
        # Replace NaNs with the median or mode of the column depending on the column type
        try:
            column_median = training_dataframe[column].median()
            training_dataframe[column].fillna(column_median, inplace=True)
            testing_dataframe[column].fillna(column_median, inplace=True)
        except __HOLE__:
            column_mode = training_dataframe[column].mode()[0]
            training_dataframe[column].fillna(column_mode, inplace=True)
            testing_dataframe[column].fillna(column_mode, inplace=True)

        # Encode all strings with numerical equivalents
        if str(training_dataframe[column].values.dtype) == 'object':
            if encoder is not None:
                column_encoder = encoder(**encoder_kwargs).fit(training_dataframe[column].values)
            else:
                column_encoder = LabelEncoder().fit(training_dataframe[column].values)

            training_dataframe[column] = column_encoder.transform(training_dataframe[column].values)
            testing_dataframe[column] = column_encoder.transform(testing_dataframe[column].values)

    return training_dataframe, testing_dataframe
TypeError
dataset/ETHPy150Open rhiever/datacleaner/datacleaner/datacleaner.py/autoclean_cv
7,960
def main():
    """
    Simple command-line program for Uploading a file from host to guest
    """

    args = get_args()
    vm_path = args.path_inside_vm
    try:
        service_instance = connect.SmartConnect(host=args.host,
                                                user=args.user,
                                                pwd=args.password,
                                                port=int(args.port))

        atexit.register(connect.Disconnect, service_instance)

        content = service_instance.RetrieveContent()

        vm = content.searchIndex.FindByUuid(None, args.vm_uuid, True)
        tools_status = vm.guest.toolsStatus
        if (tools_status == 'toolsNotInstalled' or
                tools_status == 'toolsNotRunning'):
            raise SystemExit(
                "VMwareTools is either not running or not installed. "
                "Rerun the script after verifying that VMWareTools "
                "is running")

        creds = vim.vm.guest.NamePasswordAuthentication(
            username=args.vm_user, password=args.vm_pwd)
        with open(args.upload_file, 'rb') as myfile:
            args = myfile.read()

        try:
            file_attribute = vim.vm.guest.FileManager.FileAttributes()
            url = content.guestOperationsManager.fileManager. \
                InitiateFileTransferToGuest(vm, creds, vm_path,
                                            file_attribute,
                                            len(args), True)
            resp = requests.put(url, data=args, verify=False)
            if not resp.status_code == 200:
                print "Error while uploading file"
            else:
                print "Successfully uploaded file"
        except __HOLE__, e:
            print e
    except vmodl.MethodFault as error:
        print "Caught vmodl fault : " + error.msg
        return -1

    return 0

# Start program
IOError
dataset/ETHPy150Open vmware/pyvmomi-community-samples/samples/upload_file_to_vm.py/main
7,961
def work_completed(self, success=True):
        '''Receives and consumes work completed from the victim, analyzes
        the work, and returns True if the attack is complete (victory),
        otherwise returns False if more work is needed.

        It also creates the new work that is needed.

        Post-condition: Either the attack is completed, or there is work
        to do (there are unstarted samplesets in the database).'''
        try:
            if success:
                # Call sniffer to get captured data
                capture, records = self._collect_capture()
                logger.debug('Work completed:')
                logger.debug('\tLength: {}'.format(len(capture)))
                logger.debug('\tRecords: {}'.format(records))

                # Check if all TLS response records were captured,
                # if available
                if self._victim.target.recordscardinality:
                    assert records == SAMPLES_PER_SAMPLESET * self._victim.target.recordscardinality, 'Not all records captured'
            else:
                logger.debug('Client returned fail to realtime')
                assert success

            # Stop data collection and delete sniffer
            self._sniffer.delete()
        except (requests.HTTPError, requests.exceptions.ConnectionError, __HOLE__), err:
            if isinstance(err, requests.HTTPError):
                status_code = err.response.status_code
                logger.warning('Caught {} while trying to collect capture and delete sniffer.'.format(status_code))

                # If status was raised due to malformed capture,
                # delete sniffer to avoid conflict.
                if status_code == 422:
                    try:
                        self._sniffer.delete()
                    except (requests.HTTPError, requests.exceptions.ConnectionError), err:
                        logger.warning('Caught error when trying to delete sniffer: {}'.format(err))
            elif isinstance(err, requests.exceptions.ConnectionError):
                logger.warning('Caught ConnectionError')
            elif isinstance(err, AssertionError):
                logger.warning('Realtime reported unsuccessful capture')
                try:
                    self._sniffer.delete()
                except (requests.HTTPError, requests.exceptions.ConnectionError), err:
                    logger.warning('Caught error when trying to delete sniffer: {}'.format(err))

            # An error occurred, so if there is a started sampleset mark it as failed
            if SampleSet.objects.filter(round=self._round, completed=None).exclude(started=None):
                self._mark_current_work_completed()
            return False

        self._mark_current_work_completed(capture)

        round_samplesets = SampleSet.objects.filter(round=self._round)
        unstarted_samplesets = round_samplesets.filter(started=None)

        if unstarted_samplesets:
            # Batch is not yet complete, we need to collect more samplesets
            # that have already been created for this batch.
            return False

        # All batches are completed.
        self._analyze_current_round()

        if self._round_is_completed():
            # Advance to the next round.
            self._create_next_round()

            if self._attack_is_completed():
                return True

        # Not enough confidence, we need to create more samplesets to be
        # collected for this round.
        self._create_round_samplesets()

        return False
AssertionError
dataset/ETHPy150Open dionyziz/rupture/backend/breach/strategy.py/Strategy.work_completed
7,962
@register.tag()
def suggested(parser, token):
    """
    Returns a list of Recommendation (suggestions of objects) for the current user.

    Usage:
    ::

        {% suggested as suggestions [limit 5] %}
        {% for suggested in suggestions %}
            {{ suggested.object }}
        {% endfor %}
    """
    bits = token.contents.split()
    varname = bits[2]
    try:
        limit = int(bits[4])
    except __HOLE__:
        limit = 5
    return SuggestionNode(varname, limit)
IndexError
dataset/ETHPy150Open fcurella/django-recommends/recommends/templatetags/recommends.py/suggested
7,963
def run(self):
        self.assert_has_content()
        if self.arguments:
            language = self.arguments[0]
        else:
            language = ''
        set_classes(self.options)
        classes = ['code', language]
        if 'classes' in self.options:
            classes.extend(self.options['classes'])

        # TODO: config setting to skip lexical analysis:
        ## if document.settings.no_highlight:
        ##     language = ''

        # set up lexical analyzer
        tokens = Lexer(self.content, language)

        if 'number-lines' in self.options:
            # optional argument `startline`, defaults to 1
            try:
                startline = int(self.options['number-lines'] or 1)
            except __HOLE__:
                raise self.error(':number-lines: with non-integer start value')
            endline = startline + len(self.content)
            # add linenumber filter:
            tokens = NumberLines(tokens, startline, endline)

        node = nodes.literal_block('\n'.join(self.content), classes=classes)
        self.add_name(node)

        # analyze content and add nodes for every token
        for cls, value in tokens:
            # print (cls, value)
            if cls in unstyled_tokens:
                # insert as Text to decrease the verbosity of the output.
                node += nodes.Text(value, value)
            else:
                node += nodes.inline(value, value, classes=[cls])

        return [node]

# Register Directive
# ------------------
# ::
ValueError
dataset/ETHPy150Open espeed/lightbulb/lightbulb/directives/pygments_code_block_directive.py/CodeBlock.run
7,964
def in_ipython():
    try:
        __IPYTHON__
    except __HOLE__:
        return False
    else:
        return True
NameError
dataset/ETHPy150Open emansim/text2image/mnist-captions/sample.py/in_ipython
7,965
def _get_metadata(self, source, source_abspath):
        doctree = docutils.core.publish_doctree(source)
        docinfo = doctree.traverse(docutils.nodes.docinfo)
        try:
            meta = self._process_standard_fields(docinfo)
            meta = self._process_custom_fields(meta)
        except __HOLE__:
            print "ERROR: Source file is missing data: %s" % source_abspath
            raise
        for key, value in meta.items():
            meta[key] = value.astext()
        return meta
IndexError
dataset/ETHPy150Open espeed/lightbulb/lightbulb/engine.py/Parser._get_metadata
7,966
def getdata(filename, *args, **kwargs):
    """
    Get the data from an extension of a FITS file (and optionally the
    header).

    Parameters
    ----------
    filename : file path, file object, or file like object
        File to get data from.  If opened, mode must be one of the
        following rb, rb+, or ab+.

    ext
        The rest of the arguments are for extension specification.
        They are flexible and are best illustrated by examples.

        No extra arguments implies the primary header::

            >>> getdata('in.fits')

        By extension number::

            >>> getdata('in.fits', 0)      # the primary header
            >>> getdata('in.fits', 2)      # the second extension
            >>> getdata('in.fits', ext=2)  # the second extension

        By name, i.e., ``EXTNAME`` value (if unique)::

            >>> getdata('in.fits', 'sci')
            >>> getdata('in.fits', extname='sci')  # equivalent

        Note ``EXTNAME`` values are not case sensitive

        By combination of ``EXTNAME`` and EXTVER`` as separate
        arguments or as a tuple::

            >>> getdata('in.fits', 'sci', 2)  # EXTNAME='SCI' & EXTVER=2
            >>> getdata('in.fits', extname='sci', extver=2)  # equivalent
            >>> getdata('in.fits', ('sci', 2))  # equivalent

        Ambiguous or conflicting specifications will raise an exception::

            >>> getdata('in.fits', ext=('sci',1), extname='err', extver=2)

    header : bool, optional
        If `True`, return the data and the header of the specified HDU as a
        tuple.

    lower, upper : bool, optional
        If ``lower`` or ``upper`` are `True`, the field names in the
        returned data object will be converted to lower or upper case,
        respectively.

    view : ndarray, optional
        When given, the data will be returned wrapped in the given ndarray
        subclass by calling::

            data.view(view)

    kwargs
        Any additional keyword arguments to be passed to `pyfits.open`.

    Returns
    -------
    array : array, record array or groups data object
        Type depends on the type of the extension being referenced.

        If the optional keyword ``header`` is set to `True`, this
        function will return a (``data``, ``header``) tuple.
    """

    mode, closed = _get_file_mode(filename)
    header = kwargs.pop('header', None)
    lower = kwargs.pop('lower', None)
    upper = kwargs.pop('upper', None)
    view = kwargs.pop('view', None)

    hdulist, extidx = _getext(filename, mode, *args, **kwargs)
    hdu = hdulist[extidx]
    data = hdu.data
    if data is None and extidx == 0:
        try:
            hdu = hdulist[1]
            data = hdu.data
        except __HOLE__:
            raise IndexError('No data in this HDU.')
    if data is None:
        raise IndexError('No data in this HDU.')
    if header:
        hdr = hdu.header

    hdulist.close(closed=closed)

    # Change case of names if requested
    trans = None
    if lower:
        trans = lambda s: s.lower()
    elif upper:
        trans = lambda s: s.upper()
    if trans:
        if data.dtype.names is None:
            # this data does not have fields
            return
        if data.dtype.descr[0][0] == '':
            # this data does not have fields
            return
        data.dtype.names = [trans(n) for n in data.dtype.names]

    # allow different views into the underlying ndarray.  Keep the original
    # view just in case there is a problem
    if isinstance(view, type) and issubclass(view, np.ndarray):
        data = data.view(view)

    if header:
        return data, hdr
    else:
        return data
IndexError
dataset/ETHPy150Open spacetelescope/PyFITS/pyfits/convenience.py/getdata
7,967
def _stat_filename_or_fileobj(filename):
    closed = fileobj_closed(filename)
    name = fileobj_name(filename) or ''

    try:
        loc = filename.tell()
    except __HOLE__:
        loc = 0

    noexist_or_empty = ((name and
                         (not os.path.exists(name) or
                          (os.path.getsize(name) == 0)))
                        or (not name and loc == 0))

    return name, closed, noexist_or_empty
AttributeError
dataset/ETHPy150Open spacetelescope/PyFITS/pyfits/convenience.py/_stat_filename_or_fileobj
7,968
def get_multiline(self,f,m):
        content = []
        next_line = ''
        while not re.search("^}",next_line):
            content.append(next_line)
            try:
                next_line = next(f)
            except __HOLE__:
                # This will happen at end of file
                next_line = None
                break
        content = "".join(content)
        return content, next_line
StopIteration
dataset/ETHPy150Open mogui/pyorient/pyorient/groovy.py/Scanner.get_multiline
7,969
@assert_equal.register(dict, dict)
def assert_dict_equal(result, expected, path=(), **kwargs):
    if path is None:
        path = ()

    result_keys = viewkeys(result)
    expected_keys = viewkeys(expected)
    if result_keys != expected_keys:
        if result_keys > expected_keys:
            diff = result_keys - expected_keys
            msg = 'extra %s in result: %r' % (_s('key', diff), diff)
        elif result_keys < expected_keys:
            diff = expected_keys - result_keys
            msg = 'result is missing %s: %r' % (_s('key', diff), diff)
        else:
            sym = result_keys ^ expected_keys
            in_result = sym - expected_keys
            in_expected = sym - result_keys
            msg = '%s only in result: %s\n%s only in expected: %s' % (
                _s('key', in_result),
                in_result,
                _s('key', in_expected),
                in_expected,
            )
        raise AssertionError(
            'dict keys do not match\n%s\n%s' % (
                msg,
                _fmt_path(path + ('.%s()' % ('viewkeys' if PY2 else 'keys'),)),
            ),
        )

    failures = []
    for k, (resultv, expectedv) in iteritems(dzip_exact(result, expected)):
        try:
            assert_equal(
                resultv,
                expectedv,
                path=path + ('[%r]' % k,),
                **kwargs
            )
        except __HOLE__ as e:
            failures.append(str(e))

    if failures:
        raise AssertionError('\n'.join(failures))
AssertionError
dataset/ETHPy150Open quantopian/zipline/zipline/testing/predicates.py/assert_dict_equal
7,970
def test_html(self):
        try:
            raise ValueError("Hello World")
        except __HOLE__ as err:
            # If the html was templated we could do a bit more here.
            # At least check that we get details on what we just raised.
            html = cgitb.html(sys.exc_info())
            self.assertIn("ValueError", html)
            self.assertIn(str(err), html)
ValueError
dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_cgitb.py/TestCgitb.test_html
7,971
def test_text(self):
        try:
            raise ValueError("Hello World")
        except __HOLE__ as err:
            text = cgitb.text(sys.exc_info())
            self.assertIn("ValueError", text)
            self.assertIn("Hello World", text)
ValueError
dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_cgitb.py/TestCgitb.test_text
7,972
def test_raise_exception_if_flag_doesnt_exist_but_config_flag_is_set(self):
        self.app.config[FLAG_CONFIG] = {}
        self.app.config[RAISE_ERROR] = True
        self.app.debug = True

        with self.app.test_request_context('/'):
            url = url_for('view_based_feature_flag')
            try:
                self.test_client.get(url)
            except __HOLE__:
                # assertRaises no worky for some reason :/
                pass
            else:
                raise AssertionError("We expected to throw a KeyError, but didn't.")
KeyError
dataset/ETHPy150Open trustrachel/Flask-FeatureFlags/tests/test_core_function.py/TestFeatureFlagCoreFunctionality.test_raise_exception_if_flag_doesnt_exist_but_config_flag_is_set
7,973
def test_raise_exception_if_config_section_doesnt_exist_and_config_flag_is_set(self):
        del self.app.config[FLAG_CONFIG]
        self.app.config[RAISE_ERROR] = True
        self.app.debug = True

        with self.app.test_request_context('/'):
            url = url_for('view_based_feature_flag')
            try:
                self.test_client.get(url)
            except __HOLE__:
                # assertRaises no worky for some reason :/
                pass
KeyError
dataset/ETHPy150Open trustrachel/Flask-FeatureFlags/tests/test_core_function.py/TestFeatureFlagCoreFunctionality.test_raise_exception_if_config_section_doesnt_exist_and_config_flag_is_set
7,974
def init():
    o = ip.options
    try:
        o.verbose
    except __HOLE__:
        o.allow_new_attr (True )
        o.verbose = 0

    ip.system = (sys.platform == 'win32' and new_ipsystem_win32 or
                 new_ipsystem_posix)
AttributeError
dataset/ETHPy150Open ipython/ipython-py3k/IPython/quarantine/ipy_signals.py/init
7,975
def get_entry(ts, entryid):
    entries = ts.entries()
    try:
        id = int(entryid)
    except __HOLE__:
        # the entry is not an ID.
        regex = re.compile(entryid, flags = re.IGNORECASE)
        matches = []
        for entry in entries:
            if regex.search(entry.task_name):
                matches += [entry]
        if len(matches) is 1:
            return matches[0]
        elif len(matches) >= 2:
            print 'More than one match found. Narrow your search.'
            return None
    else:
        # the entry is an ID.
        for entry in entries:
            if entry.id == id:
                return entry
ValueError
dataset/ETHPy150Open jakebasile/reap/reap/commands/basic.py/get_entry
7,976
def login(args):
    password = getpass.getpass()
    try:
        ts = reap.api.timesheet.Timesheet(args.baseuri, args.username, password)
    except __HOLE__:
        print 'Invalid Credentials.'
        return
    except urllib2.URLError:
        print 'Unable to communicate. Check information and try again.'
        return
    keyring.set_password(args.baseuri, args.username, password)
    save_info(args.baseuri, args.username)
    print 'You are now logged in.'
ValueError
dataset/ETHPy150Open jakebasile/reap/reap/commands/basic.py/login
7,977
def _cleanup_socket(self, socket_id):
        try:
            os.remove(self._socket_path(socket_id))
        except __HOLE__:
            pass
OSError
dataset/ETHPy150Open codalab/codalab-cli/codalab/model/worker_model.py/WorkerModel._cleanup_socket
7,978
def _pythonize(value):
    if value in _PYTHON_CONSTANTS:
        return _PYTHON_CONSTANTS[value]
    for convert in int, float:
        try:
            return convert(value)
        except __HOLE__:
            pass
    if value[:1] == value[-1:] and value[0] in '"\'':
        value = value[1:-1]
    return str(value)
ValueError
dataset/ETHPy150Open quantmind/pulsar/pulsar/apps/wsgi/route.py/_pythonize
7,979
def safe_url(self, params=None):
        try:
            if params:
                return self.url(**params)
            else:
                return self.url()
        except __HOLE__:
            return None
KeyError
dataset/ETHPy150Open quantmind/pulsar/pulsar/apps/wsgi/route.py/Route.safe_url
7,980
def test_AlmostEqual(self):
        self.assertAlmostEqual(1.00000001, 1.0)
        self.assertNotAlmostEqual(1.0000001, 1.0)
        self.assertRaises(self.failureException,
                          self.assertAlmostEqual, 1.0000001, 1.0)
        self.assertRaises(self.failureException,
                          self.assertNotAlmostEqual, 1.00000001, 1.0)

        self.assertAlmostEqual(1.1, 1.0, places=0)
        self.assertRaises(self.failureException,
                          self.assertAlmostEqual, 1.1, 1.0, places=1)

        self.assertAlmostEqual(0, .1+.1j, places=0)
        self.assertNotAlmostEqual(0, .1+.1j, places=1)
        self.assertRaises(self.failureException,
                          self.assertAlmostEqual, 0, .1+.1j, places=1)
        self.assertRaises(self.failureException,
                          self.assertNotAlmostEqual, 0, .1+.1j, places=0)

        try:
            self.assertAlmostEqual(float('inf'), float('inf'))
            self.assertRaises(self.failureException, self.assertNotAlmostEqual,
                              float('inf'), float('inf'))
        except __HOLE__:
            # float('inf') is invalid on Windows in Python 2.4 / 2.5
            x = object()
            self.assertAlmostEqual(x, x)
            self.assertRaises(self.failureException, self.assertNotAlmostEqual,
                              x, x)
ValueError
dataset/ETHPy150Open balanced/status.balancedpayments.com/venv/lib/python2.7/site-packages/unittest2/test/test_assertions.py/Test_Assertions.test_AlmostEqual
7,981
def psoc5lp_reader(csv_file):
    '''Extract pin data from a Cypress PSoC5LP CSV file and return a
    dictionary of pin data.  The CSV file contains one or more groups of
    rows formatted as follows:

        A row with a single field containing the part number.

        Zero or more blank rows.

        A row containing the column headers:
        'Pin', 'Unit', 'Type', 'Style', 'Side', and 'Name'.
        (Only 'Pin' and 'Name' are required. The order of the columns
        is not important.)

        Each succeeding row should contain:
            The 'Pin' column should contain the pin number.
            The 'Unit' column specifies the bank or unit number for the pin.
            The 'Type' column specifies the pin type (input, output,...).
            The 'Style' column specifies the pin's schematic style.
            The 'Side' column specifies the side of the symbol the pin is on.
            The 'Name' column contains the pin name.

        A blank row terminates the pin data for the part and begins
        a new group of rows for another part.
    '''

    while True:
        # Create a dictionary that uses the unit numbers as keys. Each entry in this dictionary
        # contains another dictionary that uses the side of the symbol as a key. Each entry in
        # that dictionary uses the pin names in that unit and on that side as keys. Each entry
        # in that dictionary is a list of Pin objects with each Pin object having the same name
        # as the dictionary key. So the pins are separated into units at the top level, and then
        # the sides of the symbol, and then the pins with the same name that are on that side
        # of the unit.
        pin_data = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))

        # Create a reader that starts from the current position in the CSV file.
        csv_reader = csv.reader(csv_file, skipinitialspace=True)

        # Extract part number from the first non-blank line. Break out of the infinite
        # while loop and stop processing this file if no part number is found.
        part_num = get_part_num(csv_reader)
        if part_num is None:
            break

        # Get the column header row for the part's pin data.
        headers = clean_headers(get_nonblank_row(csv_reader))

        # Now create a DictReader for grabbing the pin data in each row.
        dict_reader = csv.DictReader(csv_file, headers, skipinitialspace=True)
        for index, row in enumerate(dict_reader):

            # A blank line signals the end of the pin data.
            if num_row_elements(list(row.values())) == 0:
                break

            # Get the pin attributes from the cells of the row of data.
            pin = copy.copy(DEFAULT_PIN)
            pin.index = index
            pin.type = ''
            for c, a in list(COLUMN_NAMES.items()):
                try:
                    if c == 'name':
                        row[c] = psoc5lp_pin_name_process(row[c])
                    setattr(pin, a, row[c])
                except __HOLE__:
                    pass
            if pin.num is None:
                issue(
                    'ERROR: No pin number on row {index} of {part_num}'.format(
                        index=index, part_num=part_num),
                    level='error')
            if pin.type == '':
                # No explicit pin type, so infer it from the pin name.
                DEFAULT_PIN_TYPE = 'input'  # Assign this pin type if name inference can't be made.
                PIN_TYPE_PREFIXES = [
                    (r'P[0-9]+\[[0-9]+\]', 'bidirectional'),
                    (r'VCC', 'power_out'),
                    (r'VDD', 'power_in'),
                    (r'VSS', 'power_in'),
                    (r'IND', 'passive'),
                    (r'VBOOST', 'input'),
                    (r'VBAT', 'power_in'),
                    (r'XRES', 'input'),
                    (r'NC', 'no_connect'),
                ]
                for prefix, typ in PIN_TYPE_PREFIXES:
                    if re.match(prefix, pin.name, re.IGNORECASE):
                        pin.type = typ
                        break
                else:
                    issue(
                        'No match for pin {} on part {}, assigning as {}'.format(
                            pin.name, part_num, DEFAULT_PIN_TYPE))
                    pin.type = DEFAULT_PIN_TYPE

            # Add the pin from this row of the CVS file to the pin dictionary.
            # Place all the like-named pins into a list under their common name.
            # We'll unbundle them later, if necessary.
            pin_data[pin.unit][pin.side][pin.name].append(pin)

        yield part_num, pin_data  # Return the dictionary of pins extracted from the CVS file.
KeyError
dataset/ETHPy150Open xesscorp/KiPart/kipart/psoc5lp_reader.py/psoc5lp_reader
7,982
def _consumer(consume, queue):
    """Infinity worker that consumes tasks from queue.

    :param consume: method that consumes an object removed from the queue
    :param queue: deque object to popleft() objects from
    """
    cache = {}
    while True:
        if not queue:
            break
        else:
            try:
                args = queue.popleft()
            except __HOLE__:
                # consumed by other thread
                continue
            try:
                consume(cache, args)
            except Exception as e:
                LOG.warning(_("Failed to consume a task from the queue: %s") % e)
                if logging.is_debug():
                    LOG.exception(e)
IndexError
dataset/ETHPy150Open openstack/rally/rally/common/broker.py/_consumer
7,983
@extensionclassmethod(Observable)
def on_error_resume_next(cls, *args):
    """Continues an observable sequence that is terminated normally or by
    an exception with the next observable sequence.

    1 - res = Observable.on_error_resume_next(xs, ys, zs)
    2 - res = Observable.on_error_resume_next([xs, ys, zs])
    3 - res = Observable.on_error_resume_next(xs, factory)

    Returns an observable sequence that concatenates the source sequences,
    even if a sequence terminates exceptionally.
    """

    if args and isinstance(args[0], list):
        sources = iter(args[0])
    else:
        sources = iter(args)

    def subscribe(observer):
        subscription = SerialDisposable()

        def action(this, state=None):
            try:
                source = next(sources)
            except __HOLE__:
                observer.on_completed()
                return

            # Allow source to be a factory method taking an error
            source = source(state) if callable(source) else source
            current = Observable.from_future(source)

            d = SingleAssignmentDisposable()
            subscription.disposable = d
            d.disposable = current.subscribe(
                observer.on_next,
                lambda ex: this(ex),
                this)

        cancelable = immediate_scheduler.schedule_recursive(action)
        return CompositeDisposable(subscription, cancelable)
    return AnonymousObservable(subscribe)
StopIteration
dataset/ETHPy150Open ReactiveX/RxPY/rx/linq/observable/onerrorresumenext.py/on_error_resume_next
7,984
def coerce_put_post(request):
    """
    Django doesn't particularly understand REST.
    In case we send data over PUT, Django won't
    actually look at the data and load it. We need
    to twist its arm here.

    The try/except abominiation here is due to a bug
    in mod_python. This should fix it.

    This is borrowed from django-piston.
    """
    if request.method == "PUT":
        # Bug fix: if _load_post_and_files has already been called, for
        # example by middleware accessing request.POST, the below code to
        # pretend the request is a POST instead of a PUT will be too late
        # to make a difference. Also calling _load_post_and_files will result
        # in the following exception:
        #   AttributeError: You cannot set the upload handlers after the upload
        #   has been processed.
        # The fix is to check for the presence of the _post field which is set
        # the first time _load_post_and_files is called (both by wsgi.py and
        # modpython.py). If it's set, the request has to be 'reset' to redo
        # the query value parsing in POST mode.
        if hasattr(request, '_post'):
            del request._post
            del request._files
            request._read_started = False

        try:
            request.method = "POST"
            request._load_post_and_files()
            request.method = "PUT"
        except __HOLE__:
            request.META['REQUEST_METHOD'] = 'POST'
            request._load_post_and_files()
            request.META['REQUEST_METHOD'] = 'PUT'

        request.PUT = request.POST
AttributeError
dataset/ETHPy150Open bueda/django-comrade/comrade/http/__init__.py/coerce_put_post
7,985
def submit_job(self, data, t):
        """ Handles recieving work submission and checking that it is valid
        , if it meets network diff, etc. Sends reply to stratum client. """
        params = data['params']
        # [worker_name, job_id, extranonce2, ntime, nonce]
        # ["slush.miner1", "bf", "00000001", "504e86ed", "b2957c02"]
        if __debug__:
            self.logger.debug(
                "Recieved work submit:\n\tworker_name: {0}\n\t"
                "job_id: {1}\n\textranonce2: {2}\n\t"
                "ntime: {3}\n\tnonce: {4} ({int_nonce})"
                .format(
                    *params,
                    int_nonce=struct.unpack(str("<L"), unhexlify(params[4]))))

        if self.idle:
            self.idle = False
            self.server.idle_clients -= 1

        self.last_share_submit = time.time()

        try:
            difficulty, job = self.job_mapper[data['params'][1]]
            job = job()  # weakref will be None if the job has been GCed
        except KeyError:
            try:
                difficulty, job = self.old_job_mapper[data['params'][1]]
                job = job()  # weakref will be None if the job has been GCed
            except __HOLE__:
                job = None  # Job not in jobmapper at all, we got a bogus submit
                # since we can't identify the diff we just have to assume it's
                # current diff
                difficulty = self.difficulty

        if job not in self.server.active_jobs:
            self.send_error(self.STALE_SHARE_ERR, id_val=data['id'])
            self.reporter.log_share(client=self,
                                    diff=self.difficulty,
                                    typ=self.STALE_SHARE,
                                    params=params,
                                    job=job,
                                    start=t)
            return difficulty, self.STALE_SHARE

        # assemble a complete block header bytestring
        header = job.block_header(
            nonce=params[4],
            extra1=self._id,
            extra2=params[2],
            ntime=params[3])

        # Check a submitted share against previous shares to eliminate
        # duplicates
        share_lower = (self._id.lower(), params[2].lower(), params[4].lower(), params[3].lower())
        if share_lower in job.acc_shares:
            self.logger.info("Duplicate share rejected from worker {}.{}!"
                             .format(self.address, self.worker))
            self.send_error(self.DUP_SHARE_ERR, id_val=data['id'])
            self.reporter.log_share(client=self,
                                    diff=difficulty,
                                    typ=self.DUP_SHARE,
                                    params=params,
                                    job=job,
                                    start=t)
            return difficulty, self.DUP_SHARE

        job_target = target_from_diff(difficulty, job.diff1)
        hash_int = uint256_from_str(self.algo['module'](header))
        if hash_int >= job_target:
            self.logger.info("Low diff share rejected from worker {}.{}!"
                             .format(self.address, self.worker))
            self.send_error(self.LOW_DIFF_ERR, id_val=data['id'])
            self.reporter.log_share(client=self,
                                    diff=difficulty,
                                    typ=self.LOW_DIFF_SHARE,
                                    params=params,
                                    job=job,
                                    start=t)
            return difficulty, self.LOW_DIFF_SHARE

        # we want to send an ack ASAP, so do it here
        self.send_success(id_val=data['id'])

        # Add the share to the accepted set to check for dups
        job.acc_shares.add(share_lower)
        self.accepted_shares += difficulty
        self.reporter.log_share(client=self,
                                diff=difficulty,
                                typ=self.VALID_SHARE,
                                params=params,
                                job=job,
                                header_hash=hash_int,
                                header=header,
                                start=t)

        return difficulty, self.VALID_SHARE
KeyError
dataset/ETHPy150Open simplecrypto/powerpool/powerpool/stratum_server.py/StratumClient.submit_job
7,986
@loop(fin='stop', exit_exceptions=(socket.error, ))
    def read(self):
        # designed to time out approximately "push_job_interval" after the user
        # last recieved a job. Some miners will consider the mining server dead
        # if they don't recieve something at least once a minute, regardless of
        # whether a new job is _needed_. This aims to send a job _only_ as
        # often as needed
        line = with_timeout(time.time() - self.last_job_push +
                            self.config['push_job_interval'] - self.time_seed,
                            self.fp.readline,
                            timeout_value='timeout')

        if line == 'timeout':
            t = time.time()
            # Set idle status if they haven't submitted in the perscribed time
            if not self.idle and (t - self.last_share_submit) > self.config['idle_worker_threshold']:
                self.idle = True
                self.server.idle_clients += 1

            # Disconnect if we havne't heard from them in a while
            if (t - self.last_share_submit) > self.config['idle_worker_disconnect_threshold']:
                self.logger.info("Disconnecting worker {}.{} at ip {} for inactivity"
                                 .format(self.address, self.worker, self.peer_name[0]))
                self.stop()

            # push a new job if
            if (self.authenticated is True and  # don't send to non-authed
                    # force send if we need to push a new difficulty
                    (self.next_diff != self.difficulty or
                     # send if we're past the push interval
                     t > (self.last_job_push + self.config['push_job_interval'] -
                          self.time_seed))):
                # Since they might not be submitting jobs due to low hashrate,
                # check vardiff on timeout in addition to on mining submit
                if self.config['vardiff']['enabled'] is True:
                    # If recalc didn't need to adjust then we need to push
                    # because we're timed out
                    if not self.recalc_vardiff():
                        self.push_job(timeout=True)
                else:
                    self.push_job(timeout=True)

            # Continue loop, we just sent new job after timeout
            return

        line = line.strip()

        # Reading from a defunct connection yeilds an EOF character which gets
        # stripped off
        if not line:
            raise LoopExit("Closed file descriptor encountered")

        try:
            data = json.loads(line)
        except __HOLE__:
            self.logger.warn("Data {}.. not JSON".format(line[:15]))
            self.send_error()
            self._incr('unk_err')
            return

        # handle malformed data
        data.setdefault('id', 1)
        data.setdefault('params', [])

        if __debug__:
            self.logger.debug("Data {} recieved on client {}".format(data, self._id))

        # run a different function depending on the action requested from
        # user
        if 'method' not in data:
            self.logger.warn("Empty action in JSON {}".format(self.peer_name[0]))
            self._incr('unk_err')
            self.send_error(id_val=data['id'])
            return

        meth = data['method'].lower()
        if meth == 'mining.subscribe':
            if self.subscribed is True:
                self.send_error(id_val=data['id'])
                return

            try:
                self.client_type = data['params'][0]
            except IndexError:
                pass
            ret = {
                'result': (
                    (
                        # These values aren't used for anything, although
                        # perhaps they should be
                        ("mining.set_difficulty", self._id),
                        ("mining.notify", self._id)
                    ),
                    self._id,
                    self.manager.config['extranonce_size']
                ),
                'error': None,
                'id': data['id']
            }
            self.subscribed = True
            if __debug__:
                self.logger.debug("Sending subscribe response: {}"
                                  .format(pformat(ret)))
            self.write_queue.put(json.dumps(ret) + "\n")

        elif meth == "mining.authorize":
            if self.subscribed is False:
                self._incr('not_subbed_err')
                self.send_error(25, id_val=data['id'])
                return

            if self.authenticated is True:
                self._incr('not_authed_err')
                self.send_error(24, id_val=data['id'])
                return

            try:
                password = data['params'][1]
                username = data['params'][0]
                # allow the user to use the password field as an argument field
                try:
                    args = password_arg_parser.parse_args(password.split())
                except ArgumentParserError:
                    # Ignore malformed parser data
                    pass
                else:
                    if args.diff:
                        diff = max(self.config['minimum_manual_diff'], args.diff)
                        self.difficulty = diff
                        self.next_diff = diff
            except IndexError:
                password = ""
                username = ""

            self.manager.log_event(
                "{name}.auth:1|c".format(name=self.manager.config['procname']))
            self.logger.info("Authentication request from {} for username {}"
                             .format(self.peer_name[0], username))
            user_worker = self.convert_username(username)

            # unpack into state dictionary
            self.address, self.worker = user_worker
            self.authenticated = True
            self.server.set_user(self)

            # notify of success authing and send him current diff and latest
            # job
            self.send_success(data['id'])
            self.push_difficulty()
            self.push_job()

        elif meth == "mining.submit":
            if self.authenticated is False:
                self._incr('not_authed_err')
                self.send_error(24, id_val=data['id'])
                return

            t = time.time()
            diff, typ = self.submit_job(data, t)

            # Log the share to our stat counters
            key = ""
            if typ > 0:
                key += "reject_"
            key += StratumClient.share_type_strings[typ] + "_share"
            if typ == 0:
                # Increment valid shares to calculate hashrate
                self._incr(key + "_n1", diff)
            self.manager.log_event(
                "{name}.{type}:1|c\n"
                "{name}.{type}_n1:{diff}|c\n"
                "{name}.submit_time:{t}|ms"
                .format(name=self.manager.config['procname'],
                        type=key, diff=diff, t=(time.time() - t) * 1000))

            # don't recalc their diff more often than interval
            if (self.config['vardiff']['enabled'] is True and
                    (t - self.last_diff_adj) > self.config['vardiff']['interval']):
                self.recalc_vardiff()

        elif meth == "mining.get_transactions":
            self.send_error(id_val=data['id'])
        elif meth == "mining.extranonce.subscribe":
            self.send_success(id_val=data['id'])
        else:
            self.logger.info("Unkown action {} for command {}"
                             .format(data['method'][:20], self.peer_name[0]))
            self._incr('unk_err')
            self.send_error(id_val=data['id'])
ValueError
dataset/ETHPy150Open simplecrypto/powerpool/powerpool/stratum_server.py/StratumClient.read
7,987
def flush(self):
        while self.buffers:
            filename_list = self.buffers.keys()
            for filename in filename_list:
                out = '\n'.join(self.buffers[filename]) + '\n'
                mid_dirs = os.path.dirname(filename)
                try:
                    os.makedirs(mid_dirs)
                except __HOLE__, err:
                    if err.errno == errno.EEXIST:
                        pass
                    else:
                        raise
                try:
                    with lock_file(filename, append=True, unlink=False) as f:
                        f.write(out)
                except LockTimeout:
                    # couldn't write, we'll try again later
                    self.logger.debug(_('Timeout writing to %s' % filename))
                else:
                    del self.buffers[filename]
        self.total_size = 0
OSError
dataset/ETHPy150Open notmyname/slogging/slogging/file_buffer.py/FileBuffer.flush
7,988
def test_bad_driver(self):
        try:
            driver.DriverManager('stevedore.test.extension', 'e2')
        except __HOLE__:
            pass
        else:
            self.assertEquals(False, "No error raised")
ImportError
dataset/ETHPy150Open SickRage/SickRage/lib/stevedore/tests/test_driver.py/TestCallback.test_bad_driver
7,989
def test_sdist_extra_files(self):
        """Test that the extra files are correctly added."""

        stdout, _, return_code = self.run_setup('sdist', '--formats=gztar')

        # There can be only one
        try:
            tf_path = glob.glob(os.path.join('dist', '*.tar.gz'))[0]
        except __HOLE__:
            assert False, 'source dist not found'

        tf = tarfile.open(tf_path)
        names = ['/'.join(p.split('/')[1:]) for p in tf.getnames()]

        self.assertIn('extra-file.txt', names)
IndexError
dataset/ETHPy150Open blue-yonder/pyscaffold/pyscaffold/contrib/pbr/pbr/tests/test_core.py/TestCore.test_sdist_extra_files
7,990
def generate_map(map, name='url_map'):
    """
    Generates a JavaScript function containing the rules defined in
    this map, to be used with a MapAdapter's generate_javascript
    method.  If you don't pass a name the returned JavaScript code is
    an expression that returns a function.  Otherwise it's a standalone
    script that assigns the function with that name.  Dotted names are
    resolved (so you an use a name like 'obj.url_for')

    In order to use JavaScript generation, simplejson must be installed.

    Note that using this feature will expose the rules
    defined in your map to users.
    If your rules contain sensitive information, don't use JavaScript
    generation!
    """
    map.update()
    rules = []
    converters = []
    for rule in map.iter_rules():
        trace = [{
            'is_dynamic': is_dynamic,
            'data': data
        } for is_dynamic, data in rule._trace]
        rule_converters = {}
        for key, converter in rule._converters.iteritems():
            js_func = js_to_url_function(converter)
            try:
                index = converters.index(js_func)
            except __HOLE__:
                converters.append(js_func)
                index = len(converters) - 1
            rule_converters[key] = index
        rules.append({
            u'endpoint': rule.endpoint,
            u'arguments': list(rule.arguments),
            u'converters': rule_converters,
            u'trace': trace,
            u'defaults': rule.defaults
        })

    return _javascript_routing_template.render({
        'name_parts': name and name.split('.') or [],
        'rules': dumps(rules),
        'converters': converters
    })
ValueError
dataset/ETHPy150Open IanLewis/kay/kay/lib/werkzeug/contrib/jsrouting.py/generate_map
7,991
@cached_property
    def json(self):
        """If the mimetype is `application/json` this will contain the
        parsed JSON data.  Otherwise this will be `None`.

        This requires Python 2.6 or an installed version of simplejson.
        """
        if __debug__:
            _assert_have_json()
        if self.mimetype == 'application/json':
            request_charset = self.mimetype_params.get('charset')
            try:
                if request_charset is not None:
                    return json.loads(self.data, encoding=request_charset)
                return json.loads(self.data)
            except __HOLE__, e:
                return self.on_json_loading_failed(e)
ValueError
dataset/ETHPy150Open baseblack/ReproWeb/3rdParty/python/flask/wrappers.py/Request.json
7,992
def get_status(interface):
    interface = Wireless(interface)
    try:
        stats = Iwstats(interface)
    except __HOLE__:
        return (None, None)
    quality = stats.qual.quality
    essid = interface.getEssid()
    return (essid, quality)
IOError
dataset/ETHPy150Open qtile/qtile/libqtile/widget/wlan.py/get_status
7,993
def __init__(self, package_json, subprocess_runner, working_copy,
               config_path):
    """Initialize this instance.

    Args:
      package_json: "package" dictionary read from JSON config file.
      subprocess_runner: Subprocess instance used to run commands.
      working_copy: Path to git working copy used to stage changes for this
        package.
      config_path: Path to the file the configuration was read from.

    Raises:
      ConfigJsonError: If an expected key isn't found in the JSON dictionary.
    """
    self.package_json = package_json
    self.dependencies = []
    self.name = '[unknown]'
    self.path = ''
    self.git_remote_upstream = ''
    self.git_remote_local = ''
    self.subprocess_runner = subprocess_runner
    self.working_copy = working_copy
    self.prebuilts = self.package_json.get('prebuilts', 0)
    self.revision = self.package_json.get('revision', '')
    self.fetch_dependencies = self.package_json.get('fetch_dependencies', 0)
    self.config_path = self.package_json.get('config_path', config_path)
    try:
      self.name = self.package_json['name']
      self.url = self.package_json['url']
      self.branch = self.package_json['branch']
      self.is_library = self.package_json['is_library']
      self.third_party = self.package_json['third_party']
      self.push = self.package_json['push']
    except __HOLE__ as e:
      raise ConfigJsonError('Package %s: Unable to find value (%s)' % (
          self.name, str(e)))
KeyError
dataset/ETHPy150Open google/fplutil/disttools/push_package.py/Package.__init__
7,994
@staticmethod
  def parse_root_json(config_json, config_path, project_path,
                      subprocess_runner, working_copy):
    """Parse root package from a dictionary read from a config.json file.

    Args:
      config_json: Dictionary parsed from config.json file.
      config_path: Path of the file config_json was parsed from (relative to
        the project).
      project_path: Local path to the project containing the config.json file.
      subprocess_runner: Subprocess instance used to run commands.
      working_copy: Directory to stage git changes in.

    Returns:
      Package instance representing the root package in the config file which
      should reference a set of dependencies.

    Raises:
      ConfigJsonError: If an expected key isn't found in the JSON dictionary.
    """
    try:
      package_dict = config_json['package']
    except __HOLE__ as e:
      raise ConfigJsonError('Package [root] not found: (%s)' % str(e))
    package = Package(package_dict, subprocess_runner, working_copy,
                      config_path)
    package.path = os.path.realpath(project_path)
    return package
KeyError
dataset/ETHPy150Open google/fplutil/disttools/push_package.py/Package.parse_root_json
7,995
def parse_dependencies_json(self, config_json, subprocess_runner,
                              working_copy, config_reader, parent_package):
    """Parse dependencies from a dictionary read from a config.json file.

    All dependencies are added to the "dependencies" attribute.

    Args:
      config_json: Dictionary parsed from config.json file.
      subprocess_runner: Subprocess instance used to run commands.
      working_copy: Directory to stage git changes in.
      config_reader: Callable that read_config()'s signature used to
        recursively read dependencies of the package.
      parent_package: Package instance which is a parent of the dependencies
        specified in config_json.

    Raises:
      ConfigJsonError: If an expected key isn't found in the JSON dictionary.
    """
    try:
      dependencies_list = config_json['dependencies']
    except __HOLE__ as e:
      raise ConfigJsonError('Dependencies not found: (%s)' % str(e))

    # Parse packages from the config and resolve all packages.
    additional_dependencies = parent_package.resolve_dependency_paths(
        [Package(dependency_dict, subprocess_runner, working_copy,
                 parent_package.config_path)
         for dependency_dict in dependencies_list])

    # Extend the list of dependencies with newly discovered packages.
    path_dependency_dict = dict([(d.path, d) for d in self.dependencies])
    new_dependencies = []
    for dependency in additional_dependencies:
      if (dependency.path not in path_dependency_dict and
          dependency.path != self.path):
        path_dependency_dict[dependency.path] = dependency
        new_dependencies.append(dependency)
    self.dependencies = path_dependency_dict.values()

    # Recursively read dependencies.
    for dependency in new_dependencies:
      if dependency.fetch_dependencies:
        self.parse_dependencies_json(
            config_reader(os.path.join(dependency.path,
                                       dependency.config_path)),
            subprocess_runner, working_copy, config_reader, dependency)
KeyError
dataset/ETHPy150Open google/fplutil/disttools/push_package.py/Package.parse_dependencies_json
7,996
def read_config(config_filename):
  """Read the configuration of the specified package.

  Args:
    config_filename: JSON file to read from config_dir.

  Returns:
    Dictionary read from the config.json file.

  Raises:
    ConfigJsonError: If the config.json file isn't found or it's malformed.
  """
  try:
    with open(config_filename) as fileobject:
      return json.load(fileobject)
  except (__HOLE__, ValueError) as error:
    raise ConfigJsonError('Unable to read %s (%s)' % (
        config_filename, str(error)))
OSError
dataset/ETHPy150Open google/fplutil/disttools/push_package.py/read_config
7,997
def value(self, column):
        """Return a scalar result corresponding to the given
        column expression."""
        try:
            return next(self.values(column))[0]
        except __HOLE__:
            return None
StopIteration
dataset/ETHPy150Open goFrendiAsgard/kokoropy/kokoropy/packages/sqlalchemy/orm/query.py/Query.value
7,998
def find_entry_point_for_command(command_name):
    """Return an entry point for the given rbtools command.

    If no entry point is found, None is returned.
    """
    # Attempt to retrieve the command class from the entry points. We
    # first look in rbtools for the commands, and failing that, we look
    # for third-party commands.
    entry_point = pkg_resources.get_entry_info('rbtools', 'rbtools_commands',
                                               command_name)

    if not entry_point:
        try:
            entry_point = next(pkg_resources.iter_entry_points(
                'rbtools_commands', command_name))
        except __HOLE__:
            # There aren't any custom entry points defined.
            pass

    return entry_point
StopIteration
dataset/ETHPy150Open reviewboard/rbtools/rbtools/commands/__init__.py/find_entry_point_for_command
7,999
def _mkdirs(self, path):
        """Recursively creates directories."""
        base = ''
        for part in path.split('/'):
            base += part + '/'
            try:
                self.sftp.lstat(base)
            except __HOLE__, e:
                if e.errno == errno.ENOENT:
                    self.sftp.mkdir(base)
IOError
dataset/ETHPy150Open grow/grow/grow/deployments/destinations/scp.py/ScpDestination._mkdirs