Column schema (recovered from the dataset viewer header):
Unnamed: 0 — int64 row index, range 0 to 10k
function — string, 79 to 138k chars: Python source with one exception type masked as __HOLE__
label — string, 20 classes: the exception class that fills __HOLE__
info — string, 42 to 261 chars: source path in the ETHPy150Open corpus
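
Each record below lists the row index, the masked function, the label, and the source path in turn. A minimal sketch of how a record can be reassembled into the original source (the dict-style row access and the helper name are assumptions; only the column names above come from the header):

# Hypothetical helper: substitute the labeled exception class back into
# the masked function body. Assumes a row is a dict keyed by the column
# names in the schema above.
def reconstruct(row):
    return row["function"].replace("__HOLE__", row["label"])

row = {
    "function": "try:\n    d[k]\nexcept __HOLE__:\n    pass",
    "label": "KeyError",
}
print(reconstruct(row))  # the except clause now reads: except KeyError: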
7,400
def _handle_lowerdim_multi_index_axis0(self, tup):
    # we have an axis0 multi-index, handle or raise
    try:
        # fast path for series or for tup devoid of slices
        return self._get_label(tup, axis=0)
    except __HOLE__:
        # slices are unhashable
        pass
    except Exception as e1:
        if isinstance(tup[0], (slice, Index)):
            raise IndexingError("Handle elsewhere")

        # raise the error if we are not sorted
        ax0 = self.obj._get_axis(0)
        if not ax0.is_lexsorted_for_tuple(tup):
            raise e1

    return None
TypeError
dataset/ETHPy150Open pydata/pandas/pandas/core/indexing.py/_NDFrameIndexer._handle_lowerdim_multi_index_axis0
7,401
def _getitem_axis(self, key, axis=0):
    if self._should_validate_iterable(axis):
        self._has_valid_type(key, axis)

    labels = self.obj._get_axis(axis)
    if isinstance(key, slice):
        return self._get_slice_axis(key, axis=axis)
    elif (is_list_like_indexer(key) and
          not (isinstance(key, tuple) and
               isinstance(labels, MultiIndex))):

        if hasattr(key, 'ndim') and key.ndim > 1:
            raise ValueError('Cannot index with multidimensional key')

        return self._getitem_iterable(key, axis=axis)
    else:
        # maybe coerce a float scalar to integer
        key = labels._maybe_cast_indexer(key)

        if is_integer(key):
            if axis == 0 and isinstance(labels, MultiIndex):
                try:
                    return self._get_label(key, axis=axis)
                except (__HOLE__, TypeError):
                    if self.obj.index.levels[0].is_integer():
                        raise

            # this is the fallback! (for a non-float, non-integer index)
            if not labels.is_floating() and not labels.is_integer():
                return self._get_loc(key, axis=axis)

        return self._get_label(key, axis=axis)
KeyError
dataset/ETHPy150Open pydata/pandas/pandas/core/indexing.py/_NDFrameIndexer._getitem_axis
7,402
def _getitem_iterable(self, key, axis=0):
    if self._should_validate_iterable(axis):
        self._has_valid_type(key, axis)

    labels = self.obj._get_axis(axis)

    if is_bool_indexer(key):
        key = check_bool_indexer(labels, key)
        inds, = key.nonzero()
        return self.obj.take(inds, axis=axis, convert=False)
    else:
        if isinstance(key, Index):
            # want Index objects to pass through untouched
            keyarr = key
        else:
            # asarray can be unsafe, NumPy strings are weird
            keyarr = _asarray_tuplesafe(key)

        if com.is_categorical_dtype(labels):
            keyarr = labels._shallow_copy(keyarr)

        # have the index handle the indexer and possibly return
        # an indexer or raising
        indexer = labels._convert_list_indexer(keyarr, kind=self.name)
        if indexer is not None:
            return self.obj.take(indexer, axis=axis)

        # this is not the most robust, but...
        if (isinstance(labels, MultiIndex) and len(keyarr) and
                not isinstance(keyarr[0], tuple)):
            level = 0
        else:
            level = None

        # existing labels are unique and indexer are unique
        if labels.is_unique and Index(keyarr).is_unique:

            try:
                result = self.obj.reindex_axis(keyarr, axis=axis,
                                               level=level)

                # this is an error as we are trying to find
                # keys in a multi-index that don't exist
                if isinstance(labels, MultiIndex) and level is not None:
                    if (hasattr(result, 'ndim') and
                            not np.prod(result.shape) and len(keyarr)):
                        raise KeyError("cannot index a multi-index axis "
                                       "with these keys")

                return result

            except __HOLE__:

                # Series
                if axis != 0:
                    raise AssertionError('axis must be 0')
                return self.obj.reindex(keyarr, level=level)

        # existing labels are non-unique
        else:

            # reindex with the specified axis
            if axis + 1 > self.obj.ndim:
                raise AssertionError("invalid indexing error with "
                                     "non-unique index")

            new_target, indexer, new_indexer = labels._reindex_non_unique(
                keyarr)

            if new_indexer is not None:
                result = self.obj.take(indexer[indexer != -1], axis=axis,
                                       convert=False)

                result = result._reindex_with_indexers(
                    {axis: [new_target, new_indexer]},
                    copy=True, allow_dups=True)

            else:
                result = self.obj.take(indexer, axis=axis, convert=False)

            return result
AttributeError
dataset/ETHPy150Open pydata/pandas/pandas/core/indexing.py/_NDFrameIndexer._getitem_iterable
7,403
def _convert_to_indexer(self, obj, axis=0, is_setter=False):
    """
    Convert indexing key into something we can use to do actual fancy
    indexing on an ndarray

    Examples
    ix[:5] -> slice(0, 5)
    ix[[1,2,3]] -> [1,2,3]
    ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)

    Going by Zen of Python?
    "In the face of ambiguity, refuse the temptation to guess."
    raise AmbiguousIndexError with integer labels?
    - No, prefer label-based indexing
    """
    labels = self.obj._get_axis(axis)

    if isinstance(obj, slice):
        return self._convert_slice_indexer(obj, axis)

    # try to find out correct indexer, if not type correct raise
    try:
        obj = self._convert_scalar_indexer(obj, axis)
    except TypeError:

        # but we will allow setting
        if is_setter:
            pass

    # see if we are positional in nature
    is_int_index = labels.is_integer()
    is_int_positional = is_integer(obj) and not is_int_index

    # if we are a label return me
    try:
        return labels.get_loc(obj)
    except LookupError:
        if isinstance(obj, tuple) and isinstance(labels, MultiIndex):
            if is_setter and len(obj) == labels.nlevels:
                return {'key': obj}
        raise
    except __HOLE__:
        pass
    except (ValueError):
        if not is_int_positional:
            raise

    # a positional
    if is_int_positional:

        # if we are setting and its not a valid location
        # its an insert which fails by definition
        if is_setter:

            # always valid
            if self.name == 'loc':
                return {'key': obj}

            # a positional
            if (obj >= self.obj.shape[axis] and
                    not isinstance(labels, MultiIndex)):
                raise ValueError("cannot set by positional indexing with "
                                 "enlargement")

        return obj

    if is_nested_tuple(obj, labels):
        return labels.get_locs(obj)
    elif is_list_like_indexer(obj):
        if is_bool_indexer(obj):
            obj = check_bool_indexer(labels, obj)
            inds, = obj.nonzero()
            return inds
        else:
            if isinstance(obj, Index):
                # want Index objects to pass through untouched
                objarr = obj
            else:
                objarr = _asarray_tuplesafe(obj)

            # The index may want to handle a list indexer differently
            # by returning an indexer or raising
            indexer = labels._convert_list_indexer(objarr, kind=self.name)
            if indexer is not None:
                return indexer

            # this is not the most robust, but...
            if (isinstance(labels, MultiIndex) and
                    not isinstance(objarr[0], tuple)):
                level = 0
                _, indexer = labels.reindex(objarr, level=level)

                # take all
                if indexer is None:
                    indexer = np.arange(len(labels))

                check = labels.levels[0].get_indexer(objarr)
            else:
                level = None

                # unique index
                if labels.is_unique:
                    indexer = check = labels.get_indexer(objarr)

                # non-unique (dups)
                else:
                    (indexer,
                     missing) = labels.get_indexer_non_unique(objarr)
                    check = indexer

            mask = check == -1
            if mask.any():
                raise KeyError('%s not in index' % objarr[mask])

            return _values_from_object(indexer)
    else:
        try:
            return labels.get_loc(obj)
        except LookupError:
            # allow a not found key only if we are a setter
            if not is_list_like_indexer(obj) and is_setter:
                return {'key': obj}
            raise
TypeError
dataset/ETHPy150Open pydata/pandas/pandas/core/indexing.py/_NDFrameIndexer._convert_to_indexer
7,404
def _has_valid_type(self, key, axis):
    ax = self.obj._get_axis(axis)

    # valid for a label where all labels are in the index
    # slice of labels (where start-end in labels)
    # slice of integers (only if in the labels)
    # boolean

    if isinstance(key, slice):
        return True

    elif is_bool_indexer(key):
        return True

    elif is_list_like_indexer(key):

        # mi is just a passthru
        if isinstance(key, tuple) and isinstance(ax, MultiIndex):
            return True

        # TODO: don't check the entire key unless necessary
        if len(key) and np.all(ax.get_indexer_for(key) < 0):

            raise KeyError("None of [%s] are in the [%s]" %
                           (key, self.obj._get_axis_name(axis)))

        return True

    else:

        def error():
            if isnull(key):
                raise TypeError("cannot use label indexing with a null "
                                "key")
            raise KeyError("the label [%s] is not in the [%s]" %
                           (key, self.obj._get_axis_name(axis)))

        try:
            key = self._convert_scalar_indexer(key, axis)
            if key not in ax:
                error()
        except __HOLE__ as e:

            # python 3 type errors should be raised
            if 'unorderable' in str(e):  # pragma: no cover
                error()
            raise
        except:
            error()

    return True
TypeError
dataset/ETHPy150Open pydata/pandas/pandas/core/indexing.py/_LocIndexer._has_valid_type
7,405
def convert_to_index_sliceable(obj, key):
    """if we are index sliceable, then return my slicer, otherwise return None
    """
    idx = obj.index
    if isinstance(key, slice):
        return idx._convert_slice_indexer(key, kind='getitem')

    elif isinstance(key, compat.string_types):

        # we are an actual column
        if key in obj._data.items:
            return None

        # We might have a datetimelike string that we can translate to a
        # slice here via partial string indexing
        if idx.is_all_dates:
            try:
                return idx._get_string_slice(key)
            except (KeyError, ValueError, __HOLE__):
                return None

    return None
NotImplementedError
dataset/ETHPy150Open pydata/pandas/pandas/core/indexing.py/convert_to_index_sliceable
7,406
def do_keys(self, keys):
    """
    Go from JWK description to binary keys

    :param keys:
    :return:
    """
    for inst in keys:
        typ = inst["kty"]
        flag = 0
        for _typ in [typ, typ.lower(), typ.upper()]:
            try:
                _key = K2C[_typ](**inst)
            except __HOLE__:
                continue
            else:
                self._keys.append(_key)
                flag = 1
                break
        if not flag:
            raise UnknownKeyType(typ)
KeyError
dataset/ETHPy150Open rohe/pyoidc/src/oic/utils/keyio.py/KeyBundle.do_keys
7,407
def do_local_jwk(self, filename):
    try:
        self.do_keys(json.loads(open(filename).read())["keys"])
    except __HOLE__:
        logger.error("No 'keys' keyword in JWKS")
        raise UpdateFailed(
            "Local key update from '{}' failed.".format(filename))
    else:
        self.last_updated = time.time()
KeyError
dataset/ETHPy150Open rohe/pyoidc/src/oic/utils/keyio.py/KeyBundle.do_local_jwk
7,408
def do_remote(self):
    args = {"verify": self.verify_ssl}
    if self.etag:
        args["headers"] = {"If-None-Match": self.etag}

    try:
        logging.debug('KeyBundle fetch keys from: {}'.format(self.source))
        r = requests.get(self.source, **args)
    except requests.ConnectionError as e:
        raise UpdateFailed(
            "Remote key update from '{}' failed: {}.".format(self.source,
                                                             str(e)))

    if r.status_code == 304:  # file has not changed
        self.time_out = time.time() + self.cache_time
        self.last_updated = time.time()
        return False
    elif r.status_code == 200:  # New content
        self.time_out = time.time() + self.cache_time

        self.imp_jwks = self._parse_remote_response(r)
        if not isinstance(self.imp_jwks, dict) or "keys" not in self.imp_jwks:
            raise UpdateFailed(
                "Remote key update from '{}' failed, malformed "
                "JWKS.".format(self.source))

        logger.debug("Loaded JWKS: %s from %s" % (r.text, self.source))
        try:
            self.do_keys(self.imp_jwks["keys"])
        except KeyError:
            logger.error("No 'keys' keyword in JWKS")
            raise UpdateFailed(
                "Remote key update from '{}' failed.".format(self.source))

        try:
            self.etag = r.headers["Etag"]
        except __HOLE__:
            pass
    else:
        raise UpdateFailed(
            "Remote key update from '{}' failed, HTTP status {}".format(
                self.source, r.status_code))

    self.last_updated = time.time()
    return True
KeyError
dataset/ETHPy150Open rohe/pyoidc/src/oic/utils/keyio.py/KeyBundle.do_remote
7,409
def _parse_remote_response(self, response):
    """
    Parse JWKS from the HTTP response.

    Should be overridden by subclasses for adding support of e.g. signed
    JWKS.

    :param response: HTTP response from the 'jwks_uri' endpoint
    :return: response parsed as JSON
    """
    # Check if the content type is the right one.
    try:
        if response.headers["Content-Type"] != 'application/json':
            logger.warning('Wrong Content_type')
    except KeyError:
        pass

    logger.debug("Loaded JWKS: %s from %s" % (response.text, self.source))
    try:
        return json.loads(response.text)
    except __HOLE__ as e:
        return None
ValueError
dataset/ETHPy150Open rohe/pyoidc/src/oic/utils/keyio.py/KeyBundle._parse_remote_response
7,410
def dump_jwks(kbl, target):
    """
    Write a JWK to a file

    :param kbl: List of KeyBundles
    :param target: Name of the file to which everything should be written
    """
    res = {"keys": []}
    for kb in kbl:
        # ignore simple keys
        res["keys"].extend([k.to_dict() for k in kb.keys() if
                            k.kty != 'oct' and not k.inactive_since])

    try:
        f = open(target, 'w')
    except __HOLE__:
        (head, tail) = os.path.split(target)
        os.makedirs(head)
        f = open(target, 'w')

    _txt = json.dumps(res)
    f.write(_txt)
    f.close()
IOError
dataset/ETHPy150Open rohe/pyoidc/src/oic/utils/keyio.py/dump_jwks
7,411
def add(self, issuer, url, **kwargs):
    """
    :param issuer: Who issued the keys
    :param url: Where can the key/-s be found
    :param kwargs: extra parameters for instantiating KeyBundle
    """
    if not url:
        raise KeyError("No jwks_uri")

    if "/localhost:" in url or "/localhost/" in url:
        kc = self.keybundle_cls(source=url, verify_ssl=False, **kwargs)
    else:
        kc = self.keybundle_cls(source=url, verify_ssl=self.verify_ssl,
                                **kwargs)

    try:
        self.issuer_keys[issuer].append(kc)
    except __HOLE__:
        self.issuer_keys[issuer] = [kc]

    return kc
KeyError
dataset/ETHPy150Open rohe/pyoidc/src/oic/utils/keyio.py/KeyJar.add
7,412
def add_kb(self, issuer, kb):
    try:
        self.issuer_keys[issuer].append(kb)
    except __HOLE__:
        self.issuer_keys[issuer] = [kb]
KeyError
dataset/ETHPy150Open rohe/pyoidc/src/oic/utils/keyio.py/KeyJar.add_kb
7,413
def get(self, key_use, key_type="", issuer="", kid=None, **kwargs):
    """
    :param key_use: A key useful for this usage (enc, dec, sig, ver)
    :param key_type: Type of key (rsa, ec, symmetric, ..)
    :param issuer: Who is responsible for the keys, "" == me
    :param kid: A Key Identifier
    :return: A possibly empty list of keys
    """
    if key_use in ["dec", "enc"]:
        use = "enc"
    else:
        use = "sig"

    if issuer != "":
        try:
            _keys = self.issuer_keys[issuer]
        except KeyError:
            if issuer.endswith("/"):
                try:
                    _keys = self.issuer_keys[issuer[:-1]]
                except KeyError:
                    _keys = []
            else:
                try:
                    _keys = self.issuer_keys[issuer + "/"]
                except __HOLE__:
                    _keys = []
    else:
        try:
            _keys = self.issuer_keys[issuer]
        except KeyError:
            _keys = []

    lst = []
    if _keys:
        for bundles in _keys:
            if key_type:
                _keys = bundles.get(key_type)
            else:
                _keys = bundles.keys()
            for key in _keys:
                if key.inactive_since and key_use != "ver":
                    # Skip inactive keys unless for signature verification
                    continue
                if kid and key.kid == kid:
                    lst = [key]
                    break
                if not key.use or use == key.use:
                    lst.append(key)

    # if elliptic curve have to check I have a key of the right curve
    if key_type == "EC" and "alg" in kwargs:
        name = "P-{}".format(kwargs["alg"][2:])  # the type
        _lst = []
        for key in lst:
            try:
                assert name == key.crv
            except AssertionError:
                pass
            else:
                _lst.append(key)
        lst = _lst

    return lst
KeyError
dataset/ETHPy150Open rohe/pyoidc/src/oic/utils/keyio.py/KeyJar.get
7,414
def __getitem__(self, issuer):
    try:
        return self.issuer_keys[issuer]
    except __HOLE__:
        logger.debug(
            "Available key issuers: {}".format(self.issuer_keys.keys()))
        raise
KeyError
dataset/ETHPy150Open rohe/pyoidc/src/oic/utils/keyio.py/KeyJar.__getitem__
7,415
def remove_key(self, issuer, key_type, key):
    try:
        kcs = self.issuer_keys[issuer]
    except __HOLE__:
        return

    for kc in kcs:
        kc.remove_key(key_type, key)
        if len(kc) == 0:
            self.issuer_keys[issuer].remove(kc)
KeyError
dataset/ETHPy150Open rohe/pyoidc/src/oic/utils/keyio.py/KeyJar.remove_key
7,416
def update(self, kj):
    for key, val in kj.issuer_keys.items():
        if isinstance(val, string_types):
            val = [val]
        elif not isinstance(val, list):
            val = [val]

        try:
            self.issuer_keys[key].extend(val)
        except __HOLE__:
            self.issuer_keys[key] = val
KeyError
dataset/ETHPy150Open rohe/pyoidc/src/oic/utils/keyio.py/KeyJar.update
7,417
def load_keys(self, pcr, issuer, replace=False):
    """
    Fetch keys from another server

    :param pcr: The provider information
    :param issuer: The provider URL
    :param replace: If all previously gathered keys from this provider
        should be replaced.
    :return: Dictionary with usage as key and keys as values
    """
    logger.debug("loading keys for issuer: %s" % issuer)
    try:
        logger.debug("pcr: %s" % pcr)
    except MessageException:
        pass

    if replace or issuer not in self.issuer_keys:
        self.issuer_keys[issuer] = []

    try:
        self.add(issuer, pcr["jwks_uri"])
    except __HOLE__:
        # jwks should only be considered if no jwks_uri is present
        try:
            _keys = pcr["jwks"]["keys"]
            self.issuer_keys[issuer].append(self.keybundle_cls(_keys))
        except KeyError:
            pass
KeyError
dataset/ETHPy150Open rohe/pyoidc/src/oic/utils/keyio.py/KeyJar.load_keys
7,418
def find(self, source, issuer):
    """
    Find a key bundle

    :param source: A url
    :param issuer: The issuer of keys
    """
    try:
        for kb in self.issuer_keys[issuer]:
            if kb.source == source:
                return kb
    except __HOLE__:
        return None
KeyError
dataset/ETHPy150Open rohe/pyoidc/src/oic/utils/keyio.py/KeyJar.find
7,419
def dump_issuer_keys(self, issuer):
    res = []
    try:
        for kb in self.issuer_keys[issuer]:
            res.extend([k.to_dict() for k in kb.keys()])
    except __HOLE__:
        pass

    return res
KeyError
dataset/ETHPy150Open rohe/pyoidc/src/oic/utils/keyio.py/KeyJar.dump_issuer_keys
7,420
def key_export(baseurl, local_path, vault, keyjar, **kwargs):
    """
    :param baseurl: The base URL to which the key file names are added
    :param local_path: Where on the machine the export files are kept
    :param vault: Where the keys are kept
    :param keyjar: Where to store the exported keys
    :return: 2-tuple: result of urlsplit and a dictionary with
        parameter name as key and url and value
    """
    part = urlsplit(baseurl)

    # deal with the export directory
    if part.path.endswith("/"):
        _path = part.path[:-1]
    else:
        _path = part.path[:]

    local_path = proper_path("%s/%s" % (_path, local_path))

    if not os.path.exists(local_path):
        os.makedirs(local_path)

    kb = key_setup(vault, **kwargs)

    try:
        keyjar[""].append(kb)
    except __HOLE__:
        keyjar[""] = kb

    # the local filename
    _export_filename = os.path.join(local_path, "jwks")

    with open(_export_filename, "w") as f:
        f.write(str(kb))

    _url = "%s://%s%s" % (part.scheme, part.netloc,
                          _export_filename[1:])

    return _url


# ================= create RSA key ======================
KeyError
dataset/ETHPy150Open rohe/pyoidc/src/oic/utils/keyio.py/key_export
7,421
def rsa_init(spec):
    """
    :param spec:
    :return: KeyBundle
    """
    arg = {}
    for param in ["name", "path", "size"]:
        try:
            arg[param] = spec[param]
        except __HOLE__:
            pass

    kb = KeyBundle(keytype="RSA", keyusage=spec["use"])
    for use in spec["use"]:
        _key = create_and_store_rsa_key_pair(**arg)
        kb.append(RSAKey(use=use, key=_key))
    return kb
KeyError
dataset/ETHPy150Open rohe/pyoidc/src/oic/utils/keyio.py/rsa_init
7,422
def _get_box(self, is_originator, uri, match_exact=False):
    try:
        if match_exact:
            key = self._uri_to_key[uri]
        else:
            key = self._uri_to_key.longest_prefix_value(uri)
    except __HOLE__:
        if self._default_key:
            key = self._default_key
        else:
            return None

    if is_originator:
        return key.originator_box
    else:
        return key.responder_box
KeyError
dataset/ETHPy150Open crossbario/autobahn-python/autobahn/wamp/cryptobox.py/KeyRing._get_box
7,423
def _install_module(self, fullname):
    top, username, repo, modname = self._parse_fullname(fullname)
    url = 'https://raw.githubusercontent.com/%s/%s/master/%s' % \
        (username, repo, modname + '.py')
    print('Downloading: ', url)
    try:
        tmp_file, resp = urlretrieve(url)
        with open(tmp_file, 'r') as f:
            new_content = f.read()
        if new_content == 'Not Found':
            raise InstallError('remote file does not exist')
    except __HOLE__:
        raise InstallError('error downloading file')
    new = tmp_file
    old = self._install_path(fullname)
    updated = self._update_if_changed(old, new)
    if updated == 'updated':
        print('Updating module: ', fullname)
    elif updated == 'installed':
        print('Installing module: ', fullname)
    elif updated == 'noaction':
        print('Using existing version: ', fullname)
IOError
dataset/ETHPy150Open ellisonbg/antipackage/antipackage.py/GitHubImporter._install_module
7,424
def serialize(self, queryset):
    try:
        return self.serializer(levels=self.levels,
                               fields=self.fields,
                               exclude=self.exclude).serialize(queryset)
    except __HOLE__, e:
        logging.warn("API Error: %s" % e)
        if settings.debug:
            return "API Error: %s" % e
        else:
            return ''
ValidationError
dataset/ETHPy150Open letolab/airy/airy/contrib/api/handlers.py/APIHandler.serialize
7,425
def search(self, cardinal, user, channel, msg):
    # Before we do anything, let's make sure we'll be able to query YouTube
    if self.api_key is None:
        cardinal.sendMsg(channel,
                         "YouTube plugin is not configured correctly. "
                         "Please set API key.")

    # Grab the search query
    try:
        search_query = msg.split(' ', 1)[1]
    except IndexError:
        cardinal.sendMsg(channel, "Syntax: .youtube <search query>")
        return

    params = {'q': search_query,
              'part': 'snippet',
              'maxResults': 1,
              'type': 'video'}

    try:
        result = self._form_request("search", params)
    except Exception, e:
        cardinal.sendMsg(channel, "Unable to connect to YouTube.")
        self.logger.exception("Failed to connect to YouTube")
        return

    if 'error' in result:
        cardinal.sendMsg(channel,
                         "An error occurred while attempting to search "
                         "YouTube.")
        self.logger.error(
            "Error attempting to search YouTube: %s" % result['error']
        )
        return

    try:
        video_id = str(result['items'][0]['id']['videoId'].encode('utf-8'))
        params = {
            'id': video_id,
            'maxResults': 1,
            'part': 'snippet,statistics'
        }
    except IndexError:
        cardinal.sendMsg(channel, "No videos found matching that search.")
        return

    try:
        result = self._form_request("videos", params)
    except Exception, e:
        cardinal.sendMsg(channel, "Unable to connect to YouTube.")
        self.logger.exception("Failed to connect to YouTube")
        return

    try:
        message = self._parse_item(result['items'][0])
        cardinal.sendMsg(channel, message)
    except __HOLE__:
        cardinal.sendMsg(channel, "No videos found matching that search.")
    except Exception, e:
        self.logger.exception("Failed to parse info for %s" % video_id)
        raise EventRejectedMessage
IndexError
dataset/ETHPy150Open JohnMaguire/Cardinal/plugins/youtube/plugin.py/YouTubePlugin.search
7,426
def test_str(self):
    """
    The string representation of a L{DelayedCall} instance, as returned
    by C{str}, includes the unsigned id of the instance, as well as its
    state, the function to be called, and the function arguments.
    """
    def nothing():
        pass
    dc = DelayedCall(12, nothing, (3, ), {"A": 5}, None, None,
                     lambda: 1.5)
    ids = {dc: 200}

    def fakeID(obj):
        try:
            return ids[obj]
        except (__HOLE__, KeyError):
            return id(obj)

    self.addCleanup(setIDFunction, setIDFunction(fakeID))
    self.assertEqual(
        str(dc),
        "<DelayedCall 0xc8 [10.5s] called=0 cancelled=0 nothing(3, A=5)>")
TypeError
dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/internet/test/test_base.py/DelayedCallTests.test_str
7,427
def _parse_object(value):
    """
    :param value: The string to parse
    :type value: str
    :returns: The parsed value
    :rtype: dict
    """
    try:
        json_object = json.loads(value)
        if json_object is None or isinstance(json_object,
                                             collections.Mapping):
            return json_object
        else:
            raise DCOSException(
                'Unable to parse {!r} as a JSON object'.format(value))
    except __HOLE__ as error:
        logger.exception('Error parsing value as a JSON object')
        msg = 'Unable to parse {!r} as a JSON object: {}'.format(value,
                                                                 error)
        raise DCOSException(msg)
ValueError
dataset/ETHPy150Open dcos/dcos-cli/dcos/jsonitem.py/_parse_object
7,428
def _parse_number(value):
    """
    :param value: The string to parse
    :type value: str
    :returns: The parsed value
    :rtype: float
    """
    try:
        return None if value == 'null' else float(value)
    except __HOLE__ as error:
        logger.exception('Error parsing value as a JSON number')
        msg = 'Unable to parse {!r} as a float: {}'.format(value, error)
        raise DCOSException(msg)
ValueError
dataset/ETHPy150Open dcos/dcos-cli/dcos/jsonitem.py/_parse_number
7,429
def _parse_integer(value):
    """
    :param value: The string to parse
    :type value: str
    :returns: The parsed value
    :rtype: int
    """
    try:
        return None if value == 'null' else int(value)
    except __HOLE__ as error:
        logger.exception('Error parsing value as a JSON integer')
        msg = 'Unable to parse {!r} as an int: {}'.format(value, error)
        raise DCOSException(msg)
ValueError
dataset/ETHPy150Open dcos/dcos-cli/dcos/jsonitem.py/_parse_integer
7,430
def _parse_boolean(value):
    """
    :param value: The string to parse
    :type value: str
    :returns: The parsed value
    :rtype: bool
    """
    try:
        boolean = json.loads(value.lower())
        if boolean is None or isinstance(boolean, bool):
            return boolean
        else:
            raise DCOSException(
                'Unable to parse {!r} as a boolean'.format(value))
    except __HOLE__ as error:
        logger.exception('Error parsing value as a JSON boolean')
        msg = 'Unable to parse {!r} as a boolean: {}'.format(value, error)
        raise DCOSException(msg)
ValueError
dataset/ETHPy150Open dcos/dcos-cli/dcos/jsonitem.py/_parse_boolean
7,431
def _parse_array(value):
    """
    :param value: The string to parse
    :type value: str
    :returns: The parsed value
    :rtype: list
    """
    try:
        array = json.loads(value)
        if array is None or isinstance(array, collections.Sequence):
            return array
        else:
            raise DCOSException(
                'Unable to parse {!r} as an array'.format(value))
    except __HOLE__ as error:
        logger.exception('Error parsing value as a JSON array')
        msg = 'Unable to parse {!r} as an array: {}'.format(value, error)
        raise DCOSException(msg)
ValueError
dataset/ETHPy150Open dcos/dcos-cli/dcos/jsonitem.py/_parse_array
7,432
def geo_lookup(self, search_term, geo_type=None):
    """
    Search for geoids based upon name of geography

    Returns a response that maps the incoming search term to the geoid:

    {
      'term': <search_term>,
      'geoid': '<full_geoid>',
    }
    """
    if geo_type == 'congress_district':
        geoid = None
        dist, st = search_term.rsplit(',', 1)
        fips = self.lookup_state(st.strip(), attr='fips')
        try:
            dist_num = str(int(dist.split(' ')[-1]))
        except __HOLE__:
            dist_num = '00'
        if fips and dist_num:
            geoid = '50000US{0}{1}'\
                .format(fips, dist_num.zfill(2))
        return {
            'term': search_term,
            'geoid': geoid
        }
    regex = re.compile('[%s]' % re.escape(punctuation))
    search_term = regex.sub('', search_term)
    if geo_type in ['census_tract', 'state_fips']:
        return {
            'term': search_term,
            'geoid': '%s00US%s' % (SUMLEV_LOOKUP[geo_type], search_term)
        }
    if geo_type == 'state_county_fips':
        resp = {
            'term': search_term,
            'geoid': None
        }
        g = StateCountyFIPS()
        valid, message = g.validate([search_term])
        if valid:
            resp['geoid'] = '05000US%s' % search_term
        return resp
    q_dict = {'q': search_term}
    if geo_type:
        q_dict['sumlevs'] = SUMLEV_LOOKUP[geo_type]
    if geo_type == 'zip_5':
        q_dict['q'] = search_term.zfill(5)
    if geo_type == 'state':
        q_dict['q'] = self.lookup_state(search_term)
    q_dict = encoded_dict(q_dict)
    params = urlencode(q_dict)
    try:
        response = self.urlopen('%s/geo/search?%s' % (self.base_url,
                                                      params))
    except scrapelib.HTTPError, e:
        try:
            body = json.loads(e.body.json()['error'])
        except ValueError:
            body = None
        raise MancerError('Census Reporter API returned a %s status'
                          % response.status_code, body=body)
    results = json.loads(response)
    try:
        results = {
            'term': search_term,
            'geoid': results['results'][0]['full_geoid']
        }
    except IndexError:
        results = {
            'term': search_term,
            'geoid': None,
        }
    return results
ValueError
dataset/ETHPy150Open associatedpress/geomancer/geomancer/mancers/census_reporter.py/CensusReporter.geo_lookup
7,433
def _try_search(self, gids, columns, bad_gids=[]):
    query = {
        'table_ids': ','.join(columns),
        'geo_ids': ','.join(sorted([g[1] for g in gids])),
    }
    params = urlencode(query)
    try:
        response = self.urlopen('%s/data/show/latest?%s' %
                                (self.base_url, params))
    except scrapelib.HTTPError, e:
        try:
            body = json.loads(e.body.json()['error'])
        except ValueError:
            body = None
        except __HOLE__:
            body = e.body
        if 'The ACS 2013 5-year release doesn\'t include GeoID(s)' in body:
            error = json.loads(body)
            bad_gids.append(
                error['error'].rsplit(' ', 1)[1].replace('.', ''))
            for idx, gid in enumerate(gids):
                if gid[1] in bad_gids:
                    gids.pop(idx)
            response = self._try_search(gids, columns, bad_gids=bad_gids)
        else:
            raise MancerError('Census Reporter API returned an error',
                              body=body)
    return response
AttributeError
dataset/ETHPy150Open associatedpress/geomancer/geomancer/mancers/census_reporter.py/CensusReporter._try_search
7,434
def __getattr__(self, k):
    try:
        return object.__getattr__(self, k)
    except __HOLE__:
        return self.__class__(endpoint=self.endpoint,
                              methodname='.'.join((self.methodname,
                                                   k)).lstrip('.'),
                              user_agent=self.user_agent,
                              oauth_consumer=self.oauth_consumer,
                              cache=self.cache)
AttributeError
dataset/ETHPy150Open JeremyGrosser/python-digg/digg/api.py/DiggCall.__getattr__
7,435
def _records_list_from(path, ext):
    try:
        files = _files_in(path, ext)
    except __HOLE__:
        return []
    return [f.split('_')[-1].split('.')[0] for f in files]
OSError
dataset/ETHPy150Open omangin/multimodal/multimodal/db/models/vctk.py/_records_list_from
7,436
def _build_record(self, name, speaker_id):
    prefix = self._get_speaker_dir(speaker_id) + '_' + name
    txt_path = os.path.join(self.get_txt_dir(speaker_id), prefix + '.txt')
    try:
        with open(txt_path, 'r') as f:
            transcription = f.read()
    except __HOLE__:
        transcription = None
    audio = prefix + '.wav'
    full_audio_path = os.path.join(self.get_wav_dir(speaker_id), audio)
    if not os.path.exists(full_audio_path):
        audio = None
    tags = []  # TODO: eventually extract words / radicals
    r = Record(self, speaker_id, audio, tags, transcription, STYLE)
    self.add_record(r)
IOError
dataset/ETHPy150Open omangin/multimodal/multimodal/db/models/vctk.py/VCTKDB._build_record
7,437
def worker_pool(popen_arg, num_workers, job_iterable,
                stop_when_jobs_done=True, stop_on_keyboard_interrupt=True,
                popen=None):
    """
    Coroutine to manage a pool of workers that accept jobs as single lines
    of input on stdin and produce results as single lines of output.

    popen_arg - parameter to pass to subprocess.Popen when creating workers
    num_workers - maximum number of workers to create
    job_iterable - iterable producing (job id, job string) tuples where
        job string should include a single trailing newline
    stop_when_jobs_done - True: generator exits when all jobs are done
    stop_on_keyboard_interrupt - True: generator exits on KeyboardInterrupt

    accepted to send(): job iterable or None, when a new job iterable is
    sent it will replace the previous one used for assigning jobs to
    workers

    This generator blocks until there is a result from one of the workers.

    yields (currently processing job id list, finished job id, job result)
    tuples as jobs are completed, or (None, None, None) when no jobs
    remain to be completed and stop_when_jobs_done is False.

    currently processing job id list will include None if some workers are
    idle. job result will include trailing newline.

    when no jobs remain to be completed and stop_when_jobs_done is False a
    new job iterable must be sent to this generator with send().
    """
    if popen is None:
        popen = subprocess.Popen

    workers = []
    job_ids = []
    worker_fds = {}
    job_iter = iter(job_iterable)

    def start_job(worker=None):
        """
        assign a job to an existing or newly created worker subprocess.
        returns (job_id, worker) or (None, None) when no more jobs
        """
        job_id, job_str = next(job_iter, (None, None))
        if job_str is None:
            return None, None
        job_str = job_str.rstrip(b'\n') + b'\n'
        if not worker:
            worker = popen(
                popen_arg,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
            )
        worker.stdin.write(job_str)
        worker.stdin.flush()
        return (job_id, worker)

    def assign_jobs():
        """
        start as many jobs as possible given maximum/idle workers and
        available jobs
        """
        while None in job_ids:
            wnum = job_ids.index(None)
            job_ids[wnum], w = start_job(workers[wnum])
            if w is None:
                return
        while len(workers) < num_workers:
            job_id, w = start_job()
            if w is None:
                return
            worker_fds[w.stdout] = len(workers)
            workers.append(w)
            job_ids.append(job_id)

    try:
        assign_jobs()

        while True:
            if all(i is None for i in job_ids):
                if stop_when_jobs_done:
                    return
                new_jobs = yield (None, None, None)
                # require new jobs to be submitted
                job_iter = iter(new_jobs)
                assign_jobs()
                continue

            try:
                readable, _, _ = select.select(worker_fds, [], [])
            except select.error as e:
                if e.args[0] == 10038:
                    # XXX: no many-worker support on windows yet
                    readable = list(worker_fds)[:1]
                else:
                    raise
            except __HOLE__:
                if stop_on_keyboard_interrupt:
                    return
                raise

            fd = readable[0]
            wnum = worker_fds[fd]
            w = workers[wnum]
            result = w.stdout.readline()
            finished = job_ids[wnum]
            job_ids[wnum], _ = start_job(w)
            new_jobs = yield (job_ids, finished, result)
            if new_jobs:
                job_iter = iter(new_jobs)
                assign_jobs()
    finally:
        for w in workers:
            w.stdin.close()
KeyboardInterrupt
dataset/ETHPy150Open ckan/ckanapi/ckanapi/cli/workers.py/worker_pool
7,438
def jwt_payload_handler(user):
    from jwt_auth import settings

    try:
        username = user.get_username()
    except __HOLE__:
        username = user.username

    return {
        'user_id': user.pk,
        'email': user.email,
        'username': username,
        'exp': datetime.utcnow() + settings.JWT_EXPIRATION_DELTA
    }
AttributeError
dataset/ETHPy150Open jpadilla/django-jwt-auth/jwt_auth/utils.py/jwt_payload_handler
7,439
def import_from_string(val):
    """
    Attempt to import a class from a string representation.

    From: https://github.com/tomchristie/django-rest-framework/blob/master/rest_framework/settings.py
    """
    try:
        # Nod to tastypie's use of importlib.
        parts = val.split('.')
        module_path, class_name = '.'.join(parts[:-1]), parts[-1]
        module = importlib.import_module(module_path)
        return getattr(module, class_name)
    except __HOLE__ as e:
        msg = "Could not import '%s' for setting. %s: %s." % (
            val, e.__class__.__name__, e)
        raise ImportError(msg)
ImportError
dataset/ETHPy150Open jpadilla/django-jwt-auth/jwt_auth/utils.py/import_from_string
7,440
def run_setup(setup_script, args):
    """Run a distutils setup script, sandboxed in its directory"""
    setup_dir = os.path.abspath(os.path.dirname(setup_script))
    with setup_context(setup_dir):
        try:
            sys.argv[:] = [setup_script] + list(args)
            sys.path.insert(0, setup_dir)
            # reset to include setup dir, w/clean callback list
            working_set.__init__()
            working_set.callbacks.append(lambda dist: dist.activate())

            def runner():
                ns = dict(__file__=setup_script, __name__='__main__')
                _execfile(setup_script, ns)

            DirectorySandbox(setup_dir).run(runner)
        except __HOLE__ as v:
            if v.args and v.args[0]:
                raise
            # Normal exit, just return
SystemExit
dataset/ETHPy150Open chalasr/Flask-P2P/venv/lib/python2.7/site-packages/setuptools/sandbox.py/run_setup
7,441
def get_lightcurve(self, star_id, return_1d=True):
    """Get the light curves for the given ID

    Parameters
    ----------
    star_id : int
        A valid integer star id representing an object in the dataset
    return_1d : boolean (default=True)
        Specify whether to return 1D arrays of (t, y, dy, filts) or
        2D arrays of (t, y, dy) where each column is a filter.

    Returns
    -------
    t, y, dy : np.ndarrays (if return_1d == False)
        Times, magnitudes, and magnitude errors.
        The shape of each array is [Nobs, 5], where the columns refer to
        [u,g,r,i,z] bands. Non-observations are indicated by NaN.

    t, y, dy, filts : np.ndarrays (if return_1d == True)
        Times, magnitudes, magnitude errors, and filters
        The shape of each array is [Nobs], and non-observations are
        filtered out.
    """
    filename = '{0}/{1}.dat'.format(self.dirname, star_id)

    try:
        data = np.loadtxt(self.data.extractfile(filename))
    except __HOLE__:
        raise ValueError("invalid star id: {0}".format(star_id))

    RA = data[:, 0]
    DEC = data[:, 1]
    t = data[:, 2::3]
    y = data[:, 3::3]
    dy = data[:, 4::3]

    nans = (y == -99.99)
    t[nans] = np.nan
    y[nans] = np.nan
    dy[nans] = np.nan

    if return_1d:
        t, y, dy, filts = np.broadcast_arrays(t, y, dy,
                                              ['u', 'g', 'r', 'i', 'z'])
        good = ~np.isnan(t)
        return t[good], y[good], dy[good], filts[good]
    else:
        return t, y, dy
KeyError
dataset/ETHPy150Open astroML/gatspy/gatspy/datasets/rrlyrae.py/RRLyraeLC.get_lightcurve
7,442
def get_template(self, template_id):
    """Get a particular lightcurve template

    Parameters
    ----------
    template_id : str
        id of desired template

    Returns
    -------
    phase : ndarray
        array of phases
    mag : ndarray
        array of normalized magnitudes
    """
    try:
        data = np.loadtxt(self.data.extractfile(template_id + '.dat'))
    except __HOLE__:
        raise ValueError("invalid star id: {0}".format(template_id))
    return data[:, 0], data[:, 1]
KeyError
dataset/ETHPy150Open astroML/gatspy/gatspy/datasets/rrlyrae.py/RRLyraeTemplates.get_template
7,443
def ReadDownloadedUrl(url, corpus):
    """Reads a downloaded URL from disk.

    Args:
      url: The URL to read.
      corpus: The corpus the URL belongs to.

    Returns:
      The content of the URL.
    """
    try:
        with open('%s/downloads/%s.html' % (corpus, Hashhex(url))) as f:
            return f.read()
    except __HOLE__:
        return None
IOError
dataset/ETHPy150Open deepmind/rc-data/generate_questions.py/ReadDownloadedUrl
7,444
def DownloadUrl(url, corpus, max_attempts=5, timeout=5):
    """Downloads a URL.

    Args:
      url: The URL.
      corpus: The corpus of the URL.
      max_attempts: Max attempts for downloading the URL.
      timeout: Connection timeout in seconds for each attempt.

    Returns:
      The HTML at the URL or None if the request failed.
    """
    try:
        with open('%s/downloads/%s.html' % (corpus, Hashhex(url))) as f:
            return f.read()
    except __HOLE__:
        pass

    attempts = 0

    while attempts < max_attempts:
        try:
            req = requests.get(url, allow_redirects=False, timeout=timeout)

            if req.status_code == requests.codes.ok:
                content = req.text.encode(req.encoding)

                with open('%s/downloads/%s.html' % (corpus, Hashhex(url)),
                          'w') as f:
                    f.write(content)

                return content
            elif (req.status_code in [301, 302, 404, 503] and
                  attempts == max_attempts - 1):
                return None
        except requests.exceptions.ConnectionError:
            pass
        except requests.exceptions.ContentDecodingError:
            return None
        except requests.exceptions.ChunkedEncodingError:
            return None
        except requests.exceptions.Timeout:
            pass
        except socket.timeout:
            pass

        # Exponential back-off.
        time.sleep(math.pow(2, attempts))
        attempts += 1

    return None
IOError
dataset/ETHPy150Open deepmind/rc-data/generate_questions.py/DownloadUrl
7,445
def DownloadMode(corpus, request_parallelism):
    """Downloads the URLs for the specified corpus.

    Args:
      corpus: A corpus.
      request_parallelism: The number of concurrent download requests.
    """
    missing_urls = []
    for dataset in datasets:
        print 'Downloading URLs for the %s set:' % dataset

        urls_filename = '%s/wayback_%s_urls.txt' % (corpus, dataset)
        urls = ReadUrls(urls_filename)

        missing_urls_filename = '%s/missing_urls.txt' % corpus
        if os.path.exists(missing_urls_filename):
            print 'Only downloading missing URLs'
            urls = list(set(urls).intersection(
                ReadUrls(missing_urls_filename)))

        p = ThreadPool(request_parallelism)
        results = p.imap_unordered(DownloadMapper,
                                   izip(urls, repeat(corpus)))

        progress_bar = ProgressBar(len(urls))

        collected_urls = []
        try:
            for url, story_html in results:
                if story_html:
                    collected_urls.append(url)
                progress_bar.Increment()
        except __HOLE__:
            print 'Interrupted by user'

        missing_urls.extend(set(urls) - set(collected_urls))

    WriteUrls('%s/missing_urls.txt' % corpus, missing_urls)

    if missing_urls:
        print('%d URLs couldn\'t be downloaded, see %s/missing_urls.txt.'
              % (len(missing_urls), corpus))
        print 'Try and run the command again to download the missing URLs.'
KeyboardInterrupt
dataset/ETHPy150Open deepmind/rc-data/generate_questions.py/DownloadMode
7,446
def is_feasible(language, commands):
    # This test should always work, otherwise the compiler is not present.
    routine = make_routine("test", x)
    numerical_tests = [
        ("test", (1.0,), 1.0, 1e-15),
        ("test", (-1.0,), -1.0, 1e-15),
    ]
    try:
        run_test("is_feasible", [routine], numerical_tests, language,
                 commands, friendly=False)
        return True
    except __HOLE__:
        return False
AssertionError
dataset/ETHPy150Open sympy/sympy/sympy/external/tests/test_codegen.py/is_feasible
7,447
def testStressfully(self):
    # Try a variety of sizes at and around powers of 2, and at powers of
    # 10.
    sizes = [0]
    for power in range(1, 10):
        n = 2 ** power
        sizes.extend(range(n - 1, n + 2))
    sizes.extend([10, 100, 1000])

    class Complains(object):
        maybe_complain = True

        def __init__(self, i):
            self.i = i

        def __lt__(self, other):
            if Complains.maybe_complain and random.random() < 0.001:
                if verbose:
                    print(" complaining at", self, other)
                raise RuntimeError
            return self.i < other.i

        def __repr__(self):
            return "Complains(%d)" % self.i

    class Stable(object):
        def __init__(self, key, i):
            self.key = key
            self.index = i

        def __lt__(self, other):
            return self.key < other.key

        def __repr__(self):
            return "Stable(%d, %d)" % (self.key, self.index)

    for n in sizes:
        x = list(range(n))
        if verbose:
            print("Testing size", n)

        s = x[:]
        check("identity", x, s)

        s = x[:]
        s.reverse()
        check("reversed", x, s)

        s = x[:]
        random.shuffle(s)
        check("random permutation", x, s)

        y = x[:]
        y.reverse()
        s = x[:]
        check("reversed via function", y, s, lambda a, b: (b > a) - (b < a))

        if verbose:
            print(" Checking against an insane comparison function.")
            print(" If the implementation isn't careful, this may segfault.")
        s = x[:]
        s.sort(key=cmp_to_key(lambda a, b: int(random.random() * 3) - 1))
        check("an insane function left some permutation", x, s)

        if len(x) >= 2:
            def bad_key(x):
                raise RuntimeError
            s = x[:]
            self.assertRaises(RuntimeError, s.sort, key=bad_key)

        x = [Complains(i) for i in x]
        s = x[:]
        random.shuffle(s)
        Complains.maybe_complain = True
        it_complained = False
        try:
            s.sort()
        except __HOLE__:
            it_complained = True
        if it_complained:
            Complains.maybe_complain = False
            check("exception during sort left some permutation", x, s)

        s = [Stable(random.randrange(10), i) for i in range(n)]
        augmented = [(e, e.index) for e in s]
        augmented.sort()  # forced stable because ties broken by index
        x = [e for e, i in augmented]  # a stable sort of s
        check("stability", x, s)

#==============================================================================
RuntimeError
dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_sort.py/TestBase.testStressfully
7,448
def getid(obj):
    """
    Abstracts the common pattern of allowing both an object or an
    object's ID as a parameter when dealing with relationships.
    """
    try:
        return obj.id
    except __HOLE__:
        return obj
AttributeError
dataset/ETHPy150Open kwminnick/rackspace-dns-cli/dnsclient/base.py/getid
7,449
def _list(self, url, response_key, obj_class=None, body=None):
    if body:
        _resp, body = self.api.client.post(url, body=body)
    else:
        _resp, body = self.api.client.get(url)

    if obj_class is None:
        obj_class = self.resource_class

    data = body[response_key]
    # NOTE(ja): keystone returns values as list as {'values': [ ... ]}
    #           unlike other services which just return the list...
    if isinstance(data, dict):
        try:
            data = data['values']
        except __HOLE__:
            pass

    with self.completion_cache('human_id', obj_class, mode="w"):
        with self.completion_cache('uuid', obj_class, mode="w"):
            return [obj_class(self, res, loaded=True)
                    for res in data if res]
KeyError
dataset/ETHPy150Open kwminnick/rackspace-dns-cli/dnsclient/base.py/Manager._list
7,450
@contextlib.contextmanager
def completion_cache(self, cache_type, obj_class, mode):
    """
    The completion cache stores items that can be used for bash
    autocompletion, like UUIDs or human-friendly IDs.

    A resource listing will clear and repopulate the cache.

    A resource create will append to the cache.

    Delete is not handled because listings are assumed to be performed
    often enough to keep the cache reasonably up-to-date.
    """
    base_dir = utils.env('NOVACLIENT_UUID_CACHE_DIR',
                         default="~/.novaclient")

    # NOTE(sirp): Keep separate UUID caches for each username + endpoint
    # pair
    username = utils.env('OS_USERNAME', 'NOVA_USERNAME')
    url = utils.env('OS_URL', 'NOVA_URL')
    uniqifier = hashlib.md5(username + url).hexdigest()

    cache_dir = os.path.expanduser(os.path.join(base_dir, uniqifier))

    try:
        os.makedirs(cache_dir, 0755)
    except OSError:
        # NOTE(kiall): This is typically either permission denied while
        #              attempting to create the directory, or the
        #              directory already exists. Either way, don't fail.
        pass

    resource = obj_class.__name__.lower()
    filename = "%s-%s-cache" % (resource, cache_type.replace('_', '-'))
    path = os.path.join(cache_dir, filename)

    cache_attr = "_%s_cache" % cache_type

    try:
        setattr(self, cache_attr, open(path, mode))
    except __HOLE__:
        # NOTE(kiall): This is typically a permission denied while
        #              attempting to write the cache file.
        pass

    try:
        yield
    finally:
        cache = getattr(self, cache_attr, None)
        if cache:
            cache.close()
            delattr(self, cache_attr)
IOError
dataset/ETHPy150Open kwminnick/rackspace-dns-cli/dnsclient/base.py/Manager.completion_cache
7,451
def findall(self, **kwargs):
    """
    Find all items with attributes matching ``**kwargs``.

    This isn't very efficient: it loads the entire list then filters on
    the Python side.
    """
    found = []
    searches = kwargs.items()

    for obj in self.list():
        try:
            if all(getattr(obj, attr) == value
                   for (attr, value) in searches):
                found.append(obj)
        except __HOLE__:
            continue

    return found
AttributeError
dataset/ETHPy150Open kwminnick/rackspace-dns-cli/dnsclient/base.py/ManagerWithFind.findall
7,452
def _add_details(self, info):
    for (k, v) in info.iteritems():
        try:
            setattr(self, k, v)
        except __HOLE__:
            # In this case we already defined the attribute on the class
            pass
AttributeError
dataset/ETHPy150Open kwminnick/rackspace-dns-cli/dnsclient/base.py/Resource._add_details
7,453
def _parse_version(text):
    "Internal parsing method. Factored out for testing purposes."
    major, major2, minor = VERSION_RE.search(text).groups()
    try:
        return int(major), int(major2), int(minor)
    except (ValueError, __HOLE__):
        return int(major), int(major2), None
TypeError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/db/backends/postgresql/version.py/_parse_version
7,454
def get_data(self, params, view_instance, view_method, request,
             args, kwargs):
    lookup_value = view_instance.kwargs[view_instance.lookup_field]
    try:
        queryset = view_instance.filter_queryset(
            view_instance.get_queryset()).filter(
                **{view_instance.lookup_field: lookup_value})
    except __HOLE__:
        return None
    else:
        return self._get_queryset_query_string(queryset)
ValueError
dataset/ETHPy150Open chibisov/drf-extensions/rest_framework_extensions/key_constructor/bits.py/RetrieveSqlQueryKeyBit.get_data
7,455
def __init__(self, **config):
    # Validate options are present
    for option in _configuration_options:
        if option not in config:
            raise ValueError("Missing configuration "
                             "option {!r}".format(option))

    # Feature extraction
    sparse_features = parse_features(config["sparse_features"])
    densifier = make_pipeline(Vectorizer(sparse_features, sparse=True),
                              ClassifierAsFeature())
    dense_features = parse_features(config["dense_features"])
    vectorization = make_union(densifier,
                               Vectorizer(dense_features, sparse=False))

    # Classifier
    try:
        classifier = _valid_classifiers[config["classifier"]]
    except __HOLE__:
        raise ValueError("Unknown classification algorithm "
                         "{!r}".format(config["classifier"]))
    classifier = classifier(**config["classifier_args"])

    self.pipeline = make_pipeline(vectorization, StandardScaler())
    self.classifier = classifier
KeyError
dataset/ETHPy150Open machinalis/iepy/iepy/extraction/relation_extraction_classifier.py/RelationExtractionClassifier.__init__
7,456
def details(self):
    """
    The Repair Details API includes the shipment information
    similar to the Repair Lookup API.

    >>> Repair('G135773004').details()  #doctest: +ELLIPSIS
    {'isACPlusConsumed': 'N', 'configuration': 'IPAD 3RD GEN,WIFI+CELLULAR,16GB,BLACK',...
    """
    self._namespace = "core:"
    details = self._submit("RepairDetailsRequest", "RepairDetails",
                           "lookupResponseData")

    # fix tracking URL, if available
    for i, p in enumerate(details.partsInfo):
        try:
            url = p.carrierURL.replace('<<TRKNO>>',
                                       str(p.deliveryTrackingNumber))
            details.partsInfo[i].carrierURL = url
        except __HOLE__:
            pass

    self._details = details
    return details
AttributeError
dataset/ETHPy150Open filipp/py-gsxws/gsxws/repairs.py/Repair.details
7,457
def load_yaml(config_files):
    """Load the config files based on environment settings

    Will try to load the first file in the list that it can find.

    Args:
        config_files(list): A list of config files, in the order that
            they will be checked and loaded.

    Returns:
        A dict mapping of the yaml file.

    Raises:
        IOError: No config file can be successfully loaded or found.
    """
    for config_file in config_files:
        try:
            return yaml.load(open(config_file, 'r').read())
        except (__HOLE__, IOError):
            continue
    raise IOError('Dataduct config file is missing')
OSError
dataset/ETHPy150Open coursera/dataduct/dataduct/config/config.py/load_yaml
7,458
def handle(self, **options):
    current_site = Site.objects.get_current()
    client = DisqusClient()
    verbosity = int(options.get('verbosity'))
    dry_run = bool(options.get('dry_run'))
    state_file = options.get('state_file')
    last_exported_id = None

    if state_file is not None and os.path.exists(state_file):
        last_exported_id = self._get_last_state(state_file)

    comments = self._get_comments_to_export(last_exported_id)
    comments_count = comments.count()
    if verbosity >= 1:
        print("Exporting %d comment(s)" % comments_count)

    # if this is a dry run, we output the comments and exit
    if dry_run:
        print("%s" % (comments,))
        return

    # if no comments were found we also exit
    if not comments_count:
        return

    # Get a list of all forums for an API key. Each API key can have
    # multiple forums associated. This application only supports the one
    # set in the DISQUS_WEBSITE_SHORTNAME variable
    forum_list = client.get_forum_list(
        user_api_key=settings.DISQUS_API_KEY)
    try:
        forum = [f for f in forum_list
                 if f['shortname'] == settings.DISQUS_WEBSITE_SHORTNAME][0]
    except __HOLE__:
        raise CommandError("Could not find forum. " +
                           "Please check your " +
                           "'DISQUS_WEBSITE_SHORTNAME' setting.")

    # Get the forum API key
    forum_api_key = client.get_forum_api_key(
        user_api_key=settings.DISQUS_API_KEY,
        forum_id=forum['id'])

    for comment in comments:
        if verbosity >= 1:
            print("Exporting comment '%s'" % comment)

        # Try to find a thread with the comments URL.
        url = 'http://%s%s' % (
            current_site.domain,
            comment.content_object.get_absolute_url())
        thread = client.get_thread_by_url(
            url=url,
            forum_api_key=forum_api_key)

        # if no thread with the URL could be found, we create a new one.
        # to do this, we first need to create the thread and then
        # update the thread with a URL.
        if not thread:
            thread = client.thread_by_identifier(
                forum_api_key=forum_api_key,
                identifier=force_text(comment.content_object),
                title=force_text(comment.content_object),
            )['thread']
            client.update_thread(
                forum_api_key=forum_api_key,
                thread_id=thread['id'],
                url=url)

        # name and email are optional in contrib.comments but required
        # in DISQUS. If they are not set, dummy values will be used
        client.create_post(
            forum_api_key=forum_api_key,
            thread_id=thread['id'],
            message=comment.comment.encode('utf-8'),
            author_name=comment.userinfo.get('name',
                                             'nobody').encode('utf-8'),
            author_email=comment.userinfo.get('email',
                                              'nobody@example.org'),
            author_url=comment.userinfo.get('url', ''),
            created_at=comment.submit_date.strftime('%Y-%m-%dT%H:%M'))

        if state_file is not None:
            self._save_state(state_file, comment.pk)
IndexError
dataset/ETHPy150Open arthurk/django-disqus/disqus/management/commands/disqus_export.py/Command.handle
7,459
def _to_json(self, resp):
    """
    Extract json from a response.
    Assumes response is valid otherwise. Internal use only.
    """
    try:
        json = resp.json()
    except __HOLE__ as e:
        reason = "TMC Server did not send valid JSON: {0}"
        raise APIError(reason.format(repr(e)))
    return json
ValueError
dataset/ETHPy150Open JuhaniImberg/tmc.py/tmc/api.py/API._to_json
7,460
def _run_scenario(self, cls, method, context, args):
    """Runs the specified benchmark scenario with given arguments.

    :param cls: The Scenario class where the scenario is implemented
    :param method: Name of the method that implements the scenario
    :param context: Benchmark context that contains users, admin & other
                    information, that was created before benchmark
                    started.
    :param args: Arguments to call the scenario method with

    :returns: List of results for each single scenario iteration,
              where each result is a dictionary
    """
    timeout = self.config.get("timeout", 600)
    concurrency = self.config.get("concurrency", 1)
    duration = self.config.get("duration")

    # FIXME(andreykurilin): unify `_worker_process`, use it here and
    #   remove usage of `multiprocessing.Pool` (usage of separate process
    #   for each concurrent iteration is redundant).
    pool = multiprocessing.Pool(concurrency)

    run_args = butils.infinite_run_args_generator(
        self._iter_scenario_args(cls, method, context, args,
                                 self.aborted))
    iter_result = pool.imap(_run_scenario_once_with_unpack_args, run_args)

    start = time.time()
    while True:
        try:
            result = iter_result.next(timeout)
        except multiprocessing.TimeoutError as e:
            result = runner.format_result_on_timeout(e, timeout)
        except __HOLE__:
            break

        self._send_result(result)

        if time.time() - start > duration:
            break

    pool.terminate()
    pool.join()
    self._flush_results()
StopIteration
dataset/ETHPy150Open openstack/rally/rally/plugins/common/runners/constant.py/ConstantForDurationScenarioRunner._run_scenario
7,461
def test_permutation_step_down_p():
    """Test cluster level permutations with step_down_p
    """
    try:
        try:
            from sklearn.feature_extraction.image import grid_to_graph
        except __HOLE__:
            from scikits.learn.feature_extraction.image import grid_to_graph  # noqa
    except ImportError:
        return
    rng = np.random.RandomState(0)
    # subjects, time points, spatial points
    X = rng.randn(9, 2, 10)
    # add some significant points
    X[:, 0:2, 0:2] += 2  # span two time points and two spatial points
    X[:, 1, 5:9] += 0.5  # span four time points with 4x smaller amplitude
    thresh = 2
    # make sure it works when we use ALL points in step-down
    t, clusters, p, H0 = \
        permutation_cluster_1samp_test(X, threshold=thresh,
                                       step_down_p=1.0)
    # make sure using step-down will actually yield improvements sometimes
    t, clusters, p_old, H0 = \
        permutation_cluster_1samp_test(X, threshold=thresh,
                                       step_down_p=0.0)
    assert_equal(np.sum(p_old < 0.05), 1)  # just spatial cluster
    t, clusters, p_new, H0 = \
        permutation_cluster_1samp_test(X, threshold=thresh,
                                       step_down_p=0.05)
    assert_equal(np.sum(p_new < 0.05), 2)  # time one rescued
    assert_true(np.all(p_old >= p_new))
ImportError
dataset/ETHPy150Open mne-tools/mne-python/mne/stats/tests/test_cluster_level.py/test_permutation_step_down_p
7,462
def test_cluster_permutation_with_connectivity():
    """Test cluster level permutations with connectivity matrix
    """
    try:
        try:
            from sklearn.feature_extraction.image import grid_to_graph
        except ImportError:
            from scikits.learn.feature_extraction.image import grid_to_graph
    except __HOLE__:
        return
    condition1_1d, condition2_1d, condition1_2d, condition2_2d = \
        _get_conditions()

    n_pts = condition1_1d.shape[1]
    # we don't care about p-values in any of these, so do fewer
    # permutations
    args = dict(seed=None, max_step=1, exclude=None,
                step_down_p=0, t_power=1, threshold=1.67,
                check_disjoint=False, n_permutations=50)

    did_warn = False
    for X1d, X2d, func, spatio_temporal_func in \
            [(condition1_1d, condition1_2d,
              permutation_cluster_1samp_test,
              spatio_temporal_cluster_1samp_test),
             ([condition1_1d, condition2_1d],
              [condition1_2d, condition2_2d],
              permutation_cluster_test,
              spatio_temporal_cluster_test)]:
        out = func(X1d, **args)
        connectivity = grid_to_graph(1, n_pts)
        out_connectivity = func(X1d, connectivity=connectivity, **args)
        assert_array_equal(out[0], out_connectivity[0])
        for a, b in zip(out_connectivity[1], out[1]):
            assert_array_equal(out[0][a], out[0][b])
            assert_true(np.all(a[b]))

        # test spatio-temporal w/o time connectivity (repeat spatial
        # pattern)
        connectivity_2 = sparse.coo_matrix(
            linalg.block_diag(connectivity.asfptype().todense(),
                              connectivity.asfptype().todense()))

        if isinstance(X1d, list):
            X1d_2 = [np.concatenate((x, x), axis=1) for x in X1d]
        else:
            X1d_2 = np.concatenate((X1d, X1d), axis=1)

        out_connectivity_2 = func(X1d_2, connectivity=connectivity_2,
                                  **args)
        # make sure we were operating on the same values
        split = len(out[0])
        assert_array_equal(out[0], out_connectivity_2[0][:split])
        assert_array_equal(out[0], out_connectivity_2[0][split:])

        # make sure we really got 2x the number of original clusters
        n_clust_orig = len(out[1])
        assert_true(len(out_connectivity_2[1]) == 2 * n_clust_orig)

        # Make sure that we got the old ones back
        data_1 = set([np.sum(out[0][b[:n_pts]]) for b in out[1]])
        data_2 = set([np.sum(out_connectivity_2[0][a]) for a in
                      out_connectivity_2[1][:]])
        assert_true(len(data_1.intersection(data_2)) == len(data_1))

        # now use the other algorithm
        if isinstance(X1d, list):
            X1d_3 = [np.reshape(x, (-1, 2, n_space)) for x in X1d_2]
        else:
            X1d_3 = np.reshape(X1d_2, (-1, 2, n_space))

        out_connectivity_3 = spatio_temporal_func(
            X1d_3, n_permutations=50, connectivity=connectivity,
            max_step=0, threshold=1.67, check_disjoint=True)
        # make sure we were operating on the same values
        split = len(out[0])
        assert_array_equal(out[0], out_connectivity_3[0][0])
        assert_array_equal(out[0], out_connectivity_3[0][1])

        # make sure we really got 2x the number of original clusters
        assert_true(len(out_connectivity_3[1]) == 2 * n_clust_orig)

        # Make sure that we got the old ones back
        data_1 = set([np.sum(out[0][b[:n_pts]]) for b in out[1]])
        data_2 = set([np.sum(out_connectivity_3[0][a[0], a[1]]) for a in
                      out_connectivity_3[1]])
        assert_true(len(data_1.intersection(data_2)) == len(data_1))

        # test new versus old method
        out_connectivity_4 = spatio_temporal_func(
            X1d_3, n_permutations=50, connectivity=connectivity,
            max_step=2, threshold=1.67)
        out_connectivity_5 = spatio_temporal_func(
            X1d_3, n_permutations=50, connectivity=connectivity,
            max_step=1, threshold=1.67)

        # clusters could be in a different order
        sums_4 = [np.sum(out_connectivity_4[0][a])
                  for a in out_connectivity_4[1]]
        sums_5 = [np.sum(out_connectivity_4[0][a])
                  for a in out_connectivity_5[1]]
        sums_4 = np.sort(sums_4)
        sums_5 = np.sort(sums_5)
        assert_array_almost_equal(sums_4, sums_5)

        if not _force_serial:
            assert_raises(ValueError, spatio_temporal_func, X1d_3,
                          n_permutations=1, connectivity=connectivity,
                          max_step=1, threshold=1.67, n_jobs=-1000)

        # not enough TFCE params
        assert_raises(KeyError, spatio_temporal_func, X1d_3,
                      connectivity=connectivity,
                      threshold=dict(me='hello'))

        # too extreme a start threshold
        with warnings.catch_warnings(record=True) as w:
            spatio_temporal_func(X1d_3, connectivity=connectivity,
                                 threshold=dict(start=10, step=1))
        if not did_warn:
            assert_true(len(w) == 1)
            did_warn = True

        # too extreme a start threshold
        assert_raises(ValueError, spatio_temporal_func, X1d_3,
                      connectivity=connectivity, tail=-1,
                      threshold=dict(start=1, step=-1))
        assert_raises(ValueError, spatio_temporal_func, X1d_3,
                      connectivity=connectivity, tail=-1,
                      threshold=dict(start=-1, step=1))

        # wrong type for threshold
        assert_raises(TypeError, spatio_temporal_func, X1d_3,
                      connectivity=connectivity, threshold=[])

        # wrong value for tail
        assert_raises(ValueError, spatio_temporal_func, X1d_3,
                      connectivity=connectivity, tail=2)

        # make sure it actually found a significant point
        out_connectivity_6 = spatio_temporal_func(
            X1d_3, n_permutations=50, connectivity=connectivity,
            max_step=1, threshold=dict(start=1, step=1))
        assert_true(np.min(out_connectivity_6[2]) < 0.05)
ImportError
dataset/ETHPy150Open mne-tools/mne-python/mne/stats/tests/test_cluster_level.py/test_cluster_permutation_with_connectivity
7,463
@slow_test
def test_permutation_connectivity_equiv():
    """Test cluster level permutations with and without connectivity
    """
    try:
        try:
            from sklearn.feature_extraction.image import grid_to_graph
        except __HOLE__:
            from scikits.learn.feature_extraction.image import grid_to_graph
    except ImportError:
        return
    rng = np.random.RandomState(0)
    # subjects, time points, spatial points
    n_time = 2
    n_space = 4
    X = rng.randn(6, n_time, n_space)
    # add some significant points
    X[:, :, 0:2] += 10  # span two time points and two spatial points
    X[:, 1, 3] += 20  # span one time point
    max_steps = [1, 1, 1, 2]
    # This will run full algorithm in two ways, then the ST-algorithm in 2 ways
    # All of these should give the same results
    conns = [None, grid_to_graph(n_time, n_space),
             grid_to_graph(1, n_space), grid_to_graph(1, n_space)]
    stat_map = None
    thresholds = [2, dict(start=1.5, step=1.0)]
    sig_counts = [2, 5]
    sdps = [0, 0.05, 0.05]
    ots = ['mask', 'mask', 'indices']
    stat_fun = partial(ttest_1samp_no_p, sigma=1e-3)
    for thresh, count in zip(thresholds, sig_counts):
        cs = None
        ps = None
        for max_step, conn in zip(max_steps, conns):
            for sdp, ot in zip(sdps, ots):
                t, clusters, p, H0 = \
                    permutation_cluster_1samp_test(
                        X, threshold=thresh, connectivity=conn, n_jobs=2,
                        max_step=max_step, stat_fun=stat_fun,
                        step_down_p=sdp, out_type=ot)
                # make sure our output datatype is correct
                if ot == 'mask':
                    assert_true(isinstance(clusters[0], np.ndarray))
                    assert_true(clusters[0].dtype == bool)
                    assert_array_equal(clusters[0].shape, X.shape[1:])
                else:  # ot == 'indices'
                    assert_true(isinstance(clusters[0], tuple))

                # make sure all comparisons were done; for TFCE, no perm
                # should come up empty
                if count == 8:
                    assert_true(not np.any(H0 == 0))
                inds = np.where(p < 0.05)[0]
                assert_true(len(inds) == count)
                this_cs = [clusters[ii] for ii in inds]
                this_ps = p[inds]
                this_stat_map = np.zeros((n_time, n_space), dtype=bool)
                for ci, c in enumerate(this_cs):
                    if isinstance(c, tuple):
                        this_c = np.zeros((n_time, n_space), bool)
                        for x, y in zip(c[0], c[1]):
                            this_stat_map[x, y] = True
                            this_c[x, y] = True
                        this_cs[ci] = this_c
                        c = this_c
                    this_stat_map[c] = True
                if cs is None:
                    ps = this_ps
                    cs = this_cs
                if stat_map is None:
                    stat_map = this_stat_map
                assert_array_equal(ps, this_ps)
                assert_true(len(cs) == len(this_cs))
                for c1, c2 in zip(cs, this_cs):
                    assert_array_equal(c1, c2)
                assert_array_equal(stat_map, this_stat_map)
ImportError
dataset/ETHPy150Open mne-tools/mne-python/mne/stats/tests/test_cluster_level.py/test_permutation_connectivity_equiv
7,464
@slow_test
def spatio_temporal_cluster_test_connectivity():
    """Test spatio-temporal cluster permutations
    """
    try:
        try:
            from sklearn.feature_extraction.image import grid_to_graph
        except __HOLE__:
            from scikits.learn.feature_extraction.image import grid_to_graph
    except ImportError:
        return
    condition1_1d, condition2_1d, condition1_2d, condition2_2d = \
        _get_conditions()

    rng = np.random.RandomState(0)
    noise1_2d = rng.randn(condition1_2d.shape[0], condition1_2d.shape[1], 10)
    data1_2d = np.transpose(np.dstack((condition1_2d, noise1_2d)), [0, 2, 1])

    noise2_d2 = rng.randn(condition2_2d.shape[0], condition2_2d.shape[1], 10)
    data2_2d = np.transpose(np.dstack((condition2_2d, noise2_d2)), [0, 2, 1])

    conn = grid_to_graph(data1_2d.shape[-1], 1)

    threshold = dict(start=4.0, step=2)
    T_obs, clusters, p_values_conn, hist = \
        spatio_temporal_cluster_test([data1_2d, data2_2d],
                                     connectivity=conn, n_permutations=50,
                                     tail=1, seed=1, threshold=threshold,
                                     buffer_size=None)

    buffer_size = data1_2d.size // 10
    T_obs, clusters, p_values_no_conn, hist = \
        spatio_temporal_cluster_test([data1_2d, data2_2d],
                                     n_permutations=50, tail=1, seed=1,
                                     threshold=threshold, n_jobs=2,
                                     buffer_size=buffer_size)

    assert_equal(np.sum(p_values_conn < 0.05),
                 np.sum(p_values_no_conn < 0.05))

    # make sure results are the same without buffer_size
    T_obs, clusters, p_values2, hist2 = \
        spatio_temporal_cluster_test([data1_2d, data2_2d],
                                     n_permutations=50, tail=1, seed=1,
                                     threshold=threshold, n_jobs=2,
                                     buffer_size=None)
    assert_array_equal(p_values_no_conn, p_values2)
ImportError
dataset/ETHPy150Open mne-tools/mne-python/mne/stats/tests/test_cluster_level.py/spatio_temporal_cluster_test_connectivity
7,465
def _take(self, size):
        frame = np.zeros(size)
        count = 0
        for frame_index in range(size):
            offset = self.offset
            # offset = k + (j / self.resolution)
            k = int(offset)  # integer part
            j = int((offset - k) * self.resolution)  # fractional part
            coeffs = self.filt[j]  # choose correct filter phase
            end = k + self.width
            # process input until all buffer is full with samples
            try:
                while self.index < end:
                    self.buff[:-1] = self.buff[1:]
                    self.buff[-1] = next(self.src)  # throws StopIteration
                    self.index += 1
            except __HOLE__:
                break

            self.offset += self.freq
            # apply interpolation filter
            frame[frame_index] = np.dot(coeffs, self.buff)
            count = frame_index + 1
        return self.equalizer(frame[:count])
StopIteration
dataset/ETHPy150Open romanz/amodem/amodem/sampling.py/Sampler._take
7,466
def get(self, key, default=None):
        try:
            return self[key]
        except __HOLE__:
            return default
KeyError
dataset/ETHPy150Open fluentpython/example-code/attic/dicts/strkeydict_dictsub.py/StrKeyDict.get
7,467
def __init__(self, driver, timeout, poll_frequency=POLL_FREQUENCY,
                 ignored_exceptions=None):
        """Constructor, takes a WebDriver instance and timeout in seconds.

           :Args:
            - driver - Instance of WebDriver (Ie, Firefox, Chrome or Remote)
            - timeout - Number of seconds before timing out
            - poll_frequency - sleep interval between calls
              By default, it is 0.5 second.
            - ignored_exceptions - iterable structure of exception classes
              ignored during calls.
              By default, it contains NoSuchElementException only.

           Example:
            from selenium.webdriver.support.ui import WebDriverWait \n
            element = WebDriverWait(driver, 10).until(lambda x: x.find_element_by_id("someId")) \n
            is_disappeared = WebDriverWait(driver, 30, 1, (ElementNotVisibleException)).\ \n
                        until_not(lambda x: x.find_element_by_id("someId").is_displayed())
        """
        self._driver = driver
        self._timeout = timeout
        self._poll = poll_frequency
        # avoid the divide by zero
        if self._poll == 0:
            self._poll = POLL_FREQUENCY
        exceptions = IGNORED_EXCEPTIONS
        if ignored_exceptions is not None:
            try:
                exceptions.extend(iter(ignored_exceptions))
            except __HOLE__:  # ignored_exceptions is not iterable
                exceptions.append(ignored_exceptions)
        self._ignored_exceptions = tuple(exceptions)
TypeError
dataset/ETHPy150Open apiad/sublime-browser-integration/selenium/webdriver/support/wait.py/WebDriverWait.__init__
7,468
def check_canary_table(self):
        try:
            flows = self.dump_flows(constants.CANARY_TABLE)
        except __HOLE__:
            LOG.exception(_LE("Failed to communicate with the switch"))
            return constants.OVS_DEAD
        return constants.OVS_NORMAL if flows else constants.OVS_RESTARTED
RuntimeError
dataset/ETHPy150Open openstack/neutron/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_int.py/OVSIntegrationBridge.check_canary_table
7,469
def get_default_columns(self, with_aliases=False, col_aliases=None,
                            start_alias=None, opts=None, as_pairs=False):
        """
        Computes the default columns for selecting every field in the base
        model.  Returns a list of strings, quoted appropriately for use in
        SQL directly, as well as a set of aliases used in the select
        statement.

        This routine is overridden from Query to handle customized selection
        of geometry columns.
        """
        result = []
        if opts is None:
            opts = self.model._meta
        if start_alias:
            table_alias = start_alias
        else:
            table_alias = self.tables[0]
        root_pk = self.model._meta.pk.column
        seen = {None: table_alias}
        aliases = set()
        for field, model in opts.get_fields_with_model():
            try:
                alias = seen[model]
            except __HOLE__:
                alias = self.join((table_alias, model._meta.db_table,
                                   root_pk, model._meta.pk.column))
                seen[model] = alias

            if as_pairs:
                result.append((alias, field.column))
                continue
            # This part of the function is customized for GeoQuery. We
            # see if there was any custom selection specified in the
            # dictionary, and set up the selection format appropriately.
            field_sel = self.get_field_select(field, alias)
            if with_aliases and field.column in col_aliases:
                c_alias = 'Col%d' % len(col_aliases)
                result.append('%s AS %s' % (field_sel, c_alias))
                col_aliases.add(c_alias)
                aliases.add(c_alias)
            else:
                r = field_sel
                result.append(r)
                aliases.add(r)
                if with_aliases:
                    col_aliases.add(field.column)
        if as_pairs:
            return result, None
        return result, aliases
KeyError
dataset/ETHPy150Open dcramer/django-compositepks/django/contrib/gis/db/models/sql/query.py/GeoQuery.get_default_columns
7,470
def _check_geo_field(self, model, name_param):
        """
        Recursive utility routine for checking the given name parameter on the
        given model.  Initially, the name parameter is a string, of the field
        on the given model e.g., 'point', 'the_geom'.  Related model field
        strings like 'address__point', may also be used.

        If a GeometryField exists according to the given name parameter it
        will be returned, otherwise returns False.
        """
        if isinstance(name_param, basestring):
            # This takes into account the situation where the name is a
            # lookup to a related geographic field, e.g., 'address__point'.
            name_param = name_param.split(sql.constants.LOOKUP_SEP)
            name_param.reverse()  # Reversing so list operates like a queue of related lookups.
        elif not isinstance(name_param, list):
            raise TypeError
        try:
            # Getting the name of the field for the model (by popping the first
            # name from the `name_param` list created above).
            fld, mod, direct, m2m = model._meta.get_field_by_name(name_param.pop())
        except (FieldDoesNotExist, __HOLE__):
            return False

        # TODO: ManyToManyField?
        if isinstance(fld, GeometryField):
            return fld  # A-OK.
        elif isinstance(fld, ForeignKey):
            # ForeignKey encountered, return the output of this utility called
            # on the _related_ model with the remaining name parameters.
            return self._check_geo_field(fld.rel.to, name_param)  # Recurse to check ForeignKey relation.
        else:
            return False
IndexError
dataset/ETHPy150Open dcramer/django-compositepks/django/contrib/gis/db/models/sql/query.py/GeoQuery._check_geo_field
7,471
def __init__(self, expression, column_name, data_type):
        if not hasattr(column_name, 'resolve_expression'):
            column_name = Value(column_name)

        try:
            output_field = self.TYPE_MAP[data_type]
        except __HOLE__:
            raise ValueError("Invalid data_type '{}'".format(data_type))

        if data_type == 'BINARY':
            output_field = output_field()  # no spec
        else:
            output_field = output_field

        super(ColumnGet, self).__init__(expression, column_name,
                                        output_field=output_field,
                                        data_type=data_type)
KeyError
dataset/ETHPy150Open adamchainz/django-mysql/django_mysql/models/functions.py/ColumnGet.__init__
7,472
def __getitem__(self, *keys):
        """ to access an obj with key: 'n!##!m...!##!z', caller can pass as key:
        - n!##!m...!##!z
        - n, m, ..., z
        - z

        when separater == !##!

        :param dict keys: keys to access via scopes.
        """
        k = six.moves.reduce(lambda k1, k2: scope_compose(k1, k2, sep=self.__sep), keys[0]) if isinstance(keys[0], tuple) else keys[0]
        try:
            return super(ScopeDict, self).__getitem__(k)
        except __HOLE__ as e:
            ret = []
            for ik in self.keys():
                if ik.endswith(k):
                    ret.append(ik)
            if len(ret) == 1:
                return super(ScopeDict, self).__getitem__(ret[0])
            elif len(ret) > 1:
                raise ValueError('Multiple occurrence of key: {0}'.format(k))

            raise e
KeyError
dataset/ETHPy150Open mission-liao/pyswagger/pyswagger/utils.py/ScopeDict.__getitem__
7,473
def import_string(name):
    """ import module
    """
    mod = fp = None

    # code below, please refer to
    # https://docs.python.org/2/library/imp.html
    # for details
    try:
        return sys.modules[name]
    except KeyError:
        pass

    try:
        fp, pathname, desc = imp.find_module(name)
        mod = imp.load_module(name, fp, pathname, desc)
    except __HOLE__:
        mod = None
    finally:
        # Since we may exit via an exception, close fp explicitly.
        if fp:
            fp.close()

    return mod
ImportError
dataset/ETHPy150Open mission-liao/pyswagger/pyswagger/utils.py/import_string
7,474
def collect(self):
        url = "%s://%s:%s/vars" % (self.config['scheme'],
                                   self.config['host'],
                                   self.config['port'])
        response = urllib2.urlopen(url)

        for line in response.readlines():
            properties = line.split()

            # Not all lines returned will have a numeric metric.
            # To account for this, we attempt to cast the 'value'
            # portion as a float. If that's not possible, NBD, we
            # just move on.
            try:
                if len(properties) > 1:
                    subpath = properties[0].replace('/', '.').replace('_', '.')
                    value = float(properties[1])
                    self.publish(subpath, value)
            except __HOLE__:
                continue
ValueError
dataset/ETHPy150Open Yelp/fullerite/src/diamond/collectors/aurora/aurora.py/AuroraCollector.collect
7,475
def __init__(self, f_set, parent1, parent2):
        self.f_set = deepcopy(f_set)
        self.index = -1  # -1 means it has not been discovered yet
        self.p1 = parent1
        self.p2 = parent2
        try:
            self.hp_dtype = np.dtype('float128')
        except __HOLE__:
            self.hp_dtype = np.dtype('float64')
        self.a = np.array(0., dtype=self.hp_dtype)
        self.b = np.array(0., dtype=self.hp_dtype)
        self.e = np.array(0., dtype=self.hp_dtype)
TypeError
dataset/ETHPy150Open rlpy/rlpy/rlpy/Representations/iFDD.py/iFDDK_potential.__init__
7,476
def __init__(self, domain, discovery_threshold, initial_representation,
                 sparsify=True, discretization=20, debug=0, useCache=0,
                 kappa=1e-5, lambda_=0., lazy=False):
        try:
            self.hp_dtype = np.dtype('float128')
        except __HOLE__:
            self.hp_dtype = np.dtype('float64')
        self.y_a = defaultdict(lambda: np.array(0., dtype=self.hp_dtype))
        self.y_b = defaultdict(lambda: np.array(0., dtype=self.hp_dtype))
        self.lambda_ = lambda_
        self.kappa = kappa
        self.discount_factor = domain.discount_factor
        self.lazy = lazy  # lazy updating?
        super(iFDDK, self).__init__(
            domain, discovery_threshold, initial_representation,
            sparsify=sparsify, discretization=discretization,
            debug=debug, useCache=useCache)
TypeError
dataset/ETHPy150Open rlpy/rlpy/rlpy/Representations/iFDD.py/iFDDK.__init__
7,477
def subdivide(network, pores, shape, labels=[]):
    r'''
    It trim the pores and replace them by cubic networks with the sent shape.

    Parameters
    ----------
    network : OpenPNM Network Object

    pores : array_like
        The first group of pores to be replaced

    shape : array_like
        The shape of cubic networks in the target locations

    Notes
    -----
    - It works only for cubic networks.

    Examples
    --------
    >>> import OpenPNM
    >>> pn = OpenPNM.Network.Cubic(shape=[5,6,5], spacing=0.001)
    >>> pn.Np
    150
    >>> nano_pores = [2,13,14,15]
    >>> pn.subdivide(pores=nano_pores, shape=[4,7,3], labels='nano')
    >>> pn.Np
    482
    >>> assert pn.Np == (150+4*(4*7*3)-4)

    '''
    mro = [item.__name__ for item in network.__class__.__mro__]
    if 'Cubic' not in mro:
        raise Exception('Subdivide is only supported for Cubic Networks')
    from OpenPNM.Network import Cubic
    pores = _sp.array(pores, ndmin=1)

    # Checks to find boundary pores in the selected pores
    try:
        b = network.pores('boundary')
        if (_sp.in1d(pores, b)).any():
            raise Exception('boundary pores cannot be subdivided!')
    except __HOLE__:
        pass

    # Assigning right shape and division
    if _sp.size(shape) != 2 and _sp.size(shape) != 3:
        raise Exception('Subdivide not implemented for Networks other than '
                        '2D and 3D')
    elif _sp.size(shape) == 3 and 1 not in shape:
        div = _sp.array(shape, ndmin=1)
        single_dim = None
    else:
        single_dim = _sp.where(_sp.array(network._shape) == 1)[0]
        if _sp.size(single_dim) == 0:
            single_dim = None
        if _sp.size(shape) == 3:
            div = _sp.array(shape, ndmin=1)
        else:
            div = _sp.zeros(3, dtype=_sp.int32)
            if single_dim is None:
                dim = 2
            else:
                dim = single_dim
            div[dim] = 1
            div[-_sp.array(div, ndmin=1, dtype=bool)] = \
                _sp.array(shape, ndmin=1)

    # Creating small network and handling labels
    network_spacing = network._spacing
    new_net_spacing = network_spacing/div
    new_net = Cubic(shape=div, spacing=new_net_spacing)
    main_labels = ['left', 'right', 'front', 'back', 'top', 'bottom']
    if single_dim is not None:
        label_groups = _sp.array([['front', 'back'],
                                  ['left', 'right'],
                                  ['top', 'bottom']])
        non_single_labels = label_groups[_sp.array([0, 1, 2]) != single_dim]
    for l in main_labels:
        new_net['pore.surface_' + l] = False
        network['pore.surface_' + l] = False
        if single_dim is None:
            new_net['pore.surface_' + l][new_net.pores(labels=l)] = True
        else:
            for ind in [0, 1]:
                loc = (non_single_labels[ind] == l)
                temp_pores = new_net.pores(non_single_labels[ind][loc])
                new_net['pore.surface_' + l][temp_pores] = True

    old_coords = _sp.copy(new_net['pore.coords'])
    if labels == []:
        labels = ['pore.subdivided_' + new_net.name]
    for P in pores:
        # Shifting the new network to the right location and attaching it to
        # the main network
        shift = network['pore.coords'][P] - network_spacing/2
        new_net['pore.coords'] += shift
        Pn = network.find_neighbor_pores(pores=P)
        try:
            Pn_new_net = network.pores(labels)
        except:
            Pn_new_net = []
        Pn_old_net = Pn[~_sp.in1d(Pn, Pn_new_net)]
        Np1 = network.Np
        extend(pore_coords=new_net['pore.coords'],
               throat_conns=new_net['throat.conns'] + Np1,
               labels=labels, network=network)

        # Moving the temporary labels to the big network
        for l in main_labels:
            network['pore.surface_'+l][Np1:] = new_net['pore.surface_'+l]

        # Stitching the old pores of the main network to the new extended pores
        surf_pores = network.pores('surface_*')
        surf_coord = network['pore.coords'][surf_pores]
        for neighbor in Pn:
            neighbor_coord = network['pore.coords'][neighbor]
            dist = [round(_sp.inner(neighbor_coord-x, neighbor_coord-x), 20)
                    for x in surf_coord]
            nearest_neighbor = surf_pores[dist == _sp.amin(dist)]
            if neighbor in Pn_old_net:
                coplanar_labels = network.labels(pores=nearest_neighbor)
                new_neighbors = network.pores(coplanar_labels,
                                              mode='intersection')
                # This might happen to the edge of the small network
                if _sp.size(new_neighbors) == 0:
                    labels = network.labels(pores=nearest_neighbor,
                                            mode='intersection')
                    common_label = [l for l in labels if 'surface_' in l]
                    new_neighbors = network.pores(common_label)
            elif neighbor in Pn_new_net:
                new_neighbors = nearest_neighbor
            connect_pores(network=network, pores1=neighbor,
                          pores2=new_neighbors, labels=labels)

        # Removing temporary labels
        for l in main_labels:
            network['pore.surface_' + l] = False
        new_net['pore.coords'] = _sp.copy(old_coords)

    network._label_surfaces()
    for l in main_labels:
        del network['pore.surface_'+l]
    trim(network=network, pores=pores)
KeyError
dataset/ETHPy150Open PMEAL/OpenPNM/OpenPNM/Network/tools.py/subdivide
7,478
def recv_into(self, buffer):
        index = 0

        try:
            for _ in range(self.read_count):
                pkt = self.inbound_packets.pop(0)
                buffer[index:index+len(pkt)] = pkt
                index += len(pkt)
        except __HOLE__:
            pass

        return index
IndexError
dataset/ETHPy150Open Lukasa/hyper/test/test_socket.py/DummySocket.recv_into
7,479
def generate_css_from_sass(self, css_file, sass_file):
        if not self.process_args:
            log.info(
                'Not generating files since {0!r} is not executable'.format(
                    self.sass_bin_path
                )
            )
            return
        if not os.path.isfile(css_file):
            css_mtime = -1
        else:
            css_mtime = os.path.getmtime(css_file)
        sass_mtime = os.path.getmtime(sass_file)
        if sass_mtime >= css_mtime:
            log.info("Generating %s from %s", os.path.basename(css_file),
                     os.path.basename(sass_file))
            args = self.process_args + [sass_file, css_file]
            log.debug("Subprocess call \"%s\"", ' '.join(args))
            try:
                retcode = subprocess.call(args, shell=False)
                if retcode != 0:
                    log.error("Failed to compile %s", sass_file)
                    if os.path.isfile(css_file):
                        os.remove(css_file)
            except __HOLE__, err:
                log.exception(err)
                if os.path.isfile(css_file):
                    os.remove(css_file)
OSError
dataset/ETHPy150Open UfSoft/Flask-Sass/flask_sass.py/Sass.generate_css_from_sass
7,480
@property
    def revision(self):
        try:
            return self.revision_set.latest('pub_date')
        except __HOLE__:
            return None
ObjectDoesNotExist
dataset/ETHPy150Open kylef-archive/lithium/lithium/wiki/models.py/Page.revision
7,481
def _load_sensu_team_data():
    try:
        with open('/etc/sensu/team_data.json') as f:
            team_data = json.load(f)
    except __HOLE__:
        log.warning("No Sensu Team data (/etc/sensu/team_data.json) available. Using empty defaults")
        team_data = {}
    return team_data
IOError
dataset/ETHPy150Open Yelp/paasta/paasta_tools/monitoring_tools.py/_load_sensu_team_data
7,482
def process_request(self, request):
        "Process request"

        hmodules = dict()

        for module in settings.INSTALLED_APPS:
            import_name = str(module) + "." + settings.HARDTREE_MODULE_IDENTIFIER
            try:
                hmodule = __import__(import_name, fromlist=[str(module)])
                hmodules[str(module)] = hmodule.PROPERTIES
            except __HOLE__:
                pass
            except AttributeError:
                pass

        dbmodules = Module.objects.all()
        for dbmodule in dbmodules:
            if dbmodule.name not in hmodules:
                dbmodule.delete()
            else:
                differ = False
                hmodule = dbmodule.name
                if dbmodule.title != hmodules[hmodule]['title']:
                    dbmodule.title = hmodules[hmodule]['title']
                    differ = True
                if dbmodule.url != hmodules[hmodule]['url']:
                    dbmodule.url = hmodules[hmodule]['url']
                    differ = True
                if dbmodule.details != hmodules[hmodule]['details']:
                    dbmodule.details = hmodules[hmodule]['details']
                    differ = True
                if dbmodule.system != hmodules[hmodule]['system']:
                    dbmodule.system = hmodules[hmodule]['system']
                    differ = True
                if differ:
                    dbmodule.save()

        for hmodule in hmodules:
            dbmodule = None
            try:
                dbmodule = Module.objects.get(name=hmodule)
            except Module.DoesNotExist:
                pass
            except Module.MultipleObjectsReturned:
                # Broken database, delete all matching modules
                Module.objects.filter(name=hmodule).delete()
            if not dbmodule:
                dbmodule = Module(name=hmodule,
                                  title=hmodules[hmodule]['title'],
                                  url=hmodules[hmodule]['url'],
                                  details=hmodules[hmodule]['details'],
                                  system=hmodules[hmodule]['system'])
                dbmodule.save()
                dbmodule.set_default_user()
ImportError
dataset/ETHPy150Open treeio/treeio/treeio/core/middleware/modules.py/ModuleDetect.process_request
7,483
def loadTestsFromName(self, name, module=None):
        """Return a suite of all tests cases given a string specifier.

        The name may resolve either to a module, a test case class, a test
        method within a test case class, or a callable object which returns
        a TestCase or TestSuite instance.

        The method optionally resolves the names relative to a given module.
        """
        parts = name.split('.')
        unused_parts = []
        if module is None:
            if not parts:
                raise ValueError("incomplete test name: %s" % name)
            else:
                parts_copy = parts[:]
                while parts_copy:
                    target = ".".join(parts_copy)
                    if target in sys.modules:
                        module = reload(sys.modules[target])
                        parts = unused_parts
                        break
                    else:
                        try:
                            module = __import__(target)
                            parts = unused_parts
                            break
                        except __HOLE__:
                            unused_parts.insert(0, parts_copy[-1])
                            del parts_copy[-1]
                            if not parts_copy:
                                raise
                parts = parts[1:]
        obj = module
        for part in parts:
            obj = getattr(obj, part)

        if type(obj) == types.ModuleType:
            return self.loadTestsFromModule(obj)
        elif (((py3k and isinstance(obj, type))
               or isinstance(obj, (type, types.ClassType)))
              and issubclass(obj, TestCase)):
            return self.loadTestsFromTestCase(obj)
        elif type(obj) == types.UnboundMethodType:
            if py3k:
                return obj.__self__.__class__(obj.__name__)
            else:
                return obj.im_class(obj.__name__)
        elif hasattr(obj, '__call__'):
            test = obj()
            if not isinstance(test, TestCase) and \
               not isinstance(test, TestSuite):
                raise ValueError("calling %s returned %s, "
                                 "not a test" % (obj, test))
            return test
        else:
            raise ValueError("do not know how to make test from: %s" % obj)
ImportError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/cherrypy/cherrypy/test/webtest.py/ReloadingTestLoader.loadTestsFromName
7,484
def set_persistent(self, on=True, auto_open=False):
        """Make our HTTP_CONN persistent (or not).

        If the 'on' argument is True (the default), then self.HTTP_CONN
        will be set to an instance of HTTPConnection (or HTTPS
        if self.scheme is "https"). This will then persist across requests.

        We only allow for a single open connection, so if you call this
        and we currently have an open connection, it will be closed.
        """
        try:
            self.HTTP_CONN.close()
        except (__HOLE__, AttributeError):
            pass

        if on:
            self.HTTP_CONN = self.get_conn(auto_open=auto_open)
        else:
            if self.scheme == "https":
                self.HTTP_CONN = HTTPSConnection
            else:
                self.HTTP_CONN = HTTPConnection
TypeError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/cherrypy/cherrypy/test/webtest.py/WebCase.set_persistent
7,485
def _get_cell():
    curr = getcurrent()
    try:
        cell = curr._cell
    except __HOLE__:
        if hasattr(curr, 'spawn_actor'):
            cell = curr
        else:
            return None
    # give out the method, not the cell object itself, to avoid exposing the internals
    return cell
AttributeError
dataset/ETHPy150Open eallik/spinoff/spinoff/actor/context.py/_get_cell
7,486
def _save_dictionary_parameter(self, dict_param):
        full_msg = self.data
        # look for %(blah) fields in string;
        # ignore %% and deal with the
        # case where % is first character on the line
        keys = re.findall('(?:[^%]|^)%\((\w*)\)[a-z]', full_msg)

        # if we don't find any %(blah) blocks but have a %s
        if not keys and re.findall('(?:[^%]|^)%[a-z]', full_msg):
            # apparently the full dictionary is the parameter
            params = copy.deepcopy(dict_param)
        else:
            params = {}
            for key in keys:
                try:
                    params[key] = copy.deepcopy(dict_param[key])
                except __HOLE__:
                    # cast uncopyable thing to unicode string
                    params[key] = unicode(dict_param[key])

        return params
TypeError
dataset/ETHPy150Open openstack-dev/heat-cfnclient/heat_cfnclient/openstack/common/gettextutils.py/Message._save_dictionary_parameter
7,487
def _save_parameters(self, other):
        # we check for None later to see if
        # we actually have parameters to inject,
        # so encapsulate if our parameter is actually None
        if other is None:
            self.params = (other, )
        elif isinstance(other, dict):
            self.params = self._save_dictionary_parameter(other)
        else:
            # fallback to casting to unicode,
            # this will handle the problematic python code-like
            # objects that cannot be deep-copied
            try:
                self.params = copy.deepcopy(other)
            except __HOLE__:
                self.params = unicode(other)

        return self

    # overrides to be more string-like
TypeError
dataset/ETHPy150Open openstack-dev/heat-cfnclient/heat_cfnclient/openstack/common/gettextutils.py/Message._save_parameters
7,488
def SplitNetworkNameSecurity(network):
    """Splits a network name from it's security type.

    Splits strings of the form NetworkName$NetworkSecurity into a tuple of
    NetworkName, NetworkSecurity. Verifies that security matches one of the
    allowed options in _SECURITY_TYPES; if the security type is missing or
    is not in the allowed types, it is assumed to be OPEN.

    Args:
      network: str, the network name to split
    Returns:
      name: str, the network name.
      security: str, the network security type.
    """
    try:
        name, security = network.split('$')
        security = security.upper()
        if security not in _SECURITY_TYPES:
            security = 'OPEN'
    except __HOLE__:
        name = network
        security = 'OPEN'
    return name, security
ValueError
dataset/ETHPy150Open google/macops/gmacpyutil/gmacpyutil/wifi_network_order.py/SplitNetworkNameSecurity
7,489
def __add__(self, om):
        if not isinstance(om, self.__class__) or om.ring != self.ring:
            try:
                om = self.ring.convert(om)
            except (__HOLE__, CoercionFailed):
                return NotImplemented
        return self.ring(self.data + om.data)
NotImplementedError
dataset/ETHPy150Open sympy/sympy/sympy/polys/domains/quotientring.py/QuotientRingElement.__add__
7,490
def __mul__(self, o):
        if not isinstance(o, self.__class__):
            try:
                o = self.ring.convert(o)
            except (__HOLE__, CoercionFailed):
                return NotImplemented
        return self.ring(self.data*o.data)
NotImplementedError
dataset/ETHPy150Open sympy/sympy/sympy/polys/domains/quotientring.py/QuotientRingElement.__mul__
7,491
def __div__(self, o):
        if not isinstance(o, self.__class__):
            try:
                o = self.ring.convert(o)
            except (__HOLE__, CoercionFailed):
                return NotImplemented
        return self.ring.revert(o)*self
NotImplementedError
dataset/ETHPy150Open sympy/sympy/sympy/polys/domains/quotientring.py/QuotientRingElement.__div__
7,492
def revert(self, a):
        """
        Compute a**(-1), if possible.
        """
        I = self.ring.ideal(a.data) + self.base_ideal
        try:
            return self(I.in_terms_of_generators(1)[0])
        except __HOLE__:  # 1 not in I
            raise NotReversible('%s not a unit in %r' % (a, self))
ValueError
dataset/ETHPy150Open sympy/sympy/sympy/polys/domains/quotientring.py/QuotientRing.revert
7,493
def invalidate_cache(self, product):
        """
        The method ``ProductCommonSerializer.render_html`` caches the
        rendered HTML snippets. Invalidate them after changing something
        in the product.
        """
        try:
            cache.delete_pattern('product:{}|*'.format(product.id))
        except __HOLE__:
            pass
AttributeError
dataset/ETHPy150Open awesto/django-shop/shop/admin/product.py/InvalidateProductCacheMixin.invalidate_cache
7,494
def test_get_with_no_support(self):
        """Testing the GET repositories/<id>/commits/ API with a repository
        that does not implement it
        """
        repository = self.create_repository(tool_name='CVS')
        repository.save()

        try:
            rsp = self.api_get(
                get_repository_commits_url(repository),
                query={'start': ''},
                expected_status=501)
        except __HOLE__:
            raise nose.SkipTest("cvs binary not found")

        self.assertEqual(rsp['stat'], 'fail')
        self.assertEqual(rsp['err']['code'], REPO_NOT_IMPLEMENTED.code)
ImportError
dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/webapi/tests/test_repository_commits.py/ResourceTests.test_get_with_no_support
7,495
def register(_id, callbacks, schemas, formats):
    ret = {}
    if 'calvinip' in schemas:
        try:
            import calvinip_transport
            f = calvinip_transport.CalvinTransportFactory(_id, callbacks)
            factories[_id] = f
            ret['calvinip'] = f
        except __HOLE__:
            traceback.print_exc()
    return ret
ImportError
dataset/ETHPy150Open EricssonResearch/calvin-base/calvin/runtime/south/plugins/transports/calvinip/__init__.py/register
7,496
def _format_source_error(filename, context, lineno):
    """ A helper function which generates an error string.

    This function handles the work of reading the lines of the file
    which bracket the error, and formatting a string which points to
    the offending line. The output is similar to:

        File "foo.py", line 42, in bar()
             41 def bar():
        ----> 42     a = a + 1
             43     return a

    Parameters
    ----------
    filename : string
        The name of the offending file.

    context : string
        The string name of the context scope in which the error occured.
        In the sample above, the context is 'bar'.

    lineno : int
        The integer line number of the offending line.

    Returns
    -------
    result : string
        A nicely formatted string for including in an exception. If the
        file cannot be opened, the source lines will note be included.
    """
    text = 'File "%s", line %d, in %s()' % (filename, lineno, context)
    start_lineno = max(0, lineno - 1)
    end_lineno = start_lineno + 2
    lines = []
    try:
        with open(filename, 'r') as f:
            for idx, line in enumerate(f, 1):
                if idx >= start_lineno and idx <= end_lineno:
                    lines.append((idx, line))
                elif idx > end_lineno:
                    break
    except __HOLE__:
        pass
    if len(lines) > 0:
        digits = str(len(str(end_lineno)))
        line_templ = '\n----> %' + digits + 'd %s'
        other_templ = '\n      %' + digits + 'd %s'
        for lno, line in lines:
            line = line.rstrip()
            if lno == lineno:
                text += line_templ % (lno, line)
            else:
                text += other_templ % (lno, line)
    return text
IOError
dataset/ETHPy150Open ContinuumIO/ashiba/enaml/enaml/core/exceptions.py/_format_source_error
7,497
def set_isolation_level(self, connection, level):
        try:
            level = self._isolation_lookup[level.replace('_', ' ')]
        except __HOLE__:
            raise exc.ArgumentError(
                "Invalid value '%s' for isolation_level. "
                "Valid isolation levels for %s are %s" %
                (level, self.name, ", ".join(self._isolation_lookup))
            )

        connection.set_isolation_level(level)
KeyError
dataset/ETHPy150Open goFrendiAsgard/kokoropy/kokoropy/packages/sqlalchemy/dialects/postgresql/psycopg2.py/PGDialect_psycopg2.set_isolation_level
7,498
def call_input_function(self, function, params):
        """self.call_input_function(function, params) -> None
        Calls the input function on the vtkInstance, or a special
        input function if one exists in the class."""
        if hasattr(self, '_special_input_function_' + function):
            attr = getattr(self, '_special_input_function_' + function)
        else:
            try:
                attr = getattr(self.vtkInstance, function)
            except __HOLE__:
                # Compensates for overload by exploiting the fact that
                # no VTK method has underscores.
                f = function.find('_')
                if f != -1:
                    function = function[:f]
                attr = getattr(self.vtkInstance, function)
        attr(*params)
        # print "Called ",attr,function,params
AttributeError
dataset/ETHPy150Open VisTrails/VisTrails/contrib/titan/base_module.py/vtkBaseModule.call_input_function
7,499
def soup_maker(fh):
    skip_headers(fh)
    try:
        from bs4 import BeautifulSoup
        soup = BeautifulSoup(fh, "xml")
        for tag in soup.findAll():
            tag.name = tag.name.lower()
    except __HOLE__:
        from BeautifulSoup import BeautifulStoneSoup
        soup = BeautifulStoneSoup(fh)
    return soup
ImportError
dataset/ETHPy150Open jseutter/ofxparse/ofxparse/ofxparse.py/soup_maker