Dataset columns:
    Unnamed: 0   int64    (values 0 to 10k)
    function     string   (lengths 79 to 138k)
    label        string   (20 classes)
    info         string   (lengths 42 to 261)
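Each row below pairs a Python function in which one exception class has been masked with the placeholder __HOLE__, a "label" naming the masked class, and an "info" path locating the function inside the ETHPy150Open corpus. The following is a minimal sketch of how a sample could be rehydrated into compilable source; the row layout (a dict keyed by the column names above) is an assumption for illustration, not part of this dump.

# Hypothetical helper: substitute the labelled exception class back into
# the masked position of one dataset row (row assumed to be a dict with
# 'function' and 'label' keys, mirroring the columns described above).
def restore_function(row):
    return row['function'].replace('__HOLE__', row['label'])

# e.g. applying this to the first row below would yield code containing
# "except NotImplementedError:" in place of "except __HOLE__:"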
1,500
def _items(self, req, server_id, entity_maker):
    """Returns a list of VIFs, transformed through entity_maker."""
    context = req.environ['nova.context']
    instance = common.get_instance(self.compute_api, context, server_id)
    try:
        vifs = self.network_api.get_vifs_by_instance(context, instance)
    except __HOLE__:
        msg = _('Listing virtual interfaces is not supported by this '
                'cloud.')
        raise webob.exc.HTTPBadRequest(explanation=msg)
    limited_list = common.limited(vifs, req)
    res = [entity_maker(vif) for vif in limited_list]
    return {'virtual_interfaces': res}
NotImplementedError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/api/openstack/compute/legacy_v2/contrib/virtual_interfaces.py/ServerVirtualInterfaceController._items
1,501
def _parse_expires(expires):
    """ Parse the 'expires' attribute, guessing what format it is in and
        returning a datetime
    """
    # none is used to signify positive infinity
    if expires is None or expires in ('never', 'infinity'):
        return 'infinity'

    try:
        return dateutil.parser.parse(str(expires))
    except __HOLE__ as exc:
        pass

    try:
        # use parsedatetime for "human readable" time specs
        exp = pdt.parse(expires)[0]
        # and convert to datetime
        return datetime.datetime.fromtimestamp(time.mktime(exp))
    except ValueError as exc:
        pass

    raise NipapValueError("Invalid date specification for expires")
ValueError
dataset/ETHPy150Open SpriteLink/NIPAP/nipap/nipap/backend.py/_parse_expires
1,502
def _is_ipv4(self, ip):
    """ Return true if given arg is a valid IPv4 address
    """
    try:
        p = IPy.IP(ip)
    except __HOLE__:
        return False

    if p.version() == 4:
        return True
    return False
ValueError
dataset/ETHPy150Open SpriteLink/NIPAP/nipap/nipap/backend.py/Nipap._is_ipv4
1,503
def _is_ipv6(self, ip):
    """ Return true if given arg is a valid IPv6 address
    """
    try:
        p = IPy.IP(ip)
    except __HOLE__:
        return False

    if p.version() == 6:
        return True
    return False
ValueError
dataset/ETHPy150Open SpriteLink/NIPAP/nipap/nipap/backend.py/Nipap._is_ipv6
1,504
def _get_afi(self, ip):
    """ Return address-family (4 or 6) for IP or None if invalid address
    """
    parts = unicode(ip).split("/")
    if len(parts) == 1:
        # just an address
        if self._is_ipv4(ip):
            return 4
        elif self._is_ipv6(ip):
            return 6
        else:
            return None
    elif len(parts) == 2:
        # a prefix!
        try:
            pl = int(parts[1])
        except __HOLE__:
            # if casting parts[1] to int failes, this is not a prefix..
            return None

        if self._is_ipv4(parts[0]):
            if pl >= 0 and pl <= 32:
                # prefix mask must be between 0 and 32
                return 4
            # otherwise error
            return None
        elif self._is_ipv6(parts[0]):
            if pl >= 0 and pl <= 128:
                # prefix mask must be between 0 and 128
                return 6
            # otherwise error
            return None
        else:
            return None
    else:
        # more than two parts.. this is neither an address or a prefix
        return None

#
# SQL related functions
#
ValueError
dataset/ETHPy150Open SpriteLink/NIPAP/nipap/nipap/backend.py/Nipap._get_afi
1,505
def _get_query_parts(self, query_str, search_options=None):
    """ Split a query string into its parts
    """
    if search_options is None:
        search_options = {}

    if query_str is None:
        raise NipapValueError("'query_string' must not be None")

    # find query parts
    query_str_parts = []
    try:
        for part in shlex.split(query_str.encode('utf-8')):
            query_str_parts.append({ 'string': part.decode('utf-8') })
    except __HOLE__ as exc:
        if str(exc) == 'No closing quotation':
            raise NipapValueError(str(exc))
        raise exc

    # Handle empty search.
    # We need something to iterate over, but shlex.split() returns
    # zero-element list for an empty string, so we have to append one
    # manually
    if len(query_str_parts) == 0:
        query_str_parts.append({ 'string': '' })

    return query_str_parts
ValueError
dataset/ETHPy150Open SpriteLink/NIPAP/nipap/nipap/backend.py/Nipap._get_query_parts
1,506
def _expand_vrf_query(self, query, table_name = None):
    """ Expand VRF query dict into a WHERE-clause.

        If you need to prefix each column reference with a table
        name, that can be supplied via the table_name argument.
    """
    where = str()
    opt = list()

    # handle table name, can be None
    if table_name is None:
        col_prefix = ""
    else:
        col_prefix = table_name + "."

    if type(query['val1']) == dict and type(query['val2']) == dict:
        # Sub expression, recurse! This is used for boolean operators: AND OR
        # add parantheses

        sub_where1, opt1 = self._expand_vrf_query(query['val1'], table_name)
        sub_where2, opt2 = self._expand_vrf_query(query['val2'], table_name)
        try:
            where += str(" (%s %s %s) " % (sub_where1, _operation_map[query['operator']], sub_where2) )
        except __HOLE__:
            raise NipapNoSuchOperatorError("No such operator %s" % str(query['operator']))

        opt += opt1
        opt += opt2

    else:
        # TODO: raise exception if someone passes one dict and one "something else"?

        # val1 is variable, val2 is string.
        if query['val1'] not in _vrf_spec:
            raise NipapInputError('Search variable \'%s\' unknown' % str(query['val1']))

        # build where clause
        if query['operator'] not in _operation_map:
            raise NipapNoSuchOperatorError("No such operator %s" % query['operator'])

        # workaround for handling equal matches of NULL-values
        if query['operator'] == 'equals' and query['val2'] is None:
            query['operator'] = 'is'
        elif query['operator'] == 'not_equals' and query['val2'] is None:
            query['operator'] = 'is_not'

        if query['operator'] in ('equals_any',):
            where = str(" %%s = ANY (%s%s::citext[]) " %
                ( col_prefix, _vrf_spec[query['val1']]['column']) )
        else:
            where = str(" %s%s %s %%s " %
                ( col_prefix, _vrf_spec[query['val1']]['column'],
                _operation_map[query['operator']] )
            )

        opt.append(query['val2'])

    return where, opt
KeyError
dataset/ETHPy150Open SpriteLink/NIPAP/nipap/nipap/backend.py/Nipap._expand_vrf_query
1,507
def search_vrf(self, auth, query, search_options=None): """ Search VRF list for VRFs matching `query`. * `auth` [BaseAuth] AAA options. * `query` [dict_to_sql] How the search should be performed. * `search_options` [options_dict] Search options, see below. Returns a list of dicts. The `query` argument passed to this function is designed to be able to specify how quite advanced search operations should be performed in a generic format. It is internally expanded to a SQL WHERE-clause. The `query` is a dict with three elements, where one specifies the operation to perform and the two other specifies its arguments. The arguments can themselves be `query` dicts, to build more complex queries. The :attr:`operator` key specifies what operator should be used for the comparison. Currently the following operators are supported: * :data:`and` - Logical AND * :data:`or` - Logical OR * :data:`equals` - Equality; = * :data:`not_equals` - Inequality; != * :data:`like` - SQL LIKE * :data:`regex_match` - Regular expression match * :data:`regex_not_match` - Regular expression not match The :attr:`val1` and :attr:`val2` keys specifies the values which are subjected to the comparison. :attr:`val1` can be either any prefix attribute or an entire query dict. :attr:`val2` can be either the value you want to compare the prefix attribute to, or an entire `query` dict. Example 1 - Find the VRF whose VRF match '65000:123':: query = { 'operator': 'equals', 'val1': 'vrf', 'val2': '65000:123' } This will be expanded to the pseudo-SQL query:: SELECT * FROM vrf WHERE vrf = '65000:123' Example 2 - Find vrf whose name or description regex matches 'test':: query = { 'operator': 'or', 'val1': { 'operator': 'regex_match', 'val1': 'name', 'val2': 'test' }, 'val2': { 'operator': 'regex_match', 'val1': 'description', 'val2': 'test' } } This will be expanded to the pseudo-SQL query:: SELECT * FROM vrf WHERE name ~* 'test' OR description ~* 'test' The search options can also be used to limit the number of rows returned or set an offset for the result. The following options are available: * :attr:`max_result` - The maximum number of prefixes to return (default :data:`50`). * :attr:`offset` - Offset the result list this many prefixes (default :data:`0`). This is the documentation of the internal backend function. It's exposed over XML-RPC, please also see the XML-RPC documentation for :py:func:`nipap.xmlrpc.NipapXMLRPC.search_vrf` for full understanding. """ if search_options is None: search_options = {} # # sanitize search options and set default if option missing # # max_result if 'max_result' not in search_options: search_options['max_result'] = 50 else: try: search_options['max_result'] = int(search_options['max_result']) except (ValueError, __HOLE__): raise NipapValueError('Invalid value for option' + ''' 'max_result'. Only integer values allowed.''') # offset if 'offset' not in search_options: search_options['offset'] = 0 else: try: search_options['offset'] = int(search_options['offset']) except (ValueError, TypeError): raise NipapValueError('Invalid value for option' + ''' 'offset'. 
Only integer values allowed.''') self._logger.debug('search_vrf called; query: %s search_options: %s' % (str(query), str(search_options))) opt = None sql = """ SELECT * FROM ip_net_vrf""" # add where clause if we have any search terms if query != {}: where, opt = self._expand_vrf_query(query) sql += " WHERE " + where sql += " ORDER BY vrf_rt_order(rt) NULLS FIRST LIMIT " + str(search_options['max_result']) + " OFFSET " + str(search_options['offset']) self._execute(sql, opt) result = list() for row in self._curs_pg: result.append(dict(row)) return { 'search_options': search_options, 'result': result }
TypeError
dataset/ETHPy150Open SpriteLink/NIPAP/nipap/nipap/backend.py/Nipap.search_vrf
1,508
def _expand_pool_query(self, query, table_name = None):
    """ Expand pool query dict into a WHERE-clause.

        If you need to prefix each column reference with a table
        name, that can be supplied via the table_name argument.
    """
    where = str()
    opt = list()

    # handle table name, can be None
    if table_name is None:
        col_prefix = ""
    else:
        col_prefix = table_name + "."

    if type(query['val1']) == dict and type(query['val2']) == dict:
        # Sub expression, recurse! This is used for boolean operators: AND OR
        # add parantheses

        sub_where1, opt1 = self._expand_pool_query(query['val1'], table_name)
        sub_where2, opt2 = self._expand_pool_query(query['val2'], table_name)
        try:
            where += str(" (%s %s %s) " % (sub_where1, _operation_map[query['operator']], sub_where2) )
        except __HOLE__:
            raise NipapNoSuchOperatorError("No such operator %s" % str(query['operator']))

        opt += opt1
        opt += opt2

    else:
        # TODO: raise exception if someone passes one dict and one "something else"?

        # val1 is variable, val2 is string.
        if query['val1'] not in _pool_spec:
            raise NipapInputError('Search variable \'%s\' unknown' % str(query['val1']))

        # build where clause
        if query['operator'] not in _operation_map:
            raise NipapNoSuchOperatorError("No such operator %s" % query['operator'])

        # workaround for handling equal matches of NULL-values
        if query['operator'] == 'equals' and query['val2'] is None:
            query['operator'] = 'is'
        elif query['operator'] == 'not_equals' and query['val2'] is None:
            query['operator'] = 'is_not'

        if query['operator'] in ('equals_any',):
            where = str(" %%s = ANY (%s%s::citext[]) " %
                ( col_prefix, _pool_spec[query['val1']]['column']) )
        else:
            where = str(" %s%s %s %%s " %
                ( col_prefix, _pool_spec[query['val1']]['column'],
                _operation_map[query['operator']] )
            )

        opt.append(query['val2'])

    return where, opt
KeyError
dataset/ETHPy150Open SpriteLink/NIPAP/nipap/nipap/backend.py/Nipap._expand_pool_query
1,509
def _check_pool_attr(self, attr, req_attr=None):
    """ Check pool attributes.
    """
    if req_attr is None:
        req_attr = []

    # check attribute names
    self._check_attr(attr, req_attr, _pool_attrs)

    # validate IPv4 prefix length
    if attr.get('ipv4_default_prefix_length') is not None:
        try:
            attr['ipv4_default_prefix_length'] = \
                int(attr['ipv4_default_prefix_length'])

            if (attr['ipv4_default_prefix_length'] > 32 or
                    attr['ipv4_default_prefix_length'] < 1):
                raise ValueError()
        except ValueError:
            raise NipapValueError('Default IPv4 prefix length must be an integer between 1 and 32.')

    # validate IPv6 prefix length
    if attr.get('ipv6_default_prefix_length'):
        try:
            attr['ipv6_default_prefix_length'] = \
                int(attr['ipv6_default_prefix_length'])

            if (attr['ipv6_default_prefix_length'] > 128 or
                    attr['ipv6_default_prefix_length'] < 1):
                raise ValueError()
        except __HOLE__:
            raise NipapValueError('Default IPv6 prefix length must be an integer between 1 and 128.')
ValueError
dataset/ETHPy150Open SpriteLink/NIPAP/nipap/nipap/backend.py/Nipap._check_pool_attr
1,510
def search_pool(self, auth, query, search_options=None): """ Search pool list for pools matching `query`. * `auth` [BaseAuth] AAA options. * `query` [dict_to_sql] How the search should be performed. * `search_options` [options_dict] Search options, see below. Returns a list of dicts. The `query` argument passed to this function is designed to be able to specify how quite advanced search operations should be performed in a generic format. It is internally expanded to a SQL WHERE-clause. The `query` is a dict with three elements, where one specifies the operation to perform and the two other specifies its arguments. The arguments can themselves be `query` dicts, to build more complex queries. The :attr:`operator` key specifies what operator should be used for the comparison. Currently the following operators are supported: * :data:`and` - Logical AND * :data:`or` - Logical OR * :data:`equals` - Equality; = * :data:`not_equals` - Inequality; != * :data:`like` - SQL LIKE * :data:`regex_match` - Regular expression match * :data:`regex_not_match` - Regular expression not match The :attr:`val1` and :attr:`val2` keys specifies the values which are subjected to the comparison. :attr:`val1` can be either any pool attribute or an entire query dict. :attr:`val2` can be either the value you want to compare the pool attribute to, or an entire `query` dict. Example 1 - Find the pool whose name match 'test':: query = { 'operator': 'equals', 'val1': 'name', 'val2': 'test' } This will be expanded to the pseudo-SQL query:: SELECT * FROM pool WHERE name = 'test' Example 2 - Find pools whose name or description regex matches 'test':: query = { 'operator': 'or', 'val1': { 'operator': 'regex_match', 'val1': 'name', 'val2': 'test' }, 'val2': { 'operator': 'regex_match', 'val1': 'description', 'val2': 'test' } } This will be expanded to the pseudo-SQL query:: SELECT * FROM pool WHERE name ~* 'test' OR description ~* 'test' The search options can also be used to limit the number of rows returned or set an offset for the result. The following options are available: * :attr:`max_result` - The maximum number of pools to return (default :data:`50`). * :attr:`offset` - Offset the result list this many pools (default :data:`0`). This is the documentation of the internal backend function. It's exposed over XML-RPC, please also see the XML-RPC documentation for :py:func:`nipap.xmlrpc.NipapXMLRPC.search_pool` for full understanding. """ if search_options is None: search_options = {} # # sanitize search options and set default if option missing # # max_result if 'max_result' not in search_options: search_options['max_result'] = 50 else: try: search_options['max_result'] = int(search_options['max_result']) except (ValueError, __HOLE__): raise NipapValueError('Invalid value for option' + ''' 'max_result'. Only integer values allowed.''') # offset if 'offset' not in search_options: search_options['offset'] = 0 else: try: search_options['offset'] = int(search_options['offset']) except (ValueError, TypeError): raise NipapValueError('Invalid value for option' + ''' 'offset'. 
Only integer values allowed.''') self._logger.debug('search_pool search_options: %s' % str(search_options)) where, opt = self._expand_pool_query(query) sql = """SELECT DISTINCT (po.id), po.id, po.name, po.description, po.default_type, po.ipv4_default_prefix_length, po.ipv6_default_prefix_length, po.member_prefixes_v4, po.member_prefixes_v6, po.used_prefixes_v4, po.used_prefixes_v6, po.free_prefixes_v4, po.free_prefixes_v6, po.total_prefixes_v4, po.total_prefixes_v6, po.total_addresses_v4, po.total_addresses_v6, po.used_addresses_v4, po.used_addresses_v6, po.free_addresses_v4, po.free_addresses_v6, po.tags, po.avps, vrf.id AS vrf_id, vrf.rt AS vrf_rt, vrf.name AS vrf_name, (SELECT array_agg(prefix::text) FROM (SELECT prefix FROM ip_net_plan WHERE pool_id=po.id ORDER BY prefix) AS a) AS prefixes FROM ip_net_pool AS po LEFT OUTER JOIN ip_net_plan AS inp ON (inp.pool_id = po.id) LEFT OUTER JOIN ip_net_vrf AS vrf ON (vrf.id = inp.vrf_id) WHERE """ + where + """ ORDER BY po.name LIMIT """ + str(search_options['max_result']) + """ OFFSET """ + str(search_options['offset']) self._execute(sql, opt) result = list() for row in self._curs_pg: result.append(dict(row)) return { 'search_options': search_options, 'result': result }
TypeError
dataset/ETHPy150Open SpriteLink/NIPAP/nipap/nipap/backend.py/Nipap.search_pool
1,511
def _expand_prefix_query(self, query, table_name = None):
    """ Expand prefix query dict into a WHERE-clause.

        If you need to prefix each column reference with a table
        name, that can be supplied via the table_name argument.
    """
    where = str()
    opt = list()

    # handle table name, can be None
    if table_name is None:
        col_prefix = ""
    else:
        col_prefix = table_name + "."

    if 'val1' not in query:
        raise NipapMissingInputError("'val1' must be specified")
    if 'val2' not in query:
        raise NipapMissingInputError("'val2' must be specified")

    if type(query['val1']) == dict and type(query['val2']) == dict:
        # Sub expression, recurse! This is used for boolean operators: AND OR
        # add parantheses

        sub_where1, opt1 = self._expand_prefix_query(query['val1'], table_name)
        sub_where2, opt2 = self._expand_prefix_query(query['val2'], table_name)
        try:
            where += str(" (%s %s %s) " % (sub_where1, _operation_map[query['operator']], sub_where2) )
        except __HOLE__:
            raise NipapNoSuchOperatorError("No such operator %s" % str(query['operator']))

        opt += opt1
        opt += opt2

    else:
        # TODO: raise exception if someone passes one dict and one "something else"?

        # val1 is key, val2 is value.
        if query['val1'] not in _prefix_spec:
            raise NipapInputError('Search variable \'%s\' unknown' % str(query['val1']))

        # build where clause
        if query['operator'] not in _operation_map:
            raise NipapNoSuchOperatorError("No such operator %s" % query['operator'])

        if query['val1'] == 'vrf_id' and query['val2'] == None:
            query['val2'] = 0

        # workaround for handling equal matches of NULL-values
        if query['operator'] == 'equals' and query['val2'] is None:
            query['operator'] = 'is'
        elif query['operator'] == 'not_equals' and query['val2'] is None:
            query['operator'] = 'is_not'

        if query['operator'] in (
                'contains',
                'contains_equals',
                'contained_within',
                'contained_within_equals'):
            where = " iprange(prefix) %(operator)s %%s " % {
                    'col_prefix': col_prefix,
                    'operator': _operation_map[query['operator']]
                    }

        elif query['operator'] in ('equals_any',):
            where = str(" %%s = ANY (%s%s::citext[]) " %
                ( col_prefix, _prefix_spec[query['val1']]['column']) )

        elif query['operator'] in (
                'like',
                'regex_match',
                'regex_not_match'):
            # we COALESCE column with '' to allow for example a regexp
            # search on '.*' to match columns which are NULL in the
            # database
            where = str(" COALESCE(%s%s, '') %s %%s " %
                ( col_prefix, _prefix_spec[query['val1']]['column'],
                _operation_map[query['operator']] )
            )

        else:
            where = str(" %s%s %s %%s " %
                ( col_prefix, _prefix_spec[query['val1']]['column'],
                _operation_map[query['operator']] )
            )

        opt.append(query['val2'])

    return where, opt
KeyError
dataset/ETHPy150Open SpriteLink/NIPAP/nipap/nipap/backend.py/Nipap._expand_prefix_query
1,512
def find_free_prefix(self, auth, vrf, args): """ Finds free prefixes in the sources given in `args`. * `auth` [BaseAuth] AAA options. * `vrf` [vrf] Full VRF-dict specifying in which VRF the prefix should be unique. * `args` [find_free_prefix_args] Arguments to the find free prefix function. Returns a list of dicts. Prefixes can be found in two ways: from a pool of from a prefix. From a pool The `args` argument is set to a dict with key :attr:`from-pool` set to a pool spec. This is the pool from which the prefix will be assigned. Also the key :attr:`family` needs to be set to the adress family (integer 4 or 6) of the requested prefix. Optionally, also the key :attr:`prefix_length` can be added to the `attr` argument, and will then override the default prefix length. Example:: args = { 'from-pool': { 'name': 'CUSTOMER-' }, 'family': 6, 'prefix_length': 64 } From a prefix Instead of specifying a pool, a prefix which will be searched for new prefixes can be specified. In `args`, the key :attr:`from-prefix` is set to the prefix you want to allocate from and the key :attr:`prefix_length` is set to the wanted prefix length. Example:: args = { 'from-prefix': '192.0.2.0/24' 'prefix_length': 27 } The key :attr:`count` can also be set in the `args` argument to specify how many prefixes that should be returned. If omitted, the default value is 1000. The internal backend function :func:`find_free_prefix` is used internally by the :func:`add_prefix` function to find available prefixes from the given sources. It's also exposed over XML-RPC, please see the XML-RPC documentation for :py:func:`nipap.xmlrpc.NipapXMLRPC.find_free_prefix` for full understanding. """ # input sanity if type(args) is not dict: raise NipapInputError("invalid input, please provide dict as args") # TODO: find good default value for max_num # TODO: let max_num be configurable from configuration file max_count = 1000 if 'count' in args: if int(args['count']) > max_count: raise NipapValueError("count over the maximum result size") else: args['count'] = 1 if 'from-pool' in args: if 'from-prefix' in args: raise NipapInputError("specify 'from-pool' OR 'from-prefix'") if 'family' not in args: raise NipapMissingInputError("'family' must be specified with 'from-pool' mode") try: assert int(args['family']) in [ 4, 6 ] except (__HOLE__, AssertionError): raise NipapValueError("incorrect family specified, must be 4 or 6") elif 'from-prefix' in args: if type(args['from-prefix']) is not list: raise NipapInputError("from-prefix should be a list") if 'from-pool' in args: raise NipapInputError("specify 'from-pool' OR 'from-prefix'") if 'prefix_length' not in args: raise NipapMissingInputError("'prefix_length' must be specified with 'from-prefix'") if 'family' in args: raise NipapExtraneousInputError("'family' is superfluous when in 'from-prefix' mode") # determine prefixes prefixes = [] wpl = 0 if 'from-pool' in args: # extract prefixes from pool_result = self.list_pool(auth, args['from-pool']) self._logger.debug(args) if pool_result == []: raise NipapNonExistentError("Non-existent pool specified") for p in pool_result[0]['prefixes']: if self._get_afi(p) == int(args['family']): prefixes.append(p) if len(prefixes) == 0: raise NipapInputError('No prefixes of family %s in pool' % str(args['family'])) if 'prefix_length' not in args: if int(args['family']) == 4: wpl = pool_result[0]['ipv4_default_prefix_length'] else: wpl = pool_result[0]['ipv6_default_prefix_length'] afi = None if 'from-prefix' in args: for prefix in args['from-prefix']: prefix_afi = 
self._get_afi(prefix) if afi is None: afi = prefix_afi elif afi != prefix_afi: raise NipapInputError("mixing of address-family is not allowed for 'from-prefix' arg") prefixes.append(prefix) if 'prefix_length' in args: try: wpl = int(args['prefix_length']) except ValueError: raise NipapValueError("prefix length must be integer") # sanity check the wanted prefix length if afi == 4: if wpl < 0 or wpl > 32: raise NipapValueError("the specified wanted prefix length argument must be between 0 and 32 for ipv4") elif afi == 6: if wpl < 0 or wpl > 128: raise NipapValueError("the specified wanted prefix length argument must be between 0 and 128 for ipv6") # build SQL params = {} # TODO: this makes me want to piss my pants # we should really write a patch to psycopg2 or something to # properly adapt an python list of texts with values looking # like prefixes to a postgresql array of inets sql_prefix = ' UNION '.join('SELECT %(prefix' + str(prefixes.index(p)) + ')s AS prefix' for p in prefixes) for p in prefixes: params['prefix' + str(prefixes.index(p))] = str(p) damp = 'SELECT array_agg((prefix::text)::inet) FROM (' + sql_prefix + ') AS a' sql = """SELECT * FROM find_free_prefix(%(vrf_id)s, (""" + damp + """), %(prefix_length)s, %(max_result)s) AS prefix""" v = self._get_vrf(auth, vrf or {}, '') params['vrf_id'] = v['id'] params['prefixes'] = prefixes params['prefix_length'] = wpl params['max_result'] = args['count'] self._execute(sql, params) res = list() for row in self._curs_pg: res.append(str(row['prefix'])) return res
TypeError
dataset/ETHPy150Open SpriteLink/NIPAP/nipap/nipap/backend.py/Nipap.find_free_prefix
1,513
def search_prefix(self, auth, query, search_options=None): """ Search prefix list for prefixes matching `query`. * `auth` [BaseAuth] AAA options. * `query` [dict_to_sql] How the search should be performed. * `search_options` [options_dict] Search options, see below. Returns a list of dicts. The `query` argument passed to this function is designed to be able to express quite advanced search filters. It is internally expanded to an SQL WHERE-clause. The `query` is a dict with three elements, where one specifies the operation to perform and the two other specifies its arguments. The arguments can themselves be `query` dicts, i.e. nested, to build more complex queries. The :attr:`operator` key specifies what operator should be used for the comparison. Currently the following operators are supported: * :data:`and` - Logical AND * :data:`or` - Logical OR * :data:`equals_any` - Equality of any element in array * :data:`equals` - Equality; = * :data:`not_equals` - Inequality; != * :data:`less` - Less than; < * :data:`less_or_equal` - Less than or equal to; <= * :data:`greater` - Greater than; > * :data:`greater_or_equal` - Greater than or equal to; >= * :data:`like` - SQL LIKE * :data:`regex_match` - Regular expression match * :data:`regex_not_match` - Regular expression not match * :data:`contains` - IP prefix contains * :data:`contains_equals` - IP prefix contains or is equal to * :data:`contained_within` - IP prefix is contained within * :data:`contained_within_equals` - IP prefix is contained within or equals The :attr:`val1` and :attr:`val2` keys specifies the values which are subjected to the comparison. :attr:`val1` can be either any prefix attribute or a query dict. :attr:`val2` can be either the value you want to compare the prefix attribute to, or a `query` dict. Example 1 - Find the prefixes which contains 192.0.2.0/24:: query = { 'operator': 'contains', 'val1': 'prefix', 'val2': '192.0.2.0/24' } This will be expanded to the pseudo-SQL query:: SELECT * FROM prefix WHERE prefix contains '192.0.2.0/24' Example 2 - Find for all assignments in prefix 192.0.2.0/24:: query = { 'operator': 'and', 'val1': { 'operator': 'equals', 'val1': 'type', 'val2': 'assignment' }, 'val2': { 'operator': 'contained_within', 'val1': 'prefix', 'val2': '192.0.2.0/24' } } This will be expanded to the pseudo-SQL query:: SELECT * FROM prefix WHERE (type == 'assignment') AND (prefix contained within '192.0.2.0/24') If you want to combine more than two expressions together with a boolean expression you need to nest them. For example, to match on three values, in this case the tag 'foobar' and a prefix-length between /10 and /24, the following could be used:: query = { 'operator': 'and', 'val1': { 'operator': 'and', 'val1': { 'operator': 'greater', 'val1': 'prefix_length', 'val2': 9 }, 'val2': { 'operator': 'less_or_equal', 'val1': 'prefix_length', 'val2': 24 } }, 'val2': { 'operator': 'equals_any', 'val1': 'tags', 'val2': 'foobar' } } The `options` argument provides a way to alter the search result to assist in client implementations. Most options regard parent and children prefixes, that is the prefixes which contain the prefix(es) matching the search terms (parents) or the prefixes which are contained by the prefix(es) matching the search terms. The search options can also be used to limit the number of rows returned. The following options are available: * :attr:`parents_depth` - How many levels of parents to return. Set to :data:`-1` to include all parents. 
* :attr:`children_depth` - How many levels of children to return. Set to :data:`-1` to include all children. * :attr:`include_all_parents` - Include all parents, no matter what depth is specified. * :attr:`include_all_children` - Include all children, no matter what depth is specified. * :attr:`max_result` - The maximum number of prefixes to return (default :data:`50`). * :attr:`offset` - Offset the result list this many prefixes (default :data:`0`). The options above gives the possibility to specify how many levels of parent and child prefixes to return in addition to the prefixes that actually matched the search terms. This is done by setting the :attr:`parents_depth` and :attr:`children depth` keys in the `search_options` dict to an integer value. In addition to this it is possible to get all all parents and/or children included in the result set even though they are outside the limits set with :attr:`*_depth`. The extra prefixes included will have the attribute :attr:`display` set to :data:`false` while the other ones (the actual search result togther with the ones included due to given depth) :attr:`display` set to :data:`true`. This feature is usable obtain search results with some context given around them, useful for example when displaying prefixes in a tree without the need to implement client side IP address logic. This is the documentation of the internal backend function. It's exposed over XML-RPC, please also see the XML-RPC documentation for :py:func:`nipap.xmlrpc.NipapXMLRPC.search_prefix` for full understanding. """ if search_options is None: search_options = {} # # sanitize search options and set default if option missing # # include_parents if 'include_all_parents' not in search_options: search_options['include_all_parents'] = False else: if search_options['include_all_parents'] not in (True, False): raise NipapValueError('Invalid value for option ' + "'include_all_parents'. Only true and false valid. Supplied value :'%s'" % str(search_options['include_all_parents'])) # include_children if 'include_all_children' not in search_options: search_options['include_all_children'] = False else: if search_options['include_all_children'] not in (True, False): raise NipapValueError('Invalid value for option ' + "'include_all_children'. Only true and false valid. Supplied value: '%s'" % str(search_options['include_all_children'])) # parents_depth if 'parents_depth' not in search_options: search_options['parents_depth'] = 0 else: try: search_options['parents_depth'] = int(search_options['parents_depth']) except (ValueError, TypeError): raise NipapValueError('Invalid value for option' + ''' 'parent_depth'. Only integer values allowed.''') # children_depth if 'children_depth' not in search_options: search_options['children_depth'] = 0 else: try: search_options['children_depth'] = int(search_options['children_depth']) except (ValueError, __HOLE__): raise NipapValueError('Invalid value for option' + ''' 'children_depth'. Only integer values allowed.''') # include_neighbors if 'include_neighbors' not in search_options: search_options['include_neighbors'] = False else: if search_options['include_neighbors'] not in (True, False): raise NipapValueError('Invalid value for option ' + "'include_neighbors'. Only true and false valid. 
Supplied value: '%s'" % str(search_options['include_neighbors'])) # max_result if 'max_result' not in search_options: search_options['max_result'] = 50 else: try: search_options['max_result'] = int(search_options['max_result']) except (ValueError, TypeError): raise NipapValueError('Invalid value for option' + ''' 'max_result'. Only integer values allowed.''') # offset if 'offset' not in search_options: search_options['offset'] = 0 else: try: search_options['offset'] = int(search_options['offset']) except (ValueError, TypeError): raise NipapValueError('Invalid value for option' + ''' 'offset'. Only integer values allowed.''') # parent_prefix if 'parent_prefix' not in search_options: search_options['parent_prefix'] = None else: try: _ = int(search_options['parent_prefix']) except ValueError: raise NipapValueError( "Invalid value '%s' for option 'parent_prefix'. Must be the ID of a prefix." % search_options['parent_prefix']) try: parent_prefix = self.list_prefix(auth, { 'id': search_options['parent_prefix'] })[0] except IndexError: raise NipapNonExistentError("Parent prefix %s can not be found" % search_options['parent_prefix']) self._logger.debug('search_prefix search_options: %s' % str(search_options)) # translate search options to SQL if search_options['include_all_parents'] or search_options['parents_depth'] == -1: where_parents = '' elif search_options['parents_depth'] >= 0: where_parents = 'AND p1.indent BETWEEN p2.indent - %d AND p1.indent' % search_options['parents_depth'] else: raise NipapValueError("Invalid value for option 'parents_depth'. Only integer values > -1 allowed.") if search_options['include_all_children'] or search_options['children_depth'] == -1: where_children = '' elif search_options['children_depth'] >= 0: where_children = 'AND p1.indent BETWEEN p2.indent AND p2.indent + %d' % search_options['children_depth'] else: raise NipapValueError("Invalid value for option 'children_depth'. 
Only integer values > -1 allowed.") if search_options['include_neighbors']: include_neighbors = 'true' else: include_neighbors = 'false' if search_options['parent_prefix']: vrf_id = 0 if parent_prefix['vrf_id']: vrf_id = parent_prefix['vrf_id'] where_parent_prefix = " WHERE (p1.vrf_id = %s AND iprange(p1.prefix) <<= iprange('%s') AND p1.indent <= %s) " % (vrf_id, parent_prefix['prefix'], parent_prefix['indent'] + 1) left_join = 'LEFT OUTER' else: where_parent_prefix = '' left_join = '' display = '(p1.prefix << p2.display_prefix OR p2.prefix <<= p1.prefix %s) OR (p2.prefix >>= p1.prefix %s)' % (where_parents, where_children) where, opt = self._expand_prefix_query(query) sql = """ SELECT id, vrf_id, vrf_rt, vrf_name, family, display, match, prefix, prefix_length, display_prefix::text AS display_prefix, description, comment, inherited_tags, tags, node, pool_id, pool_name, type, status, indent, country, order_id, customer_id, external_key, authoritative_source, alarm_priority, monitor, vlan, added, last_modified, children, total_addresses, used_addresses, free_addresses, avps, expires FROM ( SELECT DISTINCT ON(vrf_rt_order(vrf.rt), p1.prefix) p1.id, p1.prefix, p1.display_prefix, p1.description, p1.comment, COALESCE(p1.inherited_tags, '{}') AS inherited_tags, COALESCE(p1.tags, '{}') AS tags, p1.node, pool.id AS pool_id, pool.name AS pool_name, p1.type, p1.indent, p1.country, p1.order_id, p1.customer_id, p1.external_key, p1.authoritative_source, p1.alarm_priority, p1.monitor, p1.vlan, p1.added, p1.last_modified, p1.children, p1.total_addresses, p1.used_addresses, p1.free_addresses, p1.status, p1.avps, p1.expires, vrf.id AS vrf_id, vrf.rt AS vrf_rt, vrf.name AS vrf_name, masklen(p1.prefix) AS prefix_length, family(p1.prefix) AS family, (""" + display + """) AS display, CASE WHEN p1.prefix = p2.prefix THEN true ELSE false END AS match FROM ip_net_plan AS p1 -- possible set LEFT OUTER JOIN, if we are doing a parent_prefix operation """ + left_join + """ JOIN ip_net_plan AS p2 ON ( ( (p1.vrf_id = p2.vrf_id) AND ( -- Join in the parents (p1) of matching prefixes (p2) (iprange(p1.prefix) >>= iprange(p2.prefix) """ + where_parents + """) OR -- Join in the children (p1) of matching prefixes (p2) (iprange(p1.prefix) << iprange(p2.prefix) """ + where_children + """) OR -- Join in all neighbors (p1) of matching prefixes (p2) (true = """ + include_neighbors + """ AND iprange(p1.prefix) << iprange(p2.display_prefix::cidr) AND p1.indent = p2.indent) ) ) -- set match conditions for p2 AND p2.id IN ( SELECT inp.id FROM ip_net_plan AS inp JOIN ip_net_vrf AS vrf ON inp.vrf_id = vrf.id LEFT JOIN ip_net_pool AS pool ON inp.pool_id = pool.id WHERE """ + where + """ ORDER BY vrf_rt_order(vrf.rt) NULLS FIRST, prefix LIMIT """ + str(int(search_options['max_result']) + int(search_options['offset'])) + """ ) ) JOIN ip_net_vrf AS vrf ON (p1.vrf_id = vrf.id) LEFT JOIN ip_net_pool AS pool ON (p1.pool_id = pool.id) -- possible set where conditions, if we are doing a parent_prefix operation """ + where_parent_prefix + """ ORDER BY vrf_rt_order(vrf.rt) NULLS FIRST, p1.prefix, CASE WHEN p1.prefix = p2.prefix THEN 0 ELSE 1 END OFFSET """ + str(search_options['offset']) + ") AS a ORDER BY vrf_rt_order(vrf_rt) NULLS FIRST, prefix" self._execute(sql, opt) result = list() for row in self._curs_pg: result.append(dict(row)) # This is a SQL LIMIT clause implemented in Python. It is performed # here to avoid a silly planner missestimate in PostgreSQL. 
For too # low values of LIMIT, the planner will prefer plans with very low # startup costs which will in turn lead to the slow plan. We avoid # the low value (or any value really) of LIMIT by performing the # LIMIT in Python. There is still a LIMIT on the inner query which # together with the OFFSET, which is still performed in PostgreSQL, # yields a rather small result set and thus high speed. if len(result) >= int(search_options['max_result']): break return { 'search_options': search_options, 'result': result }
TypeError
dataset/ETHPy150Open SpriteLink/NIPAP/nipap/nipap/backend.py/Nipap.search_prefix
1,514
def _expand_asn_query(self, query, table_name = None):
    """ Expand ASN query dict into a WHERE-clause.

        If you need to prefix each column reference with a table
        name, that can be supplied via the table_name argument.
    """
    where = str()
    opt = list()

    # handle table name, can be None
    if table_name is None:
        col_prefix = ""
    else:
        col_prefix = table_name + "."

    if type(query['val1']) == dict and type(query['val2']) == dict:
        # Sub expression, recurse! This is used for boolean operators: AND OR
        # add parantheses

        sub_where1, opt1 = self._expand_asn_query(query['val1'], table_name)
        sub_where2, opt2 = self._expand_asn_query(query['val2'], table_name)
        try:
            where += str(" (%s %s %s) " % (sub_where1, _operation_map[query['operator']], sub_where2) )
        except __HOLE__:
            raise NipapNoSuchOperatorError("No such operator %s" % str(query['operator']))

        opt += opt1
        opt += opt2

    else:
        # TODO: raise exception if someone passes one dict and one "something else"?

        # val1 is variable, val2 is string.
        asn_attr = dict()
        asn_attr['asn'] = 'asn'
        asn_attr['name'] = 'name'

        if query['val1'] not in asn_attr:
            raise NipapInputError('Search variable \'%s\' unknown' % str(query['val1']))

        # workaround for handling equal matches of NULL-values
        if query['operator'] == 'equals' and query['val2'] is None:
            query['operator'] = 'is'
        elif query['operator'] == 'not_equals' and query['val2'] is None:
            query['operator'] = 'is_not'

        # build where clause
        if query['operator'] not in _operation_map:
            raise NipapNoSuchOperatorError("No such operator %s" % query['operator'])

        where = str(" %s%s %s %%s " %
            ( col_prefix, asn_attr[query['val1']],
            _operation_map[query['operator']] )
        )

        opt.append(query['val2'])

    return where, opt
KeyError
dataset/ETHPy150Open SpriteLink/NIPAP/nipap/nipap/backend.py/Nipap._expand_asn_query
1,515
def search_asn(self, auth, query, search_options=None): """ Search ASNs for entries matching 'query' * `auth` [BaseAuth] AAA options. * `query` [dict_to_sql] How the search should be performed. * `search_options` [options_dict] Search options, see below. Returns a list of dicts. The `query` argument passed to this function is designed to be able to specify how quite advanced search operations should be performed in a generic format. It is internally expanded to a SQL WHERE-clause. The `query` is a dict with three elements, where one specifies the operation to perform and the two other specifies its arguments. The arguments can themselves be `query` dicts, to build more complex queries. The :attr:`operator` key specifies what operator should be used for the comparison. Currently the following operators are supported: * :data:`and` - Logical AND * :data:`or` - Logical OR * :data:`equals` - Equality; = * :data:`not_equals` - Inequality; != * :data:`like` - SQL LIKE * :data:`regex_match` - Regular expression match * :data:`regex_not_match` - Regular expression not match The :attr:`val1` and :attr:`val2` keys specifies the values which are subjected to the comparison. :attr:`val1` can be either any prefix attribute or an entire query dict. :attr:`val2` can be either the value you want to compare the prefix attribute to, or an entire `query` dict. The search options can also be used to limit the number of rows returned or set an offset for the result. The following options are available: * :attr:`max_result` - The maximum number of prefixes to return (default :data:`50`). * :attr:`offset` - Offset the result list this many prefixes (default :data:`0`). This is the documentation of the internal backend function. It's exposed over XML-RPC, please also see the XML-RPC documentation for :py:func:`nipap.xmlrpc.NipapXMLRPC.search_tag` for full understanding. """ if search_options is None: search_options = {} # # sanitize search options and set default if option missing # # max_result if 'max_result' not in search_options: search_options['max_result'] = 50 else: try: search_options['max_result'] = int(search_options['max_result']) except (__HOLE__, TypeError): raise NipapValueError('Invalid value for option' + ''' 'max_result'. Only integer values allowed.''') # offset if 'offset' not in search_options: search_options['offset'] = 0 else: try: search_options['offset'] = int(search_options['offset']) except (ValueError, TypeError): raise NipapValueError('Invalid value for option' + ''' 'offset'. Only integer values allowed.''') self._logger.debug('search_asn search_options: %s' % str(search_options)) opt = None sql = """ SELECT * FROM ip_net_asn """ # add where clause if we have any search terms if query != {}: where, opt = self._expand_asn_query(query) sql += " WHERE " + where sql += " ORDER BY asn LIMIT " + str(search_options['max_result']) self._execute(sql, opt) result = list() for row in self._curs_pg: result.append(dict(row)) return { 'search_options': search_options, 'result': result }
ValueError
dataset/ETHPy150Open SpriteLink/NIPAP/nipap/nipap/backend.py/Nipap.search_asn
1,516
def _parse_asn_query(self, query_str):
    """ Parse a smart search query for ASNs

        This is a helper function to smart_search_pool for easier unit
        testing of the parser.
    """
    # find query parts
    query_str_parts = self._get_query_parts(query_str)

    # go through parts and add to query_parts list
    query_parts = list()
    for query_str_part in query_str_parts:
        is_int = True
        try:
            int(query_str_part['string'])
        except __HOLE__:
            is_int = False

        if is_int:
            self._logger.debug("Query part '" + query_str_part['string'] + "' interpreted as integer (ASN)")
            query_parts.append({
                'interpretation': {
                    'string': query_str_part['string'],
                    'interpretation': 'asn',
                    'attribute': 'asn',
                    'operator': 'equals',
                },
                'operator': 'equals',
                'val1': 'asn',
                'val2': query_str_part['string']
            })
        else:
            self._logger.debug("Query part '" + query_str_part['string'] + "' interpreted as text")
            query_parts.append({
                'interpretation': {
                    'string': query_str_part['string'],
                    'interpretation': 'text',
                    'attribute': 'name',
                    'operator': 'regex',
                },
                'operator': 'regex_match',
                'val1': 'name',
                'val2': query_str_part['string']
            })

    # Sum all query parts to one query
    query = {}
    if len(query_parts) > 0:
        query = query_parts[0]

    if len(query_parts) > 1:
        for query_part in query_parts[1:]:
            query = {
                'interpretation': {
                    'interpretation': 'and',
                    'operator': 'and',
                },
                'operator': 'and',
                'val1': query_part,
                'val2': query
            }

    return query

#
# Tag functions
#
ValueError
dataset/ETHPy150Open SpriteLink/NIPAP/nipap/nipap/backend.py/Nipap._parse_asn_query
1,517
def _expand_tag_query(self, query, table_name = None):
    """ Expand Tag query dict into a WHERE-clause.

        If you need to prefix each column reference with a table
        name, that can be supplied via the table_name argument.
    """
    where = str()
    opt = list()

    # handle table name, can be None
    if table_name is None:
        col_prefix = ""
    else:
        col_prefix = table_name + "."

    if type(query['val1']) == dict and type(query['val2']) == dict:
        # Sub expression, recurse! This is used for boolean operators: AND OR
        # add parantheses

        sub_where1, opt1 = self._expand_tag_query(query['val1'], table_name)
        sub_where2, opt2 = self._expand_tag_query(query['val2'], table_name)
        try:
            where += str(" (%s %s %s) " % (sub_where1, _operation_map[query['operator']], sub_where2) )
        except __HOLE__:
            raise NipapNoSuchOperatorError("No such operator %s" % str(query['operator']))

        opt += opt1
        opt += opt2

    else:
        # TODO: raise exception if someone passes one dict and one "something else"?

        # val1 is variable, val2 is string.
        tag_attr = dict()
        tag_attr['name'] = 'name'

        if query['val1'] not in tag_attr:
            raise NipapInputError('Search variable \'%s\' unknown' % str(query['val1']))

        # workaround for handling equal matches of NULL-values
        if query['operator'] == 'equals' and query['val2'] is None:
            query['operator'] = 'is'
        elif query['operator'] == 'not_equals' and query['val2'] is None:
            query['operator'] = 'is_not'

        # build where clause
        if query['operator'] not in _operation_map:
            raise NipapNoSuchOperatorError("No such operator %s" % query['operator'])

        where = str(" %s%s %s %%s " %
            ( col_prefix, tag_attr[query['val1']],
            _operation_map[query['operator']] )
        )

        opt.append(query['val2'])

    return where, opt
KeyError
dataset/ETHPy150Open SpriteLink/NIPAP/nipap/nipap/backend.py/Nipap._expand_tag_query
1,518
def search_tag(self, auth, query, search_options=None): """ Search Tags for entries matching 'query' * `auth` [BaseAuth] AAA options. * `query` [dict_to_sql] How the search should be performed. * `search_options` [options_dict] Search options, see below. Returns a list of dicts. The `query` argument passed to this function is designed to be able to specify how quite advanced search operations should be performed in a generic format. It is internally expanded to a SQL WHERE-clause. The `query` is a dict with three elements, where one specifies the operation to perform and the two other specifies its arguments. The arguments can themselves be `query` dicts, to build more complex queries. The :attr:`operator` key specifies what operator should be used for the comparison. Currently the following operators are supported: * :data:`and` - Logical AND * :data:`or` - Logical OR * :data:`equals` - Equality; = * :data:`not_equals` - Inequality; != * :data:`like` - SQL LIKE * :data:`regex_match` - Regular expression match * :data:`regex_not_match` - Regular expression not match The :attr:`val1` and :attr:`val2` keys specifies the values which are subjected to the comparison. :attr:`val1` can be either any prefix attribute or an entire query dict. :attr:`val2` can be either the value you want to compare the prefix attribute to, or an entire `query` dict. The search options can also be used to limit the number of rows returned or set an offset for the result. The following options are available: * :attr:`max_result` - The maximum number of prefixes to return (default :data:`50`). * :attr:`offset` - Offset the result list this many prefixes (default :data:`0`). This is the documentation of the internal backend function. It's exposed over XML-RPC, please also see the XML-RPC documentation for :py:func:`nipap.xmlrpc.NipapXMLRPC.search_asn` for full understanding. """ if search_options is None: search_options = {} # # sanitize search options and set default if option missing # # max_result if 'max_result' not in search_options: search_options['max_result'] = 50 else: try: search_options['max_result'] = int(search_options['max_result']) except (ValueError, TypeError): raise NipapValueError('Invalid value for option' + ''' 'max_result'. Only integer values allowed.''') # offset if 'offset' not in search_options: search_options['offset'] = 0 else: try: search_options['offset'] = int(search_options['offset']) except (__HOLE__, TypeError): raise NipapValueError('Invalid value for option' + ''' 'offset'. Only integer values allowed.''') self._logger.debug('search_tag search_options: %s' % str(search_options)) opt = None sql = """ SELECT * FROM (SELECT DISTINCT unnest(tags) AS name FROM ip_net_plan) AS a """ # add where clause if we have any search terms if query != {}: where, opt = self._expand_tag_query(query) sql += " WHERE " + where sql += " ORDER BY name LIMIT " + str(search_options['max_result']) self._execute(sql, opt) result = list() for row in self._curs_pg: result.append(dict(row)) return { 'search_options': search_options, 'result': result } # vim: et ts=4 :
ValueError
dataset/ETHPy150Open SpriteLink/NIPAP/nipap/nipap/backend.py/Nipap.search_tag
1,519
def load_model(path):
    try:
        return get_model(path)
    except __HOLE__:
        raise ImproperlyConfigured(
            "{0} must be of the form 'app_label.model_name'".format(path)
        )
    except LookupError:
        raise ImproperlyConfigured("{0} has not been installed".format(path))
ValueError
dataset/ETHPy150Open haystack/eyebrowse-server/notifications/conf.py/load_model
1,520
def load_path_attr(path):
    i = path.rfind(".")
    module, attr = path[:i], path[i + 1:]
    try:
        mod = importlib.import_module(module)
    except __HOLE__ as e:
        raise ImproperlyConfigured(
            "Error importing {0}: '{1}'".format(module, e))
    try:
        attr = getattr(mod, attr)
    except AttributeError:
        raise ImproperlyConfigured(
            "Module '{0}' does not define a '{1}'".format(module, attr))
    return attr
ImportError
dataset/ETHPy150Open haystack/eyebrowse-server/notifications/conf.py/load_path_attr
1,521
def is_installed(package):
    try:
        __import__(package)
        return True
    except __HOLE__:
        return False
ImportError
dataset/ETHPy150Open haystack/eyebrowse-server/notifications/conf.py/is_installed
1,522
@property
def _ndim(self):
    try:
        # From array protocol
        array = self.context[0][0].__array_interface__
        n = array['shape'][1]
        assert n == 2 or n == 3
        return n
    except __HOLE__:
        # Fall back on list
        return len(self.context[0][0][0])
AttributeError
dataset/ETHPy150Open Toblerity/Shapely/shapely/geometry/multipolygon.py/MultiPolygonAdapter._ndim
1,523
def geos_multipolygon_from_polygons(arg):
    """
    ob must be either a MultiPolygon, sequence or array of sequences
    or arrays.
    """
    if isinstance(arg, MultiPolygon):
        return geos_geom_from_py(arg)

    obs = getattr(arg, 'geoms', arg)
    obs = [ob for ob in obs
           if ob and not (isinstance(ob, polygon.Polygon) and ob.is_empty)]
    L = len(obs)

    # Bail immediately if we have no input points.
    if L <= 0:
        return (lgeos.GEOSGeom_createEmptyCollection(6), 3)

    exemplar = obs[0]
    try:
        N = len(exemplar[0][0])
    except __HOLE__:
        N = exemplar._ndim
    assert N == 2 or N == 3

    subs = (c_void_p * L)()
    for i, ob in enumerate(obs):
        if isinstance(ob, polygon.Polygon):
            shell = ob.exterior
            holes = ob.interiors
        else:
            shell = ob[0]
            holes = ob[1]
        geom, ndims = polygon.geos_polygon_from_py(shell, holes)
        subs[i] = cast(geom, c_void_p)

    return (lgeos.GEOSGeom_createCollection(6, subs, L), N)

# Test runner
TypeError
dataset/ETHPy150Open Toblerity/Shapely/shapely/geometry/multipolygon.py/geos_multipolygon_from_polygons
1,524
def desc():
    info = read('README.rst')
    try:
        return info + '\n\n' + read('doc/changelog.rst')
    except __HOLE__:
        return info

# grep flask_admin/__init__.py since python 3.x cannot import it before using 2to3
IOError
dataset/ETHPy150Open flask-admin/flask-admin/setup.py/desc
1,525
def authenticate(email=None, password=None):
    """
    Authenticates the user based on the email field and sets the
    backend information
    """
    try:
        backend = EmailBackend()
        user = backend.authenticate(email=email, password=password)
    except __HOLE__:
        # This backend doesn't accept these credentials
        # as arguments. Try the next one.
        return None

    # Annotate the user object with the path of the backend.
    if user:
        user.backend = "%s.%s" % (
            backend.__module__,
            backend.__class__.__name__
        )
    return user
TypeError
dataset/ETHPy150Open jjdelc/django-easy-registration/easyreg/backends.py/authenticate
1,526
def upload_template(filename, destination, context=None, use_jinja=False, template_dir=None, use_sudo=False, backup=True, mirror_local_mode=False, mode=None, pty=None, keep_trailing_newline=False, temp_dir=''): """ Render and upload a template text file to a remote host. Returns the result of the inner call to `~fabric.operations.put` -- see its documentation for details. ``filename`` should be the path to a text file, which may contain `Python string interpolation formatting <http://docs.python.org/library/stdtypes.html#string-formatting>`_ and will be rendered with the given context dictionary ``context`` (if given.) Alternately, if ``use_jinja`` is set to True and you have the Jinja2 templating library available, Jinja will be used to render the template instead. Templates will be loaded from the invoking user's current working directory by default, or from ``template_dir`` if given. The resulting rendered file will be uploaded to the remote file path ``destination``. If the destination file already exists, it will be renamed with a ``.bak`` extension unless ``backup=False`` is specified. By default, the file will be copied to ``destination`` as the logged-in user; specify ``use_sudo=True`` to use `sudo` instead. The ``mirror_local_mode``, ``mode``, and ``temp_dir`` kwargs are passed directly to an internal `~fabric.operations.put` call; please see its documentation for details on these two options. The ``pty`` kwarg will be passed verbatim to any internal `~fabric.operations.run`/`~fabric.operations.sudo` calls, such as those used for testing directory-ness, making backups, etc. The ``keep_trailing_newline`` kwarg will be passed when creating Jinja2 Environment which is False by default, same as Jinja2's behaviour. .. versionchanged:: 1.1 Added the ``backup``, ``mirror_local_mode`` and ``mode`` kwargs. .. versionchanged:: 1.9 Added the ``pty`` kwarg. .. versionchanged:: 1.11 Added the ``keep_trailing_newline`` kwarg. .. versionchanged:: 1.11 Added the ``temp_dir`` kwarg. """ func = use_sudo and sudo or run if pty is not None: func = partial(func, pty=pty) # Normalize destination to be an actual filename, due to using StringIO with settings(hide('everything'), warn_only=True): if func('test -d %s' % _expand_path(destination)).succeeded: sep = "" if destination.endswith('/') else "/" destination += sep + os.path.basename(filename) # Use mode kwarg to implement mirror_local_mode, again due to using # StringIO if mirror_local_mode and mode is None: mode = os.stat(apply_lcwd(filename, env)).st_mode # To prevent put() from trying to do this # logic itself mirror_local_mode = False # Process template text = None if use_jinja: try: template_dir = template_dir or os.getcwd() template_dir = apply_lcwd(template_dir, env) from jinja2 import Environment, FileSystemLoader jenv = Environment(loader=FileSystemLoader(template_dir), keep_trailing_newline=keep_trailing_newline) text = jenv.get_template(filename).render(**context or {}) # Force to a byte representation of Unicode, or str()ification # within Paramiko's SFTP machinery may cause decode issues for # truly non-ASCII characters. 
text = text.encode('utf-8') except __HOLE__: import traceback tb = traceback.format_exc() abort(tb + "\nUnable to import Jinja2 -- see above.") else: if template_dir: filename = os.path.join(template_dir, filename) filename = apply_lcwd(filename, env) with open(os.path.expanduser(filename)) as inputfile: text = inputfile.read() if context: text = text % context # Back up original file if backup and exists(destination): func("cp %s{,.bak}" % _expand_path(destination)) # Upload the file. return put( local_path=StringIO(text), remote_path=destination, use_sudo=use_sudo, mirror_local_mode=mirror_local_mode, mode=mode, temp_dir=temp_dir )
ImportError
dataset/ETHPy150Open fabric/fabric/fabric/contrib/files.py/upload_template
1,527
def _replacer(x):
    """Replace a number with its hexadecimal representation. Used to tag
    temporary variables with their calling scope's id.
    """
    # get the hex repr of the binary char and remove 0x and pad by pad_size
    # zeros
    try:
        hexin = ord(x)
    except __HOLE__:
        # bytes literals masquerade as ints when iterating in py3
        hexin = x
    return hex(hexin)
TypeError
dataset/ETHPy150Open pydata/pandas/pandas/computation/scope.py/_replacer
1,528
def resolve(self, key, is_local):
    """Resolve a variable name in a possibly local context

    Parameters
    ----------
    key : text_type
        A variable name
    is_local : bool
        Flag indicating whether the variable is local or not (prefixed with
        the '@' symbol)

    Returns
    -------
    value : object
        The value of a particular variable
    """
    try:
        # only look for locals in outer scope
        if is_local:
            return self.scope[key]

        # not a local variable so check in resolvers if we have them
        if self.has_resolvers:
            return self.resolvers[key]

        # if we're here that means that we have no locals and we also have
        # no resolvers
        assert not is_local and not self.has_resolvers
        return self.scope[key]
    except __HOLE__:
        try:
            # last ditch effort we look in temporaries
            # these are created when parsing indexing expressions
            # e.g., df[df > 0]
            return self.temps[key]
        except KeyError:
            raise compu.ops.UndefinedVariableError(key, is_local)
KeyError
dataset/ETHPy150Open pydata/pandas/pandas/computation/scope.py/Scope.resolve
1,529
def __call__(self, *args, **keywords):
        if len(args) < 2 or len(args) > 3:
            raise BadArgument("Start response callback requires either two or three arguments: got %s" % str(args))
        if len(args) == 3:
            exc_info = args[2]
            try:
                try:
                    self.http_resp.reset()
                except IllegalStateException, isx:
                    raise exc_info[0], exc_info[1], exc_info[2]
            finally:
                exc_info = None
        else:
            if self.called > 0:
                raise StartResponseCalledTwice("Start response callback may only be called once, without exception information.")
        status_str = args[0]
        headers_list = args[1]
        if not isinstance(status_str, types.StringType):
            raise BadArgument("Start response callback requires string as first argument")
        if not isinstance(headers_list, types.ListType):
            raise BadArgument("Start response callback requires list as second argument")
        try:
            status_code, status_message_str = status_str.split(" ", 1)
            self.http_resp.setStatus(int(status_code))
        except ValueError:
            raise BadArgument("Status string must be of the form '<int> <string>'")
        self.make_write_object()
        try:
            for header_name, header_value in headers_list:
                header_name_lower = header_name.lower()
                if hop_by_hop_headers.has_key(header_name_lower):
                    raise HopByHopHeaderSet("Under WSGI, it is illegal to set hop-by-hop headers, i.e. '%s'" % header_name)
                if header_name_lower == "content-length":
                    try:
                        self.set_content_length(int(header_value))
                    except ValueError, v:
                        raise BadArgument("Content-Length header value must be a string containing an integer, not '%s'" % header_value)
                else:
                    final_value = header_value.encode('latin-1')
                    # Here would be the place to check for control characters, whitespace, etc
                    self.http_resp.addHeader(header_name, final_value)
        except (AttributeError, __HOLE__), t:
            raise BadArgument("Start response callback headers must contain a list of (<string>,<string>) tuples")
        except UnicodeError, u:
            raise BadArgument("Encoding error: header values may only contain latin-1 characters, not '%s'" % repr(header_value))
        except ValueError, v:
            raise BadArgument("Headers list must contain 2-tuples")
        self.called += 1
        return self.write_callable
TypeError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/modjy/modjy_response.py/start_response_object.__call__
1,530
def check_password(user, raw_password):
    """Given a DB entry and a raw password, check its validity."""

    # Check if the user's password is a "hardened hash".
    if user.password.startswith('hh$'):
        alg, salt, bc_pwd = user.password.split('$', 3)[1:]
        hash = get_hexdigest(alg, salt, raw_password)
        algo_and_hash, key_ver = bc_pwd.rsplit('$', 1)
        try:
            shared_key = settings.HMAC_KEYS[key_ver]
        except KeyError:
            log.info('Invalid shared key version "{0}"'.format(key_ver))
            return False
        bc_value = algo_and_hash[6:]
        hmac_value = _hmac_create('$'.join([alg, salt, hash]), shared_key)

        if _bcrypt_verify(hmac_value, bc_value):
            # Password is a match, convert to bcrypt format.
            user.set_password(raw_password)
            user.save()
            return True

        return False

    # Normal bcrypt password checking.
    algo_and_hash, key_ver = user.password.rsplit('$', 1)
    try:
        shared_key = settings.HMAC_KEYS[key_ver]
    except __HOLE__:
        log.info('Invalid shared key version "{0}"'.format(key_ver))
        return False
    bc_value = algo_and_hash[algo_and_hash.find('$'):]  # Yes, bcrypt <3s the leading $.
    hmac_value = _hmac_create(raw_password, shared_key)

    matched = _bcrypt_verify(hmac_value, bc_value)

    # Update password hash if HMAC key has since changed.
    if matched and getattr(settings, 'PWD_HMAC_REKEY', True):
        latest_key_id = max(settings.HMAC_KEYS.keys())
        if key_ver != latest_key_id:
            user.set_password(raw_password)
            user.save()

    return matched
KeyError
dataset/ETHPy150Open fwenzel/django-sha2/django_sha2/bcrypt_auth.py/check_password
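
A reduced sketch of the versioned-key lookup performed above; HMAC_KEYS here is a stand-in for the Django settings.HMAC_KEYS mapping used in the original:

import hashlib
import hmac

HMAC_KEYS = {'2015-01-01': b'server-side-secret'}  # illustrative stand-in

def hmac_for(raw_password, key_ver):
    # Unknown key versions make the stored hash unverifiable, so fail closed.
    try:
        shared_key = HMAC_KEYS[key_ver]
    except KeyError:
        return None
    return hmac.new(shared_key, raw_password.encode('utf-8'), hashlib.sha512).hexdigest()
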
1,531
def get_by_name(self, region, name):
        """Get one SSH key from a RunAbove account.

        :param region: Region where the key is
        :param name: Name of the key to retrieve
        """
        try:
            region_name = region.name
        except __HOLE__:
            region_name = region
        url = self.basepath + '/' + self._api.encode_for_api(name)
        key = self._api.get(url, {'region': region_name})
        return self._dict_to_obj(key)
AttributeError
dataset/ETHPy150Open runabove/python-runabove/runabove/ssh_key.py/SSHKeyManager.get_by_name
1,532
def create(self, region, name, public_key):
        """Register a new SSH key in a RunAbove account.

        :param region: Region where the key will be added
        :param name: Name of the key
        :param public_key: Public key value
        """
        try:
            region_name = region.name
        except __HOLE__:
            region_name = region
        content = {
            'publicKey': public_key,
            'region': region_name,
            'name': name
        }
        self._api.post(self.basepath, content)
        return self.get_by_name(region_name, name)
AttributeError
dataset/ETHPy150Open runabove/python-runabove/runabove/ssh_key.py/SSHKeyManager.create
1,533
def delete(self, region, key):
        """Delete an SSH key from an account.

        :param region: Region where the key is
        :param key: SSH key to be deleted
        """
        try:
            region_name = region.name
        except AttributeError:
            region_name = region
        try:
            name = key.name
        except __HOLE__:
            name = key
        url = self.basepath + '/' + self._api.encode_for_api(name)
        return self._api.delete(url, {'region': region_name})
AttributeError
dataset/ETHPy150Open runabove/python-runabove/runabove/ssh_key.py/SSHKeyManager.delete
1,534
def test_dictOfLists(self):
        args = {
            'openid.mode': ['checkid_setup'],
            'openid.identity': self.id_url,
            'openid.assoc_handle': self.assoc_handle,
            'openid.return_to': self.rt_url,
            'openid.trust_root': self.tr_url,
            }
        try:
            result = self.decode(args)
        except __HOLE__, err:
            self.failUnless(str(err).find('values') != -1, err)
        else:
            self.fail("Expected TypeError, but got result %s" % (result,))
TypeError
dataset/ETHPy150Open CollabQ/CollabQ/openid/test/test_server.py/TestDecode.test_dictOfLists
1,535
def get_port_spec_info(pipeline, module):
    type_map = {'OutputPort': 'output', 'InputPort': 'input'}
    try:
        type = type_map[module.name]
    except __HOLE__:
        raise VistrailsInternalError("cannot translate type '%s'" % module.name)
    if type == 'input':
        get_edges = pipeline.graph.edges_from
        get_port_name = \
            lambda x: pipeline.connections[x].destination.name
    else:  # type == 'output'
        get_edges = pipeline.graph.edges_to
        get_port_name = \
            lambda x: pipeline.connections[x].source.name
    # conns = get_edges(module.id)
    # for i, m in pipeline.modules.iteritems():
    #     print i, m.name
    # for j, c in pipeline.connections.iteritems():
    #     print j, c.source.moduleId, c.destination.moduleId
    neighbors = [(pipeline.modules[m_id], get_port_name(c_id))
                 for (m_id, c_id) in get_edges(module.id)]
    port_name = neighbors[0][1]
    sigstring, depth = coalesce_port_specs(neighbors, type)
    # sigstring = neighbor.get_port_spec(port_name, type).sigstring

    # FIXME check old registry here?
    port_optional = False
    for function in module.functions:
        if function.name == 'name':
            port_name = function.params[0].strValue
        if function.name == 'optional':
            port_optional = function.params[0].strValue == 'True'
    # print 'psi:', port_name, old_name, sigstring
    return (port_name, sigstring, port_optional, depth, neighbors)

###############################################################################
KeyError
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/core/modules/sub_module.py/get_port_spec_info
1,536
def test_events_addons(self):
        types = {
            amo.ADDON_ANY: None,
            amo.ADDON_EXTENSION: 'ADDON',
            amo.ADDON_THEME: 'THEME',
            amo.ADDON_DICT: 'DICT',
            amo.ADDON_SEARCH: 'SEARCH',
            amo.ADDON_LPAPP: 'LP',
            amo.ADDON_LPADDON: 'LP',
            amo.ADDON_PLUGIN: 'ADDON',
            amo.ADDON_API: 'ADDON',
            amo.ADDON_PERSONA: 'PERSONA',
        }
        statuses = {
            amo.STATUS_NULL: None,
            amo.STATUS_UNREVIEWED: 'PRELIM',
            amo.STATUS_PENDING: None,
            amo.STATUS_NOMINATED: 'FULL',
            amo.STATUS_PUBLIC: 'UPDATE',
            amo.STATUS_DISABLED: None,
            amo.STATUS_BETA: None,
            amo.STATUS_LITE: 'PRELIM',
            amo.STATUS_LITE_AND_NOMINATED: 'FULL',
            amo.STATUS_PURGATORY: None,
            amo.STATUS_DELETED: None,
            amo.STATUS_REJECTED: None,
            amo.STATUS_REVIEW_PENDING: None,
            amo.STATUS_BLOCKED: None,
        }

        for tk, tv in types.items():
            for sk, sv in statuses.items():
                try:
                    event = getattr(amo, 'REVIEWED_%s_%s' % (tv, sv))
                except AttributeError:
                    try:
                        event = getattr(amo, 'REVIEWED_%s' % tv)
                    except __HOLE__:
                        event = None
                self.check_event(tk, sk, event)
AttributeError
dataset/ETHPy150Open mozilla/addons-server/src/olympia/editors/tests/test_models.py/TestReviewerScore.test_events_addons
1,537
def render(engine, format, filepath):
    """Render file with Graphviz engine into format, return result filename.

    Args:
        engine: The layout commmand used for rendering ('dot', 'neato', ...).
        format: The output format used for rendering ('pdf', 'png', ...).
        filepath: Path to the DOT source file to render.
    Returns:
        The (possibly relative) path of the rendered file.
    Raises:
        RuntimeError: If the Graphviz executable is not found.
    """
    args, rendered = command(engine, format, filepath)

    try:
        subprocess.call(args, startupinfo=STARTUPINFO)
    except __HOLE__ as e:
        if e.errno == errno.ENOENT:
            raise RuntimeError('failed to execute %r, '
                'make sure the Graphviz executables '
                'are on your systems\' path' % args)
        else:  # pragma: no cover
            raise

    return rendered
OSError
dataset/ETHPy150Open xflr6/graphviz/graphviz/backend.py/render
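
The handler masked above turns a missing layout executable into a clearer error. A generic version of the idiom, independent of graphviz's internal command() helper:

import errno
import subprocess

def run_or_explain(args):
    try:
        return subprocess.check_output(args)
    except OSError as e:
        if e.errno == errno.ENOENT:  # executable not found on PATH
            raise RuntimeError('failed to execute %r; is it installed and on PATH?' % (args,))
        raise
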
1,538
def pipe(engine, format, data):
    """Return data piped through Graphviz engine into format.

    Args:
        engine: The layout commmand used for rendering ('dot', 'neato', ...)
        format: The output format used for rendering ('pdf', 'png', ...)
        data: The binary (encoded) DOT source string to render.
    Returns:
        Binary (encoded) stdout of the layout command.
    Raises:
        RuntimeError: If the Graphviz executable is not found.
    """
    args, _ = command(engine, format)

    try:
        proc = subprocess.Popen(args, stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                startupinfo=STARTUPINFO)
    except __HOLE__ as e:
        if e.errno == errno.ENOENT:
            raise RuntimeError('failed to execute %r, '
                'make sure the Graphviz executables '
                'are on your systems\' path' % args)
        else:  # pragma: no cover
            raise

    outs, errs = proc.communicate(data)
    return outs
OSError
dataset/ETHPy150Open xflr6/graphviz/graphviz/backend.py/pipe
1,539
def ProcessBackendNode(self, node):
    """Processes XML nodes labeled 'backend' into a Backends object."""
    tag = xml_parser_utils.GetTag(node)
    if tag != 'backend':
      self.errors.append('Unrecognized node: <%s>' % tag)
      return

    backend = Backend()
    name = xml_parser_utils.GetAttribute(node, 'name')
    if not name:
      self.errors.append('All backends must have names')
      backend.name = '-'
    else:
      backend.name = name
    instance_class = xml_parser_utils.GetChildNodeText(node, 'class')
    if instance_class:
      backend.instance_class = instance_class
    instances = xml_parser_utils.GetChildNodeText(node, 'instances')
    if instances:
      try:
        backend.instances = int(instances)
      except __HOLE__:
        self.errors.append(
            '<instances> must be an integer (bad value %s) in backend %s' %
            (instances, backend.name))
    max_concurrent_requests = xml_parser_utils.GetChildNodeText(
        node, 'max-concurrent-requests')
    if max_concurrent_requests:
      try:
        backend.max_concurrent_requests = int(max_concurrent_requests)
      except ValueError:
        self.errors.append('<max-concurrent-requests> must be an integer '
                           '(bad value %s) in backend %s' %
                           (max_concurrent_requests, backend.name))

    options_node = xml_parser_utils.GetChild(node, 'options')
    if options_node is not None:
      for sub_node in options_node.getchildren():
        tag = xml_parser_utils.GetTag(sub_node)
        if tag not in ('fail-fast', 'dynamic', 'public'):
          self.errors.append('<options> only supports values fail-fast, '
                             'dynamic, and public (bad value %s) in backend %s'
                             % (tag, backend.name))
          continue
        tag = tag.replace('-', '')
        if xml_parser_utils.BooleanValue(sub_node.text):
          backend.options.add(tag)
        else:
          if tag in backend.options:
            backend.options.remove(tag)

    self.backends.append(backend)
ValueError
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/tools/backends_xml_parser.py/BackendsXmlParser.ProcessBackendNode
1,540
def _format_date(d):
    try:
        return d.strftime('%d-%m-%Y')
    except __HOLE__:
        return d
AttributeError
dataset/ETHPy150Open dimagi/commcare-hq/custom/bihar/reports/indicators/display.py/_format_date
1,541
def pop(queue, quantity=1):
    '''
    Pop one or more or all items from the queue return them.
    '''
    cmd = 'SELECT name FROM {0}'.format(queue)
    if quantity != 'all':
        try:
            quantity = int(quantity)
        except __HOLE__ as exc:
            error_txt = ('Quantity must be an integer or "all".\n'
                         'Error: "{0}".'.format(exc))
            raise SaltInvocationError(error_txt)
        cmd = ''.join([cmd, ' LIMIT {0}'.format(quantity)])
    log.debug('SQL Query: {0}'.format(cmd))
    con = _conn(queue)
    items = []
    with con:
        cur = con.cursor()
        result = cur.execute(cmd).fetchall()
        if len(result) > 0:
            items = [item[0] for item in result]
            itemlist = '","'.join(items)
            _quote_escape(itemlist)
            del_cmd = '''DELETE FROM {0} WHERE name IN ("{1}")'''.format(
                queue, itemlist)
            log.debug('SQL Query: {0}'.format(del_cmd))
            cur.execute(del_cmd)
        con.commit()
    log.info(items)
    return items
ValueError
dataset/ETHPy150Open saltstack/salt/salt/queues/sqlite_queue.py/pop
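
The quantity check above is a validate-then-use idiom; a standalone sketch with SaltInvocationError replaced by ValueError so it runs without Salt:

def parse_quantity(quantity):
    # 'all' means no LIMIT clause; anything else must be an integer.
    if quantity == 'all':
        return None
    try:
        return int(quantity)
    except ValueError as exc:
        raise ValueError('Quantity must be an integer or "all".\n'
                         'Error: "{0}".'.format(exc))
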
1,542
def __virtual__():
    '''
    Only work on systems that are a proxy minion
    '''
    try:
        if salt.utils.is_proxy() and __opts__['proxy']['proxytype'] == 'rest_sample':
            return __virtualname__
    except __HOLE__:
        return (False, 'The rest_service execution module failed to load. Check the proxy key in pillar.')

    return (False, 'The rest_service execution module failed to load: only works on a rest_sample proxy minion.')
KeyError
dataset/ETHPy150Open saltstack/salt/salt/modules/rest_service.py/__virtual__
1,543
def isStr(s):
    t = ''
    try:
        t += s
    except __HOLE__:
        return 0
    return 1
TypeError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/pycrypto-2.6.1/lib/Crypto/SelfTest/Signature/test_pkcs1_pss.py/isStr
1,544
def __getattr__(self, attr):
        if attr in self._fields:
            return self._fields[attr].get()

        for conditional_field in self._conditional_fields:
            try:
                return getattr(conditional_field, attr)
            except __HOLE__:
                pass

        # pragma: no cover
        raise AttributeError("No known field '%s'" % (attr))
AttributeError
dataset/ETHPy150Open alexras/bread/bread/struct.py/BreadStruct.__getattr__
1,545
def __setattr__(self, attr, value):
        try:
            if attr[0] == '_':
                super(BreadStruct, self).__setattr__(attr, value)
            elif attr in self._fields:
                field = self._fields[attr]
                field.set(value)
            else:
                for conditional_field in self._conditional_fields:
                    try:
                        return setattr(conditional_field, attr, value)
                    except __HOLE__:
                        pass

                raise AttributeError("No known field '%s'" % (attr))
        except CreationError as e:
            raise ValueError('Error while setting %s: %s' % (field._name, e))
AttributeError
dataset/ETHPy150Open alexras/bread/bread/struct.py/BreadStruct.__setattr__
1,546
def extractFromBinary(self,binaryImg,colorImg, minsize = 5, maxsize = -1,appx_level=3): """ This method performs blob extraction given a binary source image that is used to get the blob images, and a color source image. binarymg- The binary image with the blobs. colorImg - The color image. minSize - The minimum size of the blobs in pixels. maxSize - The maximum blob size in pixels. * *appx_level* - The blob approximation level - an integer for the maximum distance between the true edge and the approximation edge - lower numbers yield better approximation. """ #If you hit this recursion limit may god have mercy on your soul. #If you really are having problems set the value higher, but this means # you have over 10,000,000 blobs in your image. sys.setrecursionlimit(5000) #h_next moves to the next external contour #v_next() moves to the next internal contour if (maxsize <= 0): maxsize = colorImg.width * colorImg.height retVal = [] test = binaryImg.meanColor() if( test[0]==0.00 and test[1]==0.00 and test[2]==0.00): return FeatureSet(retVal) # There are a couple of weird corner cases with the opencv # connect components libraries - when you try to find contours # in an all black image, or an image with a single white pixel # that sits on the edge of an image the whole thing explodes # this check catches those bugs. -KAS # Also I am submitting a bug report to Willow Garage - please bare with us. ptest = (4*255.0)/(binaryImg.width*binaryImg.height) # val if two pixels are white if( test[0]<=ptest and test[1]<=ptest and test[2]<=ptest): return retVal seq = cv.FindContours( binaryImg._getGrayscaleBitmap(), self.mMemStorage, cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE) if not list(seq): warnings.warn("Unable to find Blobs. Retuning Empty FeatureSet.") return FeatureSet([]) try: # note to self # http://code.activestate.com/recipes/474088-tail-call-optimization-decorator/ retVal = self._extractFromBinary(seq,False,colorImg,minsize,maxsize,appx_level) except __HOLE__,e: logger.warning("You exceeded the recursion limit. This means you probably have too many blobs in your image. We suggest you do some morphological operations (erode/dilate) to reduce the number of blobs in your image. This function was designed to max out at about 5000 blobs per image.") except e: logger.warning("SimpleCV Find Blobs Failed - This could be an OpenCV python binding issue") del seq return FeatureSet(retVal)
RuntimeError
dataset/ETHPy150Open sightmachine/SimpleCV/SimpleCV/Features/BlobMaker.py/BlobMaker.extractFromBinary
1,547
def get_token_from_authorization(self, authorization):
        try:
            scheme, token = authorization.split()
        except __HOLE__:
            raise ApiError(401, {'code': 'invalid_authorization'})

        if scheme != 'Bearer':
            raise ApiError(401, {'code': 'invalid_authorization.scheme'})

        return token
ValueError
dataset/ETHPy150Open 4Catalyzer/flask-resty/flask_resty/jwt.py/JwtAuthentication.get_token_from_authorization
1,548
def get_page(self, object_list):
        '''
        Return a paginated object list, along with some metadata

        If self.page_size is not None, pagination is enabled.

        The page_size can be overridden by passing it in the requst, but it is
        limited to 1 < page_size < max_page_size.

        max_page_size defaults to self.page_size.

        If a page_num is passed, it is fed to regular Django pagination

        If an offset is passed, page_num is derived from it by dividing it by
        page_size.
        '''
        page_size = getattr(self, 'page_size', None)
        if page_size is None:
            return {
                'meta': {},
                'objects': object_list,
            }

        max_page_size = getattr(self, 'max_page_size', page_size)

        try:
            page_size = int(self.request.GET.get(self.LIMIT_PARAM, page_size))
        except __HOLE__:
            raise http.NotFound('Invalid page size')
        page_size = max(1, min(page_size, max_page_size))

        page_num = 0
        try:
            page_num = int(self.request.GET[self.PAGE_PARAM])
        except ValueError:
            # Bad page - default to 0
            pass
        except KeyError:
            try:
                offset = int(self.request.GET[self.OFFSET_PARAM])
            except (ValueError, KeyError):
                pass
            else:
                page_num = offset // page_size

        paginator = Paginator(object_list, page_size, allow_empty_first_page=True)
        try:
            page = paginator.page(page_num + 1)
        except EmptyPage:
            raise http.NotFound()

        return {
            'meta': {
                'offset': page.start_index() - 1,
                'page': page_num,
                'total_pages': paginator.num_pages,
                'limit': page_size,
                'count': paginator.count,
                'has_next': page.has_next(),
                'has_prev': page.has_previous(),
            },
            'objects': page.object_list,
        }

    # Response helpers
ValueError
dataset/ETHPy150Open funkybob/django-nap/nap/rest/publisher.py/Publisher.get_page
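
A condensed sketch of the limit clamping done above, with the query parameters passed in explicitly (parameter names are illustrative):

def clamp_page_size(params, default_size, max_size, limit_param='limit'):
    try:
        size = int(params.get(limit_param, default_size))
    except ValueError:  # non-numeric limit in the query string
        raise ValueError('Invalid page size')
    return max(1, min(size, max_size))
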
1,549
@verbose def export_volume(self, fname, include_surfaces=True, include_discrete=True, dest='mri', trans=None, mri_resolution=False, use_lut=True, verbose=None): """Exports source spaces to nifti or mgz file Parameters ---------- fname : str Name of nifti or mgz file to write. include_surfaces : bool If True, include surface source spaces. include_discrete : bool If True, include discrete source spaces. dest : 'mri' | 'surf' If 'mri' the volume is defined in the coordinate system of the original T1 image. If 'surf' the coordinate system of the FreeSurfer surface is used (Surface RAS). trans : dict, str, or None Either a transformation filename (usually made using mne_analyze) or an info dict (usually opened using read_trans()). If string, an ending of `.fif` or `.fif.gz` will be assumed to be in FIF format, any other ending will be assumed to be a text file with a 4x4 transformation matrix (like the `--trans` MNE-C option. Must be provided if source spaces are in head coordinates and include_surfaces and mri_resolution are True. mri_resolution : bool If True, the image is saved in MRI resolution (e.g. 256 x 256 x 256). use_lut : bool If True, assigns a numeric value to each source space that corresponds to a color on the freesurfer lookup table. verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Notes ----- This method requires nibabel. """ # import nibabel or raise error try: import nibabel as nib except __HOLE__: raise ImportError('This function requires nibabel.') # Check coordinate frames of each source space coord_frames = np.array([s['coord_frame'] for s in self]) # Raise error if trans is not provided when head coordinates are used # and mri_resolution and include_surfaces are true if (coord_frames == FIFF.FIFFV_COORD_HEAD).all(): coords = 'head' # all sources in head coordinates if mri_resolution and include_surfaces: if trans is None: raise ValueError('trans containing mri to head transform ' 'must be provided if mri_resolution and ' 'include_surfaces are true and surfaces ' 'are in head coordinates') elif trans is not None: logger.info('trans is not needed and will not be used unless ' 'include_surfaces and mri_resolution are True.') elif (coord_frames == FIFF.FIFFV_COORD_MRI).all(): coords = 'mri' # all sources in mri coordinates if trans is not None: logger.info('trans is not needed and will not be used unless ' 'sources are in head coordinates.') # Raise error if all sources are not in the same space, or sources are # not in mri or head coordinates else: raise ValueError('All sources must be in head coordinates or all ' 'sources must be in mri coordinates.') # use lookup table to assign values to source spaces logger.info('Reading FreeSurfer lookup table') # read the lookup table lut = _get_lut() # Setup a dictionary of source types src_types = dict(volume=[], surface=[], discrete=[]) # Populate dictionary of source types for src in self: # volume sources if src['type'] == 'vol': src_types['volume'].append(src) # surface sources elif src['type'] == 'surf': src_types['surface'].append(src) # discrete sources elif src['type'] == 'discrete': src_types['discrete'].append(src) # raise an error if dealing with source type other than volume # surface or discrete else: raise ValueError('Unrecognized source type: %s.' 
% src['type']) # Get shape, inuse array and interpolation matrix from volume sources inuse = 0 for ii, vs in enumerate(src_types['volume']): # read the lookup table value for segmented volume if 'seg_name' not in vs: raise ValueError('Volume sources should be segments, ' 'not the entire volume.') # find the color value for this volume id_ = _get_lut_id(lut, vs['seg_name'], use_lut) if ii == 0: # get the inuse array if mri_resolution: # read the mri file used to generate volumes aseg_data = nib.load(vs['mri_file']).get_data() # get the voxel space shape shape3d = (vs['mri_height'], vs['mri_depth'], vs['mri_width']) else: # get the volume source space shape # read the shape in reverse order # (otherwise results are scrambled) shape3d = vs['shape'][2::-1] if mri_resolution: # get the values for this volume use = id_ * (aseg_data == id_).astype(int).ravel('F') else: use = id_ * vs['inuse'] inuse += use # Raise error if there are no volume source spaces if np.array(inuse).ndim == 0: raise ValueError('Source spaces must contain at least one volume.') # create 3d grid in the MRI_VOXEL coordinate frame # len of inuse array should match shape regardless of mri_resolution assert len(inuse) == np.prod(shape3d) # setup the image in 3d space img = inuse.reshape(shape3d).T # include surface and/or discrete source spaces if include_surfaces or include_discrete: # setup affine transform for source spaces if mri_resolution: # get the MRI to MRI_VOXEL transform affine = invert_transform(vs['vox_mri_t']) else: # get the MRI to SOURCE (MRI_VOXEL) transform affine = invert_transform(vs['src_mri_t']) # modify affine if in head coordinates if coords == 'head': # read mri -> head transformation mri_head_t = _get_trans(trans)[0] # get the HEAD to MRI transform head_mri_t = invert_transform(mri_head_t) # combine transforms, from HEAD to MRI_VOXEL affine = combine_transforms(head_mri_t, affine, 'head', 'mri_voxel') # loop through the surface source spaces if include_surfaces: # get the surface names (assumes left, right order. may want # to add these names during source space generation surf_names = ['Left-Cerebral-Cortex', 'Right-Cerebral-Cortex'] for i, surf in enumerate(src_types['surface']): # convert vertex positions from their native space # (either HEAD or MRI) to MRI_VOXEL space srf_rr = apply_trans(affine['trans'], surf['rr']) # convert to numeric indices ix_orig, iy_orig, iz_orig = srf_rr.T.round().astype(int) # clip indices outside of volume space ix_clip = np.maximum(np.minimum(ix_orig, shape3d[2] - 1), 0) iy_clip = np.maximum(np.minimum(iy_orig, shape3d[1] - 1), 0) iz_clip = np.maximum(np.minimum(iz_orig, shape3d[0] - 1), 0) # compare original and clipped indices n_diff = np.array((ix_orig != ix_clip, iy_orig != iy_clip, iz_orig != iz_clip)).any(0).sum() # generate use warnings for clipping if n_diff > 0: warn('%s surface vertices lay outside of volume space.' ' Consider using a larger volume space.' 
% n_diff) # get surface id or use default value i = _get_lut_id(lut, surf_names[i], use_lut) # update image to include surface voxels img[ix_clip, iy_clip, iz_clip] = i # loop through discrete source spaces if include_discrete: for i, disc in enumerate(src_types['discrete']): # convert vertex positions from their native space # (either HEAD or MRI) to MRI_VOXEL space disc_rr = apply_trans(affine['trans'], disc['rr']) # convert to numeric indices ix_orig, iy_orig, iz_orig = disc_rr.T.astype(int) # clip indices outside of volume space ix_clip = np.maximum(np.minimum(ix_orig, shape3d[2] - 1), 0) iy_clip = np.maximum(np.minimum(iy_orig, shape3d[1] - 1), 0) iz_clip = np.maximum(np.minimum(iz_orig, shape3d[0] - 1), 0) # compare original and clipped indices n_diff = np.array((ix_orig != ix_clip, iy_orig != iy_clip, iz_orig != iz_clip)).any(0).sum() # generate use warnings for clipping if n_diff > 0: warn('%s discrete vertices lay outside of volume ' 'space. Consider using a larger volume space.' % n_diff) # set default value img[ix_clip, iy_clip, iz_clip] = 1 if use_lut: logger.info('Discrete sources do not have values on ' 'the lookup table. Defaulting to 1.') # calculate affine transform for image (MRI_VOXEL to RAS) if mri_resolution: # MRI_VOXEL to MRI transform transform = vs['vox_mri_t'].copy() else: # MRI_VOXEL to MRI transform # NOTE: 'src' indicates downsampled version of MRI_VOXEL transform = vs['src_mri_t'].copy() if dest == 'mri': # combine with MRI to RAS transform transform = combine_transforms(transform, vs['mri_ras_t'], transform['from'], vs['mri_ras_t']['to']) # now setup the affine for volume image affine = transform['trans'] # make sure affine converts from m to mm affine[:3] *= 1e3 # save volume data # setup image for file if fname.endswith(('.nii', '.nii.gz')): # save as nifit # setup the nifti header hdr = nib.Nifti1Header() hdr.set_xyzt_units('mm') # save the nifti image img = nib.Nifti1Image(img, affine, header=hdr) elif fname.endswith('.mgz'): # save as mgh # convert to float32 (float64 not currently supported) img = img.astype('float32') # save the mgh image img = nib.freesurfer.mghformat.MGHImage(img, affine) else: raise(ValueError('Unrecognized file extension')) # write image to file nib.save(img, fname)
ImportError
dataset/ETHPy150Open mne-tools/mne-python/mne/source_space.py/SourceSpaces.export_volume
1,550
@verbose def setup_volume_source_space(subject, fname=None, pos=5.0, mri=None, sphere=(0.0, 0.0, 0.0, 90.0), bem=None, surface=None, mindist=5.0, exclude=0.0, overwrite=False, subjects_dir=None, volume_label=None, add_interpolator=True, verbose=None): """Setup a volume source space with grid spacing or discrete source space Parameters ---------- subject : str Subject to process. fname : str | None Filename to use. If None, the source space will not be saved (only returned). pos : float | dict Positions to use for sources. If float, a grid will be constructed with the spacing given by `pos` in mm, generating a volume source space. If dict, pos['rr'] and pos['nn'] will be used as the source space locations (in meters) and normals, respectively, creating a discrete source space. NOTE: For a discrete source space (`pos` is a dict), `mri` must be None. mri : str | None The filename of an MRI volume (mgh or mgz) to create the interpolation matrix over. Source estimates obtained in the volume source space can then be morphed onto the MRI volume using this interpolator. If pos is a dict, this can be None. sphere : array_like (length 4) Define spherical source space bounds using origin and radius given by (ox, oy, oz, rad) in mm. Only used if `bem` and `surface` are both None. bem : str | None Define source space bounds using a BEM file (specifically the inner skull surface). surface : str | dict | None Define source space bounds using a FreeSurfer surface file. Can also be a dictionary with entries `'rr'` and `'tris'`, such as those returned by `read_surface()`. mindist : float Exclude points closer than this distance (mm) to the bounding surface. exclude : float Exclude points closer than this distance (mm) from the center of mass of the bounding surface. overwrite: bool If True, overwrite output file (if it exists). subjects_dir : string, or None Path to SUBJECTS_DIR if it is not set in the environment. volume_label : str | None Region of interest corresponding with freesurfer lookup table. add_interpolator : bool If True and ``mri`` is not None, then an interpolation matrix will be produced. verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Returns ------- src : list The source space. Note that this list will have length 1 for compatibility reasons, as most functions expect source spaces to be provided as lists). Notes ----- To create a discrete source space, `pos` must be a dict, 'mri' must be None, and 'volume_label' must be None. To create a whole brain volume source space, `pos` must be a float and 'mri' must be provided. To create a volume source space from label, 'pos' must be a float, 'volume_label' must be provided, and 'mri' must refer to a .mgh or .mgz file with values corresponding to the freesurfer lookup-table (typically aseg.mgz). """ subjects_dir = get_subjects_dir(subjects_dir) if bem is not None and surface is not None: raise ValueError('Only one of "bem" and "surface" should be ' 'specified') if mri is not None: if not op.isfile(mri): raise IOError('mri file "%s" not found' % mri) if isinstance(pos, dict): raise ValueError('Cannot create interpolation matrix for ' 'discrete source space, mri must be None if ' 'pos is a dict') if volume_label is not None: if mri is None: raise RuntimeError('"mri" must be provided if "volume_label" is ' 'not None') # Check that volume label is found in .mgz file volume_labels = get_volume_labels_from_aseg(mri) if volume_label not in volume_labels: raise ValueError('Volume %s not found in file %s. 
Double check ' 'freesurfer lookup table.' % (volume_label, mri)) sphere = np.asarray(sphere) if sphere.size != 4: raise ValueError('"sphere" must be array_like with 4 elements') # triage bounding argument if bem is not None: logger.info('BEM file : %s', bem) elif surface is not None: if isinstance(surface, dict): if not all(key in surface for key in ['rr', 'tris']): raise KeyError('surface, if dict, must have entries "rr" ' 'and "tris"') # let's make sure we have geom info surface = _read_surface_geom(surface, verbose=False) surf_extra = 'dict()' elif isinstance(surface, string_types): if not op.isfile(surface): raise IOError('surface file "%s" not found' % surface) surf_extra = surface logger.info('Boundary surface file : %s', surf_extra) else: logger.info('Sphere : origin at (%.1f %.1f %.1f) mm' % (sphere[0], sphere[1], sphere[2])) logger.info(' radius : %.1f mm' % sphere[3]) # triage pos argument if isinstance(pos, dict): if not all(key in pos for key in ['rr', 'nn']): raise KeyError('pos, if dict, must contain "rr" and "nn"') pos_extra = 'dict()' else: # pos should be float-like try: pos = float(pos) except (TypeError, __HOLE__): raise ValueError('pos must be a dict, or something that can be ' 'cast to float()') if not isinstance(pos, float): logger.info('Source location file : %s', pos_extra) logger.info('Assuming input in millimeters') logger.info('Assuming input in MRI coordinates') logger.info('Output file : %s', fname) if isinstance(pos, float): logger.info('grid : %.1f mm' % pos) logger.info('mindist : %.1f mm' % mindist) pos /= 1000.0 # convert pos from m to mm if exclude > 0.0: logger.info('Exclude : %.1f mm' % exclude) if mri is not None: logger.info('MRI volume : %s' % mri) exclude /= 1000.0 # convert exclude from m to mm logger.info('') # Explicit list of points if not isinstance(pos, float): # Make the grid of sources sp = _make_discrete_source_space(pos) else: # Load the brain surface as a template if bem is not None: # read bem surface in the MRI coordinate frame surf = read_bem_surfaces(bem, s_id=FIFF.FIFFV_BEM_SURF_ID_BRAIN, verbose=False) logger.info('Loaded inner skull from %s (%d nodes)' % (bem, surf['np'])) elif surface is not None: if isinstance(surface, string_types): # read the surface in the MRI coordinate frame surf = _read_surface_geom(surface) else: surf = surface logger.info('Loaded bounding surface from %s (%d nodes)' % (surface, surf['np'])) surf = deepcopy(surf) surf['rr'] *= 1e-3 # must be converted to meters else: # Load an icosahedron and use that as the surface logger.info('Setting up the sphere...') surf = _get_ico_surface(3) # Scale and shift # center at origin and make radius 1 _normalize_vectors(surf['rr']) # normalize to sphere (in MRI coord frame) surf['rr'] *= sphere[3] / 1000.0 # scale by radius surf['rr'] += sphere[:3] / 1000.0 # move by center _complete_surface_info(surf, True) # Make the grid of sources in MRI space sp = _make_volume_source_space(surf, pos, exclude, mindist, mri, volume_label) # Compute an interpolation matrix to show data in MRI_VOXEL coord frame if mri is not None: _add_interpolator(sp, mri, add_interpolator) elif sp['type'] == 'vol': # If there is no interpolator, it's actually a discrete source space sp['type'] = 'discrete' if 'vol_dims' in sp: del sp['vol_dims'] # Save it sp.update(dict(nearest=None, dist=None, use_tris=None, patch_inds=None, dist_limit=None, pinfo=None, ntri=0, nearest_dist=None, nuse_tri=0, tris=None)) sp = SourceSpaces([sp], dict(working_dir=os.getcwd(), command_line='None')) if fname is not None: 
write_source_spaces(fname, sp, verbose=False) return sp
ValueError
dataset/ETHPy150Open mne-tools/mne-python/mne/source_space.py/setup_volume_source_space
1,551
def _make_volume_source_space(surf, grid, exclude, mindist, mri=None, volume_label=None, do_neighbors=True, n_jobs=1): """Make a source space which covers the volume bounded by surf""" # Figure out the grid size in the MRI coordinate frame mins = np.min(surf['rr'], axis=0) maxs = np.max(surf['rr'], axis=0) cm = np.mean(surf['rr'], axis=0) # center of mass # Define the sphere which fits the surface maxdist = np.sqrt(np.max(np.sum((surf['rr'] - cm) ** 2, axis=1))) logger.info('Surface CM = (%6.1f %6.1f %6.1f) mm' % (1000 * cm[0], 1000 * cm[1], 1000 * cm[2])) logger.info('Surface fits inside a sphere with radius %6.1f mm' % (1000 * maxdist)) logger.info('Surface extent:') for c, mi, ma in zip('xyz', mins, maxs): logger.info(' %s = %6.1f ... %6.1f mm' % (c, 1000 * mi, 1000 * ma)) maxn = np.array([np.floor(np.abs(m) / grid) + 1 if m > 0 else - np.floor(np.abs(m) / grid) - 1 for m in maxs], int) minn = np.array([np.floor(np.abs(m) / grid) + 1 if m > 0 else - np.floor(np.abs(m) / grid) - 1 for m in mins], int) logger.info('Grid extent:') for c, mi, ma in zip('xyz', minn, maxn): logger.info(' %s = %6.1f ... %6.1f mm' % (c, 1000 * mi * grid, 1000 * ma * grid)) # Now make the initial grid ns = maxn - minn + 1 npts = np.prod(ns) nrow = ns[0] ncol = ns[1] nplane = nrow * ncol # x varies fastest, then y, then z (can use unravel to do this) rr = meshgrid(np.arange(minn[2], maxn[2] + 1), np.arange(minn[1], maxn[1] + 1), np.arange(minn[0], maxn[0] + 1), indexing='ij') x, y, z = rr[2].ravel(), rr[1].ravel(), rr[0].ravel() rr = np.array([x * grid, y * grid, z * grid]).T sp = dict(np=npts, nn=np.zeros((npts, 3)), rr=rr, inuse=np.ones(npts, int), type='vol', nuse=npts, coord_frame=FIFF.FIFFV_COORD_MRI, id=-1, shape=ns) sp['nn'][:, 2] = 1.0 assert sp['rr'].shape[0] == npts logger.info('%d sources before omitting any.', sp['nuse']) # Exclude infeasible points dists = np.sqrt(np.sum((sp['rr'] - cm) ** 2, axis=1)) bads = np.where(np.logical_or(dists < exclude, dists > maxdist))[0] sp['inuse'][bads] = False sp['nuse'] -= len(bads) logger.info('%d sources after omitting infeasible sources.', sp['nuse']) _filter_source_spaces(surf, mindist, None, [sp], n_jobs) logger.info('%d sources remaining after excluding the sources outside ' 'the surface and less than %6.1f mm inside.' 
% (sp['nuse'], mindist)) if not do_neighbors: if volume_label is not None: raise RuntimeError('volume_label cannot be None unless ' 'do_neighbors is True') return sp k = np.arange(npts) neigh = np.empty((26, npts), int) neigh.fill(-1) # Figure out each neighborhood: # 6-neighborhood first idxs = [z > minn[2], x < maxn[0], y < maxn[1], x > minn[0], y > minn[1], z < maxn[2]] offsets = [-nplane, 1, nrow, -1, -nrow, nplane] for n, idx, offset in zip(neigh[:6], idxs, offsets): n[idx] = k[idx] + offset # Then the rest to complete the 26-neighborhood # First the plane below idx1 = z > minn[2] idx2 = np.logical_and(idx1, x < maxn[0]) neigh[6, idx2] = k[idx2] + 1 - nplane idx3 = np.logical_and(idx2, y < maxn[1]) neigh[7, idx3] = k[idx3] + 1 + nrow - nplane idx2 = np.logical_and(idx1, y < maxn[1]) neigh[8, idx2] = k[idx2] + nrow - nplane idx2 = np.logical_and(idx1, x > minn[0]) idx3 = np.logical_and(idx2, y < maxn[1]) neigh[9, idx3] = k[idx3] - 1 + nrow - nplane neigh[10, idx2] = k[idx2] - 1 - nplane idx3 = np.logical_and(idx2, y > minn[1]) neigh[11, idx3] = k[idx3] - 1 - nrow - nplane idx2 = np.logical_and(idx1, y > minn[1]) neigh[12, idx2] = k[idx2] - nrow - nplane idx3 = np.logical_and(idx2, x < maxn[0]) neigh[13, idx3] = k[idx3] + 1 - nrow - nplane # Then the same plane idx1 = np.logical_and(x < maxn[0], y < maxn[1]) neigh[14, idx1] = k[idx1] + 1 + nrow idx1 = x > minn[0] idx2 = np.logical_and(idx1, y < maxn[1]) neigh[15, idx2] = k[idx2] - 1 + nrow idx2 = np.logical_and(idx1, y > minn[1]) neigh[16, idx2] = k[idx2] - 1 - nrow idx1 = np.logical_and(y > minn[1], x < maxn[0]) neigh[17, idx1] = k[idx1] + 1 - nrow - nplane # Finally one plane above idx1 = z < maxn[2] idx2 = np.logical_and(idx1, x < maxn[0]) neigh[18, idx2] = k[idx2] + 1 + nplane idx3 = np.logical_and(idx2, y < maxn[1]) neigh[19, idx3] = k[idx3] + 1 + nrow + nplane idx2 = np.logical_and(idx1, y < maxn[1]) neigh[20, idx2] = k[idx2] + nrow + nplane idx2 = np.logical_and(idx1, x > minn[0]) idx3 = np.logical_and(idx2, y < maxn[1]) neigh[21, idx3] = k[idx3] - 1 + nrow + nplane neigh[22, idx2] = k[idx2] - 1 + nplane idx3 = np.logical_and(idx2, y > minn[1]) neigh[23, idx3] = k[idx3] - 1 - nrow + nplane idx2 = np.logical_and(idx1, y > minn[1]) neigh[24, idx2] = k[idx2] - nrow + nplane idx3 = np.logical_and(idx2, x < maxn[0]) neigh[25, idx3] = k[idx3] + 1 - nrow + nplane # Restrict sources to volume of interest if volume_label is not None: try: import nibabel as nib except __HOLE__: raise ImportError("nibabel is required to read segmentation file.") logger.info('Selecting voxels from %s' % volume_label) # Read the segmentation data using nibabel mgz = nib.load(mri) mgz_data = mgz.get_data() # Get the numeric index for this volume label lut = _get_lut() vol_id = _get_lut_id(lut, volume_label, True) # Get indices for this volume label in voxel space vox_bool = mgz_data == vol_id # Get the 3 dimensional indices in voxel space vox_xyz = np.array(np.where(vox_bool)).T # Transform to RAS coordinates # (use tkr normalization or volume won't align with surface sources) trans = _get_mgz_header(mri)['vox2ras_tkr'] # Convert transform from mm to m trans[:3] /= 1000. rr_voi = apply_trans(trans, vox_xyz) # positions of VOI in RAS space # Filter out points too far from volume region voxels dists = _compute_nearest(rr_voi, sp['rr'], return_dists=True)[1] # Maximum distance from center of mass of a voxel to any of its corners maxdist = np.sqrt(((trans[:3, :3].sum(0) / 2.) 
** 2).sum()) bads = np.where(dists > maxdist)[0] # Update source info sp['inuse'][bads] = False sp['vertno'] = np.where(sp['inuse'] > 0)[0] sp['nuse'] = len(sp['vertno']) sp['seg_name'] = volume_label sp['mri_file'] = mri # Update log logger.info('%d sources remaining after excluding sources too far ' 'from VOI voxels', sp['nuse']) # Omit unused vertices from the neighborhoods logger.info('Adjusting the neighborhood info...') # remove non source-space points log_inuse = sp['inuse'] > 0 neigh[:, np.logical_not(log_inuse)] = -1 # remove these points from neigh vertno = np.where(log_inuse)[0] sp['vertno'] = vertno old_shape = neigh.shape neigh = neigh.ravel() checks = np.where(neigh >= 0)[0] removes = np.logical_not(in1d(checks, vertno)) neigh[checks[removes]] = -1 neigh.shape = old_shape neigh = neigh.T # Thought we would need this, but C code keeps -1 vertices, so we will: # neigh = [n[n >= 0] for n in enumerate(neigh[vertno])] sp['neighbor_vert'] = neigh # Set up the volume data (needed for creating the interpolation matrix) r0 = minn * grid voxel_size = grid * np.ones(3) ras = np.eye(3) sp['src_mri_t'] = _make_voxel_ras_trans(r0, ras, voxel_size) sp['vol_dims'] = maxn - minn + 1 return sp
ImportError
dataset/ETHPy150Open mne-tools/mne-python/mne/source_space.py/_make_volume_source_space
1,552
def _get_mri_header(fname):
    """Get MRI header using nibabel"""
    import nibabel as nib
    img = nib.load(fname)
    try:
        return img.header
    except __HOLE__:  # old nibabel
        return img.get_header()
AttributeError
dataset/ETHPy150Open mne-tools/mne-python/mne/source_space.py/_get_mri_header
1,553
@verbose def add_source_space_distances(src, dist_limit=np.inf, n_jobs=1, verbose=None): """Compute inter-source distances along the cortical surface This function will also try to add patch info for the source space. It will only occur if the ``dist_limit`` is sufficiently high that all points on the surface are within ``dist_limit`` of a point in the source space. Parameters ---------- src : instance of SourceSpaces The source spaces to compute distances for. dist_limit : float The upper limit of distances to include (in meters). Note: if limit < np.inf, scipy > 0.13 (bleeding edge as of 10/2013) must be installed. n_jobs : int Number of jobs to run in parallel. Will only use (up to) as many cores as there are source spaces. verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Returns ------- src : instance of SourceSpaces The original source spaces, with distance information added. The distances are stored in src[n]['dist']. Note: this function operates in-place. Notes ----- Requires scipy >= 0.11 (> 0.13 for `dist_limit < np.inf`). This function can be memory- and CPU-intensive. On a high-end machine (2012) running 6 jobs in parallel, an ico-5 (10242 per hemi) source space takes about 10 minutes to compute all distances (`dist_limit = np.inf`). With `dist_limit = 0.007`, computing distances takes about 1 minute. We recommend computing distances once per source space and then saving the source space to disk, as the computed distances will automatically be stored along with the source space data for future use. """ n_jobs = check_n_jobs(n_jobs) src = _ensure_src(src) if not np.isscalar(dist_limit): raise ValueError('limit must be a scalar, got %s' % repr(dist_limit)) if not check_version('scipy', '0.11'): raise RuntimeError('scipy >= 0.11 must be installed (or > 0.13 ' 'if dist_limit < np.inf') if not all(s['type'] == 'surf' for s in src): raise RuntimeError('Currently all source spaces must be of surface ' 'type') if dist_limit < np.inf: # can't do introspection on dijkstra function because it's Cython, # so we'll just try quickly here try: sparse.csgraph.dijkstra(sparse.csr_matrix(np.zeros((2, 2))), limit=1.0) except __HOLE__: raise RuntimeError('Cannot use "limit < np.inf" unless scipy ' '> 0.13 is installed') parallel, p_fun, _ = parallel_func(_do_src_distances, n_jobs) min_dists = list() min_idxs = list() logger.info('Calculating source space distances (limit=%s mm)...' % (1000 * dist_limit)) for s in src: connectivity = mesh_dist(s['tris'], s['rr']) d = parallel(p_fun(connectivity, s['vertno'], r, dist_limit) for r in np.array_split(np.arange(len(s['vertno'])), n_jobs)) # deal with indexing so we can add patch info min_idx = np.array([dd[1] for dd in d]) min_dist = np.array([dd[2] for dd in d]) midx = np.argmin(min_dist, axis=0) range_idx = np.arange(len(s['rr'])) min_dist = min_dist[midx, range_idx] min_idx = min_idx[midx, range_idx] min_dists.append(min_dist) min_idxs.append(min_idx) # now actually deal with distances, convert to sparse representation d = np.concatenate([dd[0] for dd in d]).ravel() # already float32 idx = d > 0 d = d[idx] i, j = np.meshgrid(s['vertno'], s['vertno']) i = i.ravel()[idx] j = j.ravel()[idx] d = sparse.csr_matrix((d, (i, j)), shape=(s['np'], s['np']), dtype=np.float32) s['dist'] = d s['dist_limit'] = np.array([dist_limit], np.float32) # Let's see if our distance was sufficient to allow for patch info if not any(np.any(np.isinf(md)) for md in min_dists): # Patch info can be added! 
for s, min_dist, min_idx in zip(src, min_dists, min_idxs): s['nearest'] = min_idx s['nearest_dist'] = min_dist _add_patch_info(s) else: logger.info('Not adding patch information, dist_limit too small') return src
TypeError
dataset/ETHPy150Open mne-tools/mne-python/mne/source_space.py/add_source_space_distances
1,554
def connect_job(job_id, deployment_name, token_manager=None, app_url=defaults.APP_URL, persist=False, websocket=None, data_url=None): """ connect to a running Juttle program by job_id """ if data_url == None: data_url = get_data_url_for_job(job_id, deployment_name, token_manager=token_manager, app_url=app_url) if websocket == None: websocket = __wss_connect(data_url, token_manager, job_id=job_id) pong = json.dumps({ 'pong': True }) if not persist: job_finished = False while not job_finished: try: data = websocket.recv() if data: payload = json.loads(data) if is_debug_enabled(): printable_payload = dict(payload) if 'points' in payload: # don't want to print out all the outputs when in # debug mode del printable_payload['points'] printable_payload['points'] = 'NOT SHOWN' debug('received %s' % json.dumps(printable_payload)) if 'ping' in payload.keys(): # ping/pong (ie heartbeat) mechanism websocket.send(pong) if is_debug_enabled(): debug('sent %s' % json.dumps(pong)) if 'job_end' in payload.keys() and payload['job_end'] == True: job_finished = True if token_manager.is_access_token_expired(): debug('refreshing access token') token_obj = { "accessToken": token_manager.get_access_token() } # refresh authentication token websocket.send(json.dumps(token_obj)) if 'error' in payload: if payload['error'] == 'NONEXISTENT-JOB': raise JutException('Job "%s" no longer running' % job_id) # return all channel messages yield payload else: debug('payload was "%s", forcing websocket reconnect' % data) raise IOError() except __HOLE__: if is_debug_enabled(): traceback.print_exc() # # We'll retry for just under 30s since internally we stop # running non persistent programs after 30s of not heartbeating # with the client # retry = 1 while retry <= 5: try: debug('network error reconnecting to job %s, ' 'try %s of 5' % (job_id, retry)) websocket = __wss_connect(data_url, token_manager, job_id=job_id) break except socket.error: if is_debug_enabled(): traceback.print_exc() retry += 1 time.sleep(5) debug('network error reconnecting to job %s, ' 'try %s of 5' % (job_id, retry)) websocket = __wss_connect(data_url, token_manager, job_id=job_id) websocket.close()
IOError
dataset/ETHPy150Open jut-io/jut-python-tools/jut/api/data_engine.py/connect_job
1,555
def http_assembler(PCAP):
    print "[*] Loading PCAP file..."
    p = rdpcap(PCAP)
    print "[*] Loading sessions..."
    sessions = p.sessions()

    for session in sessions:
        http_payload = ''
        for packet in sessions[session]:
            try:
                if packet[TCP].dport == 80 or packet[TCP].sport == 80:
                    http_payload += str(packet[TCP].payload)
            except:
                pass

        headers = get_http_headers(http_payload)
        if headers is None:
            continue

        html = find_html(headers, http_payload)
        if html is not None:
            js = find_js(http_payload)
            if js:
                print "[*] JavaScript detected"
                t = time.time()
                try:
                    os.makedirs(JS_DIR)
                except __HOLE__:
                    if os.path.exists(JS_DIR):
                        pass
                    else:
                        raise
                print "[*] Writing html file %s-%d.html to %s folder" %(PCAP, t, JS_DIR)
                file_name = '%s-%d.html' %(PCAP, t)
                fd = open('%s/%s' % (JS_DIR, file_name), 'wb')
                fd.write(html)
                fd.close()
                time.sleep(1)
    return
OSError
dataset/ETHPy150Open mertsarica/hack4career/codes/eval-finder.py/http_assembler
1,556
def _get_timeout(self, timeout):
        try:
            tout = utils.secs_to_timestr(utils.timestr_to_secs(timeout.string))
        except __HOLE__:
            tout = timeout.string
        if timeout.message:
            tout += ' :: ' + timeout.message
        return tout
ValueError
dataset/ETHPy150Open shellderp/sublime-robot-plugin/lib/robot/testdoc.py/JsonConverter._get_timeout
1,557
def to_dicts(collection, key=None, **kw):
    _dicts = []
    try:
        for each in collection:
            try:
                each_dict = each.to_dict(**kw)
                if key:
                    each_dict = key(each_dict)
                _dicts.append(each_dict)
            except __HOLE__:
                _dicts.append(each)
    except TypeError:
        return collection
    return _dicts
AttributeError
dataset/ETHPy150Open ramses-tech/nefertari/nefertari/utils/data.py/to_dicts
1,558
def sequential_id(self, uuid):
        if self.sequence is not None:
            return self.sequence.sequential_id(uuid)
        try:
            return int(uuid)
        except (ValueError, __HOLE__):
            raise ValueError("A `Sequence` instance is required "
                             "to use non integer uuid `%s`." % (uuid,))
TypeError
dataset/ETHPy150Open caxap/redis-moment/moment/bitevents.py/Event.sequential_id
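
The masked tuple above covers both ways int() can fail: non-numeric strings raise ValueError and non-castable objects such as None raise TypeError. A tiny standalone illustration:

def to_sequential_id(uuid):
    try:
        return int(uuid)
    except (ValueError, TypeError):
        raise ValueError('A `Sequence` instance is required '
                         'to use non integer uuid `%s`.' % (uuid,))

# to_sequential_id('42') == 42; to_sequential_id('abc') and to_sequential_id(None) raise ValueError
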
1,559
def test_DnfInstaller():
    from rosdep2.platforms.redhat import DnfInstaller

    @patch.object(DnfInstaller, 'get_packages_to_install')
    def test(mock_method):
        installer = DnfInstaller()
        mock_method.return_value = []
        assert [] == installer.get_install_command(['fake'])

        # no interactive option with YUM
        mock_method.return_value = ['a', 'b']
        expected = [['sudo', '-H', 'dnf', '--assumeyes', '--quiet', 'install', 'a', 'b']]
        val = installer.get_install_command(['whatever'], interactive=False, quiet=True)
        assert val == expected, val + expected
        expected = [['sudo', '-H', 'dnf', '--quiet', 'install', 'a', 'b']]
        val = installer.get_install_command(['whatever'], interactive=True, quiet=True)
        assert val == expected, val + expected
        expected = [['sudo', '-H', 'dnf', '--assumeyes', 'install', 'a', 'b']]
        val = installer.get_install_command(['whatever'], interactive=False, quiet=False)
        assert val == expected, val + expected
        expected = [['sudo', '-H', 'dnf', 'install', 'a', 'b']]
        val = installer.get_install_command(['whatever'], interactive=True, quiet=False)
        assert val == expected, val + expected
    try:
        test()
    except __HOLE__:
        traceback.print_exc()
        raise
AssertionError
dataset/ETHPy150Open ros-infrastructure/rosdep/test/test_rosdep_redhat.py/test_DnfInstaller
1,560
def test_YumInstaller():
    from rosdep2.platforms.redhat import YumInstaller

    @patch.object(YumInstaller, 'get_packages_to_install')
    def test(mock_method):
        installer = YumInstaller()
        mock_method.return_value = []
        assert [] == installer.get_install_command(['fake'])

        # no interactive option with YUM
        mock_method.return_value = ['a', 'b']
        expected = [['sudo', '-H', 'yum', '--assumeyes', '--quiet', '--skip-broken', 'install', 'a', 'b']]
        val = installer.get_install_command(['whatever'], interactive=False, quiet=True)
        assert val == expected, val + expected
        expected = [['sudo', '-H', 'yum', '--quiet', '--skip-broken', 'install', 'a', 'b']]
        val = installer.get_install_command(['whatever'], interactive=True, quiet=True)
        assert val == expected, val + expected
        expected = [['sudo', '-H', 'yum', '--assumeyes', '--skip-broken', 'install', 'a', 'b']]
        val = installer.get_install_command(['whatever'], interactive=False, quiet=False)
        assert val == expected, val + expected
        expected = [['sudo', '-H', 'yum', '--skip-broken', 'install', 'a', 'b']]
        val = installer.get_install_command(['whatever'], interactive=True, quiet=False)
        assert val == expected, val + expected
    try:
        test()
    except __HOLE__:
        traceback.print_exc()
        raise
AssertionError
dataset/ETHPy150Open ros-infrastructure/rosdep/test/test_rosdep_redhat.py/test_YumInstaller
1,561
@classmethod
def parse_float(cls, string):  # {{{
        for bad_char in ['$', ',', '%']:
            string = string.replace(bad_char, '')
        try:
            return float(string)
        except __HOLE__:
            return None
ValueError
dataset/ETHPy150Open mrooney/mintapi/mintapi/api.py/Mint.parse_float
1,562
def login_and_get_token(self, email, password):  # {{{
        # 0: Check to see if we're already logged in.
        if self.token is not None:
            return

        # 1: Login.
        login_url = 'https://wwws.mint.com/login.event?task=L'
        try:
            self.request_and_check(login_url)
        except __HOLE__:
            raise Exception('Failed to load Mint login page')

        data = {'username': email}
        response = self.post('https://wwws.mint.com/getUserPod.xevent',
                             data=data, headers=self.json_headers).text

        data = {'username': email, 'password': password, 'task': 'L',
                'browser': 'firefox', 'browserVersion': '27', 'os': 'linux'}
        response = self.post('https://wwws.mint.com/loginUserSubmit.xevent',
                             data=data, headers=self.json_headers).text
        if 'token' not in response:
            raise Exception('Mint.com login failed[1]')

        response = json.loads(response)
        if not response['sUser']['token']:
            raise Exception('Mint.com login failed[2]')

        # 2: Grab token.
        self.token = response['sUser']['token']
RuntimeError
dataset/ETHPy150Open mrooney/mintapi/mintapi/api.py/Mint.login_and_get_token
1,563
def get_accounts(self, get_detail=False):  # {{{
        # Issue service request.
        req_id = str(self.request_id)

        input = {
            'args': {
                'types': [
                    'BANK',
                    'CREDIT',
                    'INVESTMENT',
                    'LOAN',
                    'MORTGAGE',
                    'OTHER_PROPERTY',
                    'REAL_ESTATE',
                    'VEHICLE',
                    'UNCLASSIFIED'
                ]
            },
            'id': req_id,
            'service': 'MintAccountService',
            'task': 'getAccountsSorted'
            # 'task': 'getAccountsSortedByBalanceDescending'
        }
        data = {'input': json.dumps([input])}

        account_data_url = ('https://wwws.mint.com/bundledServiceController.'
                            'xevent?legacy=false&token=' + self.token)
        response = self.post(account_data_url, data=data,
                             headers=self.json_headers).text
        self.request_id = self.request_id + 1
        if req_id not in response:
            raise Exception('Could not parse account data: ' + response)

        # Parse the request
        response = json.loads(response)
        accounts = response['response'][req_id]['response']

        # Return datetime objects for dates
        for account in accounts:
            for df in DATE_FIELDS:
                if df in account:
                    # Convert from javascript timestamp to unix timestamp
                    # http://stackoverflow.com/a/9744811/5026
                    try:
                        ts = account[df] / 1e3
                    except __HOLE__:
                        # returned data is not a number, don't parse
                        continue
                    account[df + 'InDate'] = datetime.fromtimestamp(ts)

        if get_detail:
            accounts = self.populate_extended_account_detail(accounts)
        return accounts
TypeError
dataset/ETHPy150Open mrooney/mintapi/mintapi/api.py/Mint.get_accounts
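
The date-conversion loop above skips account fields that are not numbers. A minimal version of that conversion, with an illustrative (not Mint's actual) field list:

from datetime import datetime

DATE_FIELDS = ['addAccountDate', 'closeDate']  # illustrative only

def add_parsed_dates(account):
    for df in DATE_FIELDS:
        if df in account:
            try:
                ts = account[df] / 1e3  # JavaScript timestamps are milliseconds
            except TypeError:  # value is not numeric, leave it alone
                continue
            account[df + 'InDate'] = datetime.fromtimestamp(ts)
    return account
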
1,564
def get_net_worth(self, account_data=None):
        if account_data is None:
            account_data = self.get_accounts()

        # account types in this list will be subtracted
        negative_accounts = ['loan', 'loans', 'credit']
        try:
            net_worth = long()
        except __HOLE__:
            net_worth = 0

        # iterate over accounts and add or subtract account balances
        for account in [a for a in account_data if a['isActive']]:
            current_balance = account['currentBalance']
            if account['accountType'] in negative_accounts:
                net_worth -= current_balance
            else:
                net_worth += current_balance
        return net_worth
NameError
dataset/ETHPy150Open mrooney/mintapi/mintapi/api.py/Mint.get_net_worth
1,565
def main(): import getpass import argparse try: import keyring except __HOLE__: keyring = None # Parse command-line arguments {{{ cmdline = argparse.ArgumentParser() cmdline.add_argument('email', nargs='?', default=None, help='The e-mail address for your Mint.com account') cmdline.add_argument('password', nargs='?', default=None, help='The password for your Mint.com account') cmdline.add_argument('--accounts', action='store_true', dest='accounts', default=False, help='Retrieve account information' ' (default if nothing else is specified)') cmdline.add_argument('--budgets', action='store_true', dest='budgets', default=False, help='Retrieve budget information') cmdline.add_argument('--net-worth', action='store_true', dest='net_worth', default=False, help='Retrieve net worth information') cmdline.add_argument('--extended-accounts', action='store_true', dest='accounts_ext', default=False, help='Retrieve extended account information (slower, ' 'implies --accounts)') cmdline.add_argument('--transactions', '-t', action='store_true', default=False, help='Retrieve transactions') cmdline.add_argument('--filename', '-f', help='write results to file. can ' 'be {csv,json} format. default is to write to ' 'stdout.') cmdline.add_argument('--keyring', action='store_true', help='Use OS keyring for storing password ' 'information') options = cmdline.parse_args() if options.keyring and not keyring: cmdline.error('--keyring can only be used if the `keyring` ' 'library is installed.') try: from __builtin__ import raw_input as input except NameError: pass # Try to get the e-mail and password from the arguments email = options.email password = options.password if not email: # If the user did not provide an e-mail, prompt for it email = input("Mint e-mail: ") if keyring and not password: # If the keyring module is installed and we don't yet have # a password, try prompting for it password = keyring.get_password('mintapi', email) if not password: # If we still don't have a password, prompt for it password = getpass.getpass("Mint password: ") if options.keyring: # If keyring option is specified, save the password in the keyring keyring.set_password('mintapi', email, password) if options.accounts_ext: options.accounts = True if not any([options.accounts, options.budgets, options.transactions, options.net_worth]): options.accounts = True mint = Mint.create(email, password) data = None if options.accounts and options.budgets: try: accounts = make_accounts_presentable( mint.get_accounts(get_detail=options.accounts_ext) ) except: accounts = None try: budgets = mint.get_budgets() except: budgets = None data = {'accounts': accounts, 'budgets': budgets} elif options.budgets: try: data = mint.get_budgets() except: data = None elif options.accounts: try: data = make_accounts_presentable(mint.get_accounts( get_detail=options.accounts_ext) ) except: data = None elif options.transactions: data = mint.get_transactions() elif options.net_worth: data = mint.get_net_worth() # output the data if options.transactions: if options.filename is None: print(data.to_json(orient='records')) elif options.filename.endswith('.csv'): data.to_csv(options.filename, index=False) elif options.filename.endswith('.json'): data.to_json(options.filename, orient='records') else: raise ValueError('file extension must be either .csv or .json') else: if options.filename is None: print(json.dumps(data, indent=2)) elif options.filename.endswith('.json'): with open(options.filename, 'w+') as f: json.dump(data, f, indent=2) else: raise ValueError('file type 
must be json for non-transaction data')
ImportError
dataset/ETHPy150Open mrooney/mintapi/mintapi/api.py/main
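A minimal, self-contained sketch of the idiom record 1,565 is built on: the optional keyring dependency is imported under try/except ImportError and bound to None, so later code can feature-test it instead of crashing at import time. The service name 'example-app' below is only a placeholder, not something from the original.

import getpass

try:
    import keyring                  # optional OS credential store
except ImportError:
    keyring = None                  # feature-test this later instead of failing here

def get_password(email, service='example-app'):
    """Return a stored password when keyring is usable, otherwise prompt for one."""
    password = None
    if keyring is not None:
        password = keyring.get_password(service, email)
    if not password:
        password = getpass.getpass("Password: ")
    return password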
1,566
def is_dateutil_result_obj_parsed(date_string): # handle dateutil>=2.5 tuple result first try: res, _ = parser()._parse(date_string) except __HOLE__: res = parser()._parse(date_string) if not res: return False def get_value(obj, key): value = getattr(obj, key) return str(value) if value is not None else '' return any([get_value(res, k) for k in res.__slots__])
TypeError
dataset/ETHPy150Open scrapinghub/dateparser/dateparser/utils/__init__.py/is_dateutil_result_obj_parsed
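Record 1,566 relies on a version shim: dateutil 2.5 and later return a (result, skipped_tokens) tuple from the private parser()._parse() helper, while older releases return the result alone, so the tuple unpack is attempted first and the TypeError falls back to the single-value form. A sketch of just that shim, using the same private dateutil helper the original depends on:

from dateutil.parser import parser

def parse_result(date_string):
    try:
        res, _ = parser()._parse(date_string)    # dateutil >= 2.5: (result, skipped_tokens)
    except TypeError:
        res = parser()._parse(date_string)       # older dateutil: result only
    return res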
1,567
def close(self): """ Clean up resources after use. Note that the instance is no longer readable nor writable after calling close(). The method is automatically called by garbage collectors, but made public to allow explicit cleanup. """ if self._closefile: try: self.file.close() except __HOLE__: pass
IOError
dataset/ETHPy150Open twoolie/NBT/nbt/region.py/RegionFile.close
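Record 1,567 is the defensive-close pattern: file.close() can itself raise IOError (for example when a buffered write fails on the final flush), and a cleanup path that may run from a destructor should swallow it. A stripped-down sketch, not the project's actual class:

class OwnedFile(object):
    def __init__(self, fileobj, closefile=True):
        self.file = fileobj
        self._closefile = closefile

    def close(self):
        """Release the underlying file; safe to call more than once."""
        if self._closefile:
            try:
                self.file.close()
            except IOError:
                pass                # already closed, or the final flush failed

    def __del__(self):
        self.close()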
1,568
def _parse_chunk_headers(self): for x in range(32): for z in range(32): m = self.metadata[x, z] if m.status not in (STATUS_CHUNK_OK, STATUS_CHUNK_OVERLAPPING, \ STATUS_CHUNK_MISMATCHED_LENGTHS): # skip to next if status is NOT_CREATED, OUT_OF_FILE, IN_HEADER, # ZERO_LENGTH or anything else. continue try: self.file.seek(m.blockstart*SECTOR_LENGTH) # offset comes in sectors of 4096 bytes length = unpack(">I", self.file.read(4)) m.length = length[0] # unpack always returns a tuple, even unpacking one element compression = unpack(">B",self.file.read(1)) m.compression = compression[0] except __HOLE__: m.status = STATUS_CHUNK_OUT_OF_FILE continue if m.blockstart*SECTOR_LENGTH + m.length + 4 > self.size: m.status = STATUS_CHUNK_OUT_OF_FILE elif m.length <= 1: # chunk can't be zero length m.status = STATUS_CHUNK_ZERO_LENGTH elif m.length + 4 > m.blocklength * SECTOR_LENGTH: # There are not enough sectors allocated for the whole block m.status = STATUS_CHUNK_MISMATCHED_LENGTHS
IOError
dataset/ETHPy150Open twoolie/NBT/nbt/region.py/RegionFile._parse_chunk_headers
1,569
def password_callback(v, prompt1='Enter private key passphrase:', prompt2='Verify passphrase:'): from getpass import getpass while 1: try: p1=getpass(prompt1) if v: p2=getpass(prompt2) if p1==p2: break else: break except __HOLE__: return None return p1
KeyboardInterrupt
dataset/ETHPy150Open CollabQ/CollabQ/vendor/gdata/tlslite/utils/OpenSSL_RSAKey.py/password_callback
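Record 1,569 wraps an interactive passphrase loop so that Ctrl-C returns None instead of unwinding the caller with KeyboardInterrupt. A compact sketch of the same behaviour:

from getpass import getpass

def prompt_passphrase(verify=True,
                      prompt1='Enter private key passphrase:',
                      prompt2='Verify passphrase:'):
    try:
        while True:
            p1 = getpass(prompt1)
            if not verify or p1 == getpass(prompt2):
                return p1
    except KeyboardInterrupt:
        return None                 # user aborted at either prompt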
1,570
@staticmethod def ensureIsWorkingDirectory(path): """ Ensure that C{path} is a Git working directory. @type path: L{twisted.python.filepath.FilePath} @param path: The path to check. """ try: runCommand(["git", "rev-parse"], cwd=path.path) except (CommandFailed, __HOLE__): raise NotWorkingDirectory( "%s does not appear to be a Git repository." % (path.path,))
OSError
dataset/ETHPy150Open twisted/twisted/twisted/python/_release.py/GitCommand.ensureIsWorkingDirectory
1,571
def getRepositoryCommand(directory): """ Detect the VCS used in the specified directory and return either a L{SVNCommand} or a L{GitCommand} if the directory is a Subversion checkout or a Git repository, respectively. If the directory is neither one nor the other, it raises a L{NotWorkingDirectory} exception. @type directory: L{FilePath} @param directory: The directory to detect the VCS used from. @rtype: L{SVNCommand} or L{GitCommand} @raise NotWorkingDirectory: if no supported VCS can be found from the specified directory. """ try: SVNCommand.ensureIsWorkingDirectory(directory) return SVNCommand except (NotWorkingDirectory, OSError): # It's not SVN, but that's okay, eat the error pass try: GitCommand.ensureIsWorkingDirectory(directory) return GitCommand except (NotWorkingDirectory, __HOLE__): # It's not Git, but that's okay, eat the error pass raise NotWorkingDirectory("No supported VCS can be found in %s" % (directory.path,))
OSError
dataset/ETHPy150Open twisted/twisted/twisted/python/_release.py/getRepositoryCommand
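Records 1,570 and 1,571 detect a Git checkout by running git rev-parse inside the directory and treating a failed command the same as a missing git binary. The originals go through Twisted's runCommand helper and a NotWorkingDirectory exception; the sketch below substitutes plain subprocess to show the shape of the check, so it is an equivalent under that assumption, not the project's code:

import subprocess

def is_git_working_directory(path):
    """True when `path` is inside a Git repository and git is on PATH."""
    try:
        subprocess.check_output(["git", "rev-parse"], cwd=path,
                                stderr=subprocess.STDOUT)
    except (subprocess.CalledProcessError, OSError):
        return False                # not a repo, or git is not installed
    return True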
1,572
def _create_new_file(self, path): attempt_open = True try: self.create(path) except __HOLE__ as e: attempt_open = False sublime.error_message("Cannot create '" + path + "'. See console for details") return attempt_open
OSError
dataset/ETHPy150Open skuroda/Sublime-AdvancedNewFile/advanced_new_file/commands/cut_to_file.py/AdvancedNewFileCutToFile._create_new_file
1,573
def import_packages_module(self): """Imports the 'vistrails.packages' package. This might need to manipulate the Python path to find it. """ if self._packages is not None: return self._packages # Imports standard packages directory conf = self._startup.temp_configuration old_sys_path = copy.copy(sys.path) if conf.check('packageDirectory'): sys.path.insert(0, conf.packageDirectory) try: import vistrails.packages except __HOLE__: debug.critical('ImportError: "packages" sys.path: %s' % sys.path) raise finally: sys.path = old_sys_path self._packages = vistrails.packages return vistrails.packages
ImportError
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/core/packagemanager.py/PackageManager.import_packages_module
1,574
def import_user_packages_module(self): """Imports the 'userspackages' package. This will need to manipulate the Python path to find it. """ if self._userpackages is not None: return self._userpackages # Imports user packages directory old_sys_path = copy.copy(sys.path) userPackageDir = system.get_vistrails_directory('userPackageDir') if userPackageDir is not None: sys.path.insert(0, os.path.join(userPackageDir, os.path.pardir)) try: import userpackages except __HOLE__: debug.critical('ImportError: "userpackages" sys.path: %s' % sys.path) raise finally: sys.path = old_sys_path os.environ['VISTRAILS_USERPACKAGES_DIR'] = userPackageDir self._userpackages = userpackages return userpackages # possible that we don't have userPackageDir set! return None
ImportError
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/core/packagemanager.py/PackageManager.import_user_packages_module
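Records 1,573 and 1,574 share one shape: push a directory onto sys.path just long enough to import a package, log and re-raise the ImportError, and restore the path in a finally block. A generic sketch with the package name and directory left to the caller:

import copy
import sys

def import_from(directory, package_name):
    old_sys_path = copy.copy(sys.path)
    sys.path.insert(0, directory)
    try:
        return __import__(package_name)
    except ImportError:
        print('ImportError: %r, sys.path was: %s' % (package_name, sys.path))
        raise
    finally:
        sys.path = old_sys_path     # always undo the path manipulation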
1,575
def __init__(self, registry, startup): global _package_manager if _package_manager: m = "Package manager can only be constructed once." raise VistrailsInternalError(m) _package_manager = self self._registry = registry self._startup = startup # Contains packages that have not yet been enabled, but exist on the # filesystem self._available_packages = {} # codepath: str -> Package # These other lists contain enabled packages self._package_list = {} # codepath: str -> Package self._package_versions = {} # identifier: str -> version -> Package self._old_identifier_map = {} # old_id: str -> new_id: str self._dependency_graph = vistrails.core.data_structures.graph.Graph() self._default_prefix_dict = \ {'basic_modules': 'vistrails.core.modules.', 'abstraction': 'vistrails.core.modules.'} self._userpackages = None self._packages = None self._abstraction_pkg = None self._currently_importing_package = None # Setup a global __import__ hook that calls Package#import_override() # for all imports executed from that package import __builtin__ self._orig_import = __builtin__.__import__ __builtin__.__import__ = self._import_override # Compute the list of available packages, _available_packages self.build_available_package_names_list() if get_vistrails_configuration().loadPackages: for pkg in self._startup.enabled_packages.itervalues(): self.add_package(pkg.name, prefix=pkg.prefix) else: try: basic_pkg = self._startup.enabled_packages['basic_modules'] except KeyError: pass else: self.add_package(basic_pkg.name, prefix=basic_pkg.prefix) try: abs_pkg = self._startup.enabled_packages['abstraction'] except __HOLE__: pass else: self.add_package(abs_pkg.name, prefix=abs_pkg.prefix)
KeyError
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/core/packagemanager.py/PackageManager.__init__
1,576
def _import_override(self, name, globals={}, locals={}, fromlist=[], level=-1): """Overridden __import__ function. This replaces the builtin __import__ function globally so that we can track imports done from a package. This is recorded in the Package so that reloading the package actually reloads all dependent code. """ # Get the caller module, using globals (like the original __import # does) try: if globals is None: raise KeyError module = globals['__name__'] except __HOLE__: # Another method of getting the caller module, using the stack caller = inspect.currentframe().f_back module = inspect.getmodule(caller) # Some frames might not be associated to a module, because of the # use of exec for instance; we just skip these until we reach a # valid one while module is None: caller = caller.f_back if caller is None: break module = inspect.getmodule(caller) if module: module = module.__name__ # Get the Package from the module name if module: importing_pkg = None current = self._currently_importing_package if (current is not None and current.prefix and module.startswith(current.prefix + current.codepath)): importing_pkg = current else: for pkg in itertools.chain( self._package_list.itervalues(), self._available_packages.itervalues()): if (pkg.prefix is not None and module.startswith(pkg.prefix + pkg.codepath)): importing_pkg = pkg break # If we are importing directly from a package if importing_pkg is not None: old_current = self._currently_importing_package self._currently_importing_package = importing_pkg result = importing_pkg.import_override( self._orig_import, name, globals, locals, fromlist, level, package_importing_directly=True) self._currently_importing_package = old_current return result # If we are doing it indirectly (from other stuff imported from a # package) elif self._currently_importing_package is not None: return self._currently_importing_package.import_override( self._orig_import, name, globals, locals, fromlist, level, package_importing_directly=False) # Else, this is not from a package return self._orig_import(name, globals, locals, fromlist, level)
KeyError
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/core/packagemanager.py/PackageManager._import_override
1,577
def get_available_package(self, codepath, prefix=None): try: pkg = self._available_packages[codepath] except __HOLE__: pkg = self._registry.create_package(codepath, prefix=prefix) self._available_packages[codepath] = pkg pkg.persistent_configuration = \ self._startup.get_pkg_configuration(codepath) return pkg
KeyError
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/core/packagemanager.py/PackageManager.get_available_package
1,578
def get_package(self, identifier, version=None): # check if it's an old identifier identifier = self._old_identifier_map.get(identifier, identifier) try: package_versions = self._package_versions[identifier] if version is not None: return package_versions[version] except __HOLE__: # dynamic packages are only registered in the registry try: return self._registry.get_package_by_name(identifier, version) except MissingPackageVersion: return self._registry.get_package_by_name(identifier) max_version = '0' max_pkg = None for version, pkg in package_versions.iteritems(): if versions_increasing(max_version, version): max_version = version max_pkg = pkg return max_pkg
KeyError
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/core/packagemanager.py/PackageManager.get_package
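Records 1,575 through 1,578 all reduce to a KeyError-driven fallback: try the dictionary first, and only on a miss run the slower lookup or construction. The smallest form of that idiom:

def get_or_create(cache, key, factory):
    try:
        return cache[key]
    except KeyError:
        value = cache[key] = factory(key)
        return value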
1,579
def build_available_package_names_list(self): def is_vistrails_package(path): if os.path.isfile(path): return (path.endswith('.py') and not path.endswith('__init__.py')) elif os.path.isdir(path): return os.path.isfile(os.path.join(path, '__init__.py')) return False def search(dirname, prefix): for name in os.listdir(dirname): if is_vistrails_package(os.path.join(dirname, name)): if name.endswith('.py'): name = name[:-3] self.get_available_package(name, prefix=prefix) # Finds standard packages packages = self.import_packages_module() # This makes VisTrails not zip-safe search(os.path.dirname(packages.__file__), prefix='vistrails.packages.') # Finds user packages userpackages = self.import_user_packages_module() if userpackages is not None: search(os.path.dirname(userpackages.__file__), prefix='userpackages.') # Finds plugin packages try: from pkg_resources import iter_entry_points except __HOLE__: pass else: for entry_point in iter_entry_points('vistrails.packages'): # Reads module name and turns it into prefix and codepath name = entry_point.module_name.rsplit('.', 1) if len(name) > 1: prefix, name = name prefix = '%s.' % prefix else: prefix = '' name, = name # Create the Package, with the right prefix self.get_available_package(name, prefix=prefix) return self._available_packages.keys()
ImportError
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/core/packagemanager.py/PackageManager.build_available_package_names_list
1,580
def is_idx(index): """Checks if an object can work as an index or not.""" if type(index) in six.integer_types: return True elif hasattr(index, "__index__"): # Only works on Python 2.5 (PEP 357) # Exclude the array([idx]) as working as an index. Fixes #303. if (hasattr(index, "shape") and index.shape != ()): return False try: index.__index__() if isinstance(index, bool): warnings.warn( 'using a boolean instead of an integer will result in an ' 'error in the future', DeprecationWarning, stacklevel=2) return True except __HOLE__: return False elif isinstance(index, numpy.integer): return True # For Python 2.4 one should test 0-dim and 1-dim, 1-elem arrays as well elif (isinstance(index, numpy.ndarray) and (index.shape == ()) and index.dtype.str[1] == 'i'): return True return False
TypeError
dataset/ETHPy150Open PyTables/PyTables/tables/utils.py/is_idx
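Record 1,580 duck-types "can this object serve as an index?" by calling __index__() and catching the TypeError. The standard library exposes the same test through operator.index(), which this sketch uses instead of the manual call:

import operator

def can_be_index(obj):
    try:
        operator.index(obj)
    except TypeError:
        return False
    return True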
1,581
def tearDown(self): try: os.unlink(FILENAME) except __HOLE__: pass
OSError
dataset/ETHPy150Open google/oauth2client/tests/test_file.py/OAuth2ClientFileTests.tearDown
1,582
def setUp(self): try: os.unlink(FILENAME) except __HOLE__: pass
OSError
dataset/ETHPy150Open google/oauth2client/tests/test_file.py/OAuth2ClientFileTests.setUp
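Records 1,581 and 1,582 delete a fixture file and ignore the OSError raised when it never existed. The sketch narrows the except clause to the "no such file" errno, which is slightly stricter than the blanket OSError in the originals:

import errno
import os

def remove_quietly(path):
    try:
        os.unlink(path)
    except OSError as exc:
        if exc.errno != errno.ENOENT:   # only swallow "no such file or directory"
            raise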
1,583
def render(self, name, value, attrs=None, choices=()): try: value = {True: u'2', False: u'3', u'2': u'2', u'3': u'3'}[value] except __HOLE__: value = u'1' return super(NullBooleanSelect, self).render(name, value, attrs, choices)
KeyError
dataset/ETHPy150Open CollabQ/CollabQ/vendor/django/forms/widgets.py/NullBooleanSelect.render
1,584
def render(self, name, value, attrs=None): # value is a list of values, each corresponding to a widget # in self.widgets. if not isinstance(value, list): value = self.decompress(value) output = [] final_attrs = self.build_attrs(attrs) id_ = final_attrs.get('id', None) for i, widget in enumerate(self.widgets): try: widget_value = value[i] except __HOLE__: widget_value = None if id_: final_attrs = dict(final_attrs, id='%s_%s' % (id_, i)) output.append(widget.render(name + '_%s' % i, widget_value, final_attrs)) return mark_safe(self.format_output(output))
IndexError
dataset/ETHPy150Open CollabQ/CollabQ/vendor/django/forms/widgets.py/MultiWidget.render
1,585
def main(): """Launch the API server.""" # Parse command line options if present. tornado.options.parse_command_line() options = tornado.options.options # Bring in (server-wide) configuration information. try: import configparser # Python 3.0 except __HOLE__: import ConfigParser as configparser # Read configuration information. configfiles = [options.config, os.path.join(sys.path[0], default_config_directory, default_config_file)] cfg = configparser.ConfigParser() cfg.read(configfiles) # Sanity check. if not cfg.has_section(C.FRONT_END): error("Incomplete configuration information, tried:\n\t" + "\n\t".join(configfiles)) # Allow command-line options to override the configuration file. if options.backend: cfg.set(C.DATASTORE, C.DATASTORE_BACKEND, options.backend) if options.encoding: cfg.set(C.DATASTORE, C.DATASTORE_ENCODING, options.encoding) # Load the desired interface to the datastore. backend = cfg.get(C.DATASTORE, C.DATASTORE_BACKEND) try: module = __import__('datastore.ds_' + backend, fromlist=['Datastore']) datastore = module.Datastore(cfg) except ImportError, x: error("Could not initialize datastore of type \"%s\": %s" % (backend, str(x))) # Initialize the OSM element factory and other modules. init_slabutil(cfg) init_osm_factory(cfg) # Create an instance of the front-end server. port = cfg.getint(C.FRONT_END, C.PORT) feserver = OSMFrontEndServer(cfg, options, datastore) http_server = tornado.httpserver.HTTPServer(feserver.application) http_server.listen(port) # Start the server. try: tornado.ioloop.IOLoop.instance().start() except KeyboardInterrupt: if options.verbose: pass # Print statistics etc. # # Invoke main() #
ImportError
dataset/ETHPy150Open MapQuest/mapquest-osm-server/src/python/frontend/__main__.py/main
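Record 1,585 opens with the 2/3 import shim: the Python 3 module name is tried first, and the ImportError falls back to the Python 2 spelling bound under the new name, so the rest of the code only ever sees configparser:

try:
    import configparser                     # Python 3
except ImportError:
    import ConfigParser as configparser     # Python 2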
1,586
def __init__(self, skeleton, savefile_name, skin=False): super(BoneUILayer, self).__init__() self.user_skin = skin self.count = 0 self.savefile_name = savefile_name try: self.animation = pickle.load( open(savefile_name, "rb") ) except __HOLE__: self.animation = Animation(skeleton) self.timeline = ui.TimeLine(self.animation) self.add(self.timeline) self.tick_delta = 1.0 / 16 self.skeleton = skeleton self.editable_skeleton = None self.animating = False self.animation.move_start() self.update_visual()
IOError
dataset/ETHPy150Open los-cocos/cocos/tools/skeleton/animator.py/BoneUILayer.__init__
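Record 1,586 treats a missing save file as "start from scratch": the IOError from open() routes straight to building a fresh object. A small load-or-create sketch with the constructor passed in as a factory:

import pickle

def load_or_create(savefile_name, factory):
    try:
        with open(savefile_name, "rb") as f:
            return pickle.load(f)
    except IOError:
        return factory()            # no save file yet, begin with a new object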
1,587
def bind_api(**config): class APIMethod(object): api = config['api'] path = config['path'] payload_type = config.get('payload_type', None) payload_list = config.get('payload_list', False) allowed_param = config.get('allowed_param', []) method = config.get('method', 'GET') require_auth = config.get('require_auth', False) search_api = config.get('search_api', False) upload_api = config.get('upload_api', False) use_cache = config.get('use_cache', True) session = requests.Session() def __init__(self, args, kwargs): api = self.api # If authentication is required and no credentials # are provided, throw an error. if self.require_auth and not api.auth: raise TweepError('Authentication required!') self.post_data = kwargs.pop('post_data', None) self.retry_count = kwargs.pop('retry_count', api.retry_count) self.retry_delay = kwargs.pop('retry_delay', api.retry_delay) self.retry_errors = kwargs.pop('retry_errors', api.retry_errors) self.wait_on_rate_limit = kwargs.pop('wait_on_rate_limit', api.wait_on_rate_limit) self.wait_on_rate_limit_notify = kwargs.pop('wait_on_rate_limit_notify', api.wait_on_rate_limit_notify) self.parser = kwargs.pop('parser', api.parser) self.session.headers = kwargs.pop('headers', {}) self.build_parameters(args, kwargs) # Pick correct URL root to use if self.search_api: self.api_root = api.search_root elif self.upload_api: self.api_root = api.upload_root else: self.api_root = api.api_root # Perform any path variable substitution self.build_path() if self.search_api: self.host = api.search_host elif self.upload_api: self.host = api.upload_host else: self.host = api.host # Manually set Host header to fix an issue in python 2.5 # or older where Host is set including the 443 port. # This causes Twitter to issue 301 redirect. # See Issue https://github.com/tweepy/tweepy/issues/12 self.session.headers['Host'] = self.host # Monitoring rate limits self._remaining_calls = None self._reset_time = None def build_parameters(self, args, kwargs): self.session.params = {} for idx, arg in enumerate(args): if arg is None: continue try: self.session.params[self.allowed_param[idx]] = convert_to_utf8_str(arg) except __HOLE__: raise TweepError('Too many parameters supplied!') for k, arg in kwargs.items(): if arg is None: continue if k in self.session.params: raise TweepError('Multiple values for parameter %s supplied!' % k) self.session.params[k] = convert_to_utf8_str(arg) log.info("PARAMS: %r", self.session.params) def build_path(self): for variable in re_path_template.findall(self.path): name = variable.strip('{}') if name == 'user' and 'user' not in self.session.params and self.api.auth: # No 'user' parameter provided, fetch it from Auth instead. value = self.api.auth.get_username() else: try: value = quote(self.session.params[name]) except KeyError: raise TweepError('No parameter value found for path variable: %s' % name) del self.session.params[name] self.path = self.path.replace(variable, value) def execute(self): self.api.cached_result = False # Build the request URL url = self.api_root + self.path full_url = 'https://' + self.host + url # Query the cache if one is available # and this request uses a GET method. 
if self.use_cache and self.api.cache and self.method == 'GET': cache_result = self.api.cache.get(url) # if cache result found and not expired, return it if cache_result: # must restore api reference if isinstance(cache_result, list): for result in cache_result: if isinstance(result, Model): result._api = self.api else: if isinstance(cache_result, Model): cache_result._api = self.api self.api.cached_result = True return cache_result # Continue attempting request until successful # or maximum number of retries is reached. retries_performed = 0 while retries_performed < self.retry_count + 1: # handle running out of api calls if self.wait_on_rate_limit: if self._reset_time is not None: if self._remaining_calls is not None: if self._remaining_calls < 1: sleep_time = self._reset_time - int(time.time()) if sleep_time > 0: if self.wait_on_rate_limit_notify: print("Rate limit reached. Sleeping for:", sleep_time) time.sleep(sleep_time + 5) # sleep for few extra sec # if self.wait_on_rate_limit and self._reset_time is not None and \ # self._remaining_calls is not None and self._remaining_calls < 1: # sleep_time = self._reset_time - int(time.time()) # if sleep_time > 0: # if self.wait_on_rate_limit_notify: # print("Rate limit reached. Sleeping for: " + str(sleep_time)) # time.sleep(sleep_time + 5) # sleep for few extra sec # Apply authentication if self.api.auth: auth = self.api.auth.apply_auth() # Request compression if configured if self.api.compression: self.session.headers['Accept-encoding'] = 'gzip' # Execute request try: resp = self.session.request(self.method, full_url, data=self.post_data, timeout=self.api.timeout, auth=auth, proxies=self.api.proxy) except Exception as e: raise TweepError('Failed to send request: %s' % e) rem_calls = resp.headers.get('x-rate-limit-remaining') if rem_calls is not None: self._remaining_calls = int(rem_calls) elif isinstance(self._remaining_calls, int): self._remaining_calls -= 1 reset_time = resp.headers.get('x-rate-limit-reset') if reset_time is not None: self._reset_time = int(reset_time) if self.wait_on_rate_limit and self._remaining_calls == 0 and ( # if ran out of calls before waiting switching retry last call resp.status_code == 429 or resp.status_code == 420): continue retry_delay = self.retry_delay # Exit request loop if non-retry error code if resp.status_code == 200: break elif (resp.status_code == 429 or resp.status_code == 420) and self.wait_on_rate_limit: if 'retry-after' in resp.headers: retry_delay = float(resp.headers['retry-after']) elif self.retry_errors and resp.status_code not in self.retry_errors: break # Sleep before retrying request again time.sleep(retry_delay) retries_performed += 1 # If an error was returned, throw an exception self.api.last_response = resp if resp.status_code and not 200 <= resp.status_code < 300: try: error_msg, api_error_code = \ self.parser.parse_error(resp.text) except Exception: error_msg = "Twitter error response: status code = %s" % resp.status_code api_error_code = None if is_rate_limit_error_message(error_msg): raise RateLimitError(error_msg, resp) else: raise TweepError(error_msg, resp, api_code=api_error_code) # Parse the response payload result = self.parser.parse(self, resp.text) # Store result into cache if one is available. 
if self.use_cache and self.api.cache and self.method == 'GET' and result: self.api.cache.store(url, result) return result def _call(*args, **kwargs): method = APIMethod(args, kwargs) if kwargs.get('create'): return method else: return method.execute() # Set pagination mode if 'cursor' in APIMethod.allowed_param: _call.pagination_mode = 'cursor' elif 'max_id' in APIMethod.allowed_param: if 'since_id' in APIMethod.allowed_param: _call.pagination_mode = 'id' elif 'page' in APIMethod.allowed_param: _call.pagination_mode = 'page' return _call
IndexError
dataset/ETHPy150Open tweepy/tweepy/tweepy/binder.py/bind_api
1,588
@_git_check def last_checked_for_updates(gitrepo): """ Find out the last time plugins were checked for updates. :param string gitrepo: path to the initialized Git repository :returns: Unix timestamp the last time it was checked, ``0`` if this is the first time. """ retval = 0 config = get_jigconfig(gitrepo) try: timestamp = int(config.get('jig', 'last_checked_for_updates')) retval = datetime.utcfromtimestamp(timestamp) except (NoSectionError, NoOptionError, __HOLE__): pass return retval
ValueError
dataset/ETHPy150Open robmadole/jig/src/jig/plugins/tools.py/last_checked_for_updates
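Record 1,588 reads a timestamp out of an INI file and keeps the default when the section, the option, or a parseable integer is missing: three failure modes, one except clause. The sketch talks to configparser directly instead of the project's get_jigconfig helper, so treat it as an approximation:

try:
    from configparser import ConfigParser, NoSectionError, NoOptionError
except ImportError:                     # Python 2 spelling
    from ConfigParser import ConfigParser, NoSectionError, NoOptionError
from datetime import datetime

def last_checked(config_path):
    config = ConfigParser()
    config.read(config_path)
    try:
        timestamp = int(config.get('jig', 'last_checked_for_updates'))
        return datetime.utcfromtimestamp(timestamp)
    except (NoSectionError, NoOptionError, ValueError):
        return 0                        # never checked before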
1,589
def reload_request(self, locator, index=None): self.web_element_reload = True try: # element was found out via find_elements if index is not None: web_els = self.src_element.find_elements(*locator) web_el = web_els[index] else: web_el = self.src_element.find_element(*locator) except (exceptions.NoSuchElementException, __HOLE__): return False self.web_element_reload = False return web_el
IndexError
dataset/ETHPy150Open CiscoSystems/avos/openstack_dashboard/test/integration_tests/webdriver.py/WebElementWrapper.reload_request
1,590
def reload_request(self, locator, index): try: # element was found out via find_elements if index is not None: web_els = self.find_elements(*locator) web_el = web_els[index] else: web_el = self.find_element(*locator) return web_el except (exceptions.NoSuchElementException, __HOLE__): raise ElementNotReloadableException()
IndexError
dataset/ETHPy150Open CiscoSystems/avos/openstack_dashboard/test/integration_tests/webdriver.py/WebDriverWrapper.reload_request
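Records 1,589 and 1,590 retry a stale element lookup and fold "not found" and "index out of range" into a single failure path. The sketch assumes only the standard selenium exception class; everything else is plain Python rather than the dashboard's wrapper classes:

from selenium.common.exceptions import NoSuchElementException

def refind(container, locator, index=None):
    try:
        if index is not None:
            return container.find_elements(*locator)[index]
        return container.find_element(*locator)
    except (NoSuchElementException, IndexError):
        return None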
1,591
def _emit(self, record, **kwargs): data = {} extra = getattr(record, 'data', None) if not isinstance(extra, dict): if extra: extra = {'data': extra} else: extra = {} for k, v in iteritems(vars(record)): if k in RESERVED: continue if k.startswith('_'): continue if '.' not in k and k not in ('culprit', 'server_name', 'fingerprint'): extra[k] = v else: data[k] = v stack = getattr(record, 'stack', None) if stack is True: stack = iter_stack_frames() if stack: stack = self._get_targetted_stack(stack, record) date = datetime.datetime.utcfromtimestamp(record.created) event_type = 'raven.events.Message' handler_kwargs = { 'params': record.args, } try: handler_kwargs['message'] = text_type(record.msg) except UnicodeDecodeError: # Handle binary strings where it should be unicode... handler_kwargs['message'] = repr(record.msg)[1:-1] try: handler_kwargs['formatted'] = text_type(record.message) except __HOLE__: # Handle binary strings where it should be unicode... handler_kwargs['formatted'] = repr(record.message)[1:-1] # If there's no exception being processed, exc_info may be a 3-tuple of None # http://docs.python.org/library/sys.html#sys.exc_info if record.exc_info and all(record.exc_info): # capture the standard message first so that we ensure # the event is recorded as an exception, in addition to having our # message interface attached handler = self.client.get_handler(event_type) data.update(handler.capture(**handler_kwargs)) event_type = 'raven.events.Exception' handler_kwargs = {'exc_info': record.exc_info} # HACK: discover a culprit when we normally couldn't elif not (data.get('stacktrace') or data.get('culprit')) \ and (record.name or record.funcName): culprit = label_from_frame({ 'module': record.name, 'function': record.funcName }) if culprit: data['culprit'] = culprit data['level'] = record.levelno data['logger'] = record.name if hasattr(record, 'tags'): kwargs['tags'] = record.tags elif self.tags: kwargs['tags'] = self.tags kwargs.update(handler_kwargs) return self.client.capture( event_type, stack=stack, data=data, extra=extra, date=date, **kwargs)
UnicodeDecodeError
dataset/ETHPy150Open getsentry/raven-python/raven/handlers/logging.py/SentryHandler._emit
1,592
def run_only_if_gearman_is_available(func): try: import gearman except __HOLE__: gearman = None pred = lambda: gearman is not None return run_only(func, pred)
ImportError
dataset/ETHPy150Open Yelp/fullerite/src/diamond/collectors/gearman_stats/test/testgearmanstats.py/run_only_if_gearman_is_available
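Record 1,592 gates a test suite on an optional dependency. The original composes a run_only() predicate; the sketch below expresses the same intent with unittest's own skipIf decorator, offered as a suggested equivalent rather than what the project does:

import unittest

try:
    import gearman
except ImportError:
    gearman = None

@unittest.skipIf(gearman is None, "gearman library is not installed")
class GearmanStatsTest(unittest.TestCase):
    def test_library_available(self):
        self.assertIsNotNone(gearman)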
1,593
def handle(self, *args, **options): verbosity = int(options.get('verbosity', 1)) depth = int(options.get('depth', 3)) auth = _parse_auth(options.get('auth')) if verbosity > 1: log_level = logging.DEBUG elif verbosity: log_level = logging.INFO else: log_level = logging.WARN crawl_logger = logging.getLogger('crawler') crawl_logger.setLevel(logging.DEBUG) crawl_logger.propagate = 0 log_stats = LogStatsHandler() crawl_logger.addHandler(log_stats) console = logging.StreamHandler() console.setLevel(log_level) console.setFormatter(logging.Formatter("%(name)s [%(levelname)s] %(module)s: %(message)s")) crawl_logger.addHandler(console) if len(args) > 1: raise CommandError('Only one start url is currently supported.') else: start_url = args[0] if args else '/' if settings.ADMIN_FOR: settings_modules = [__import__(m, {}, {}, ['']) for m in settings.ADMIN_FOR] else: settings_modules = [settings] conf_urls = {} # Build the list URLs to test from urlpatterns: for settings_mod in settings_modules: try: urlconf = __import__(settings_mod.ROOT_URLCONF, {}, {}, ['']) except Exception, e: logging.exception("Error occurred while trying to load %s: %s", settings_mod.ROOT_URLCONF, str(e)) continue view_functions = extract_views_from_urlpatterns(urlconf.urlpatterns) for (func, regex) in view_functions: #Get function name and add it to the hash of URLConf urls func_name = hasattr(func, '__name__') and func.__name__ or repr(func) conf_urls[regex] = ['func.__module__', func_name] c = Crawler(start_url, conf_urls=conf_urls, verbosity=verbosity, output_dir=options.get("output_dir"), ascend=not options.get("no_parent"), auth=auth, ) # Load plugins: for p in options['plugins']: # This nested try is somewhat unsightly but allows easy Pythonic # usage ("--enable-plugin=tidy") instead of Java-esque # "--enable-plugin=test_utils.crawler.plugins.tidy" try: try: plugin_module = __import__(p) except __HOLE__: if not "." in p: plugin_module = __import__( "test_utils.crawler.plugins.%s" % p, fromlist=["test_utils.crawler.plugins"] ) else: raise c.plugins.append(plugin_module.PLUGIN()) except (ImportError, AttributeError), e: crawl_logger.critical("Unable to load plugin %s: %s", p, e) sys.exit(3) c.run(max_depth=depth) # We'll exit with a non-zero status if we had any errors max_log_level = max(log_stats.stats.keys()) if max_log_level >= logging.ERROR: sys.exit(2) elif max_log_level >= logging.WARNING: sys.exit(1) else: sys.exit(0)
ImportError
dataset/ETHPy150Open ericholscher/django-test-utils/test_utils/management/commands/crawlurls.py/Command.handle
1,594
def compute(self): left_t = self.get_input('left_table') right_t = self.get_input('right_table') case_sensitive = self.get_input('case_sensitive') always_prefix = self.get_input('always_prefix') def get_column_idx(table, prefix): col_name_port = "%s_column_name" % prefix col_idx_port = '%s_column_idx' % prefix try: col_idx = choose_column( table.columns, column_names=table.names, name=self.force_get_input(col_name_port, None), index=self.force_get_input(col_idx_port, None)) except __HOLE__, e: raise ModuleError(self, e.message) return col_idx left_key_col = get_column_idx(left_t, "left") right_key_col = get_column_idx(right_t, "right") table = JoinedTables(left_t, right_t, left_key_col, right_key_col, case_sensitive, always_prefix) self.set_output('value', table)
ValueError
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/packages/tabledata/operations.py/JoinTables.compute
1,595
def compute(self): table = self.get_input("table") try: indexes = choose_columns( table.columns, column_names=table.names, names=self.force_get_input('column_names', None), indexes=self.force_get_input('column_indexes', None)) except __HOLE__, e: raise ModuleError(self, e.message) if self.has_input('new_column_names'): column_names = self.get_input('new_column_names') if len(column_names) != len(indexes): raise ModuleError(self, "new_column_names was specified but doesn't " "have the right number of names") else: column_names = [] names = {} for i in indexes: name = table.names[i] if name in names: nb = names[name] names[name] += 1 name = '%s_%d' % (name, nb) else: names[name] = 1 column_names.append(name) projected_table = ProjectedTable(table, indexes, column_names) self.set_output("value", projected_table)
ValueError
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/packages/tabledata/operations.py/ProjectTable.compute
1,596
def compute(self): table = self.get_input('table') if self.has_input('str_expr'): (col, comparer, comparand) = self.get_input('str_expr') elif self.has_input('float_expr'): (col, comparer, comparand) = self.get_input('float_expr') else: raise ModuleError(self, "Must have some expression") try: idx = int(col) except __HOLE__: try: idx = table.names.index(col) except ValueError: raise ModuleError(self, "No column %r" % col) else: if idx < 0 or idx >= table.columns: raise ModuleError(self, "No column %d, table only has %d columns" % ( idx, table.columns)) condition = self.make_condition(comparand, comparer) numeric = isinstance(comparand, float) column = table.get_column(idx, numeric) matched_rows = [i for i, col_val in enumerate(column) if condition(col_val)] columns = [] for col in xrange(table.columns): column = table.get_column(col) columns.append([column[row] for row in matched_rows]) selected_table = TableObject(columns, len(matched_rows), table.names) self.set_output('value', selected_table)
ValueError
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/packages/tabledata/operations.py/SelectFromTable.compute
1,597
def parseInfo(self, attributes, line=None): ''' Parse the attributes line of an entry, line parameter provided purely for backwards compatability''' # remove comments attributes = attributes.split("#")[0] # separate into fields fields = map(lambda x: x.strip(), attributes.split(";")) self.attributes = {} for f in fields: d = map(lambda x: x.strip(), f.split("=")) n, v = d[0], d[1] if len(d) > 2: v = d[1:] if v[0] == '"' and v[-1] == '"': v = v[1:-1] else: # try to convert to a value try: v = float(v) v = int(v) except __HOLE__: pass except TypeError: pass # The reversed Parent attribute can contain multiple, "," sepearted # values if n == "Parent": v = v.split(",") self.attributes[n] = v
ValueError
dataset/ETHPy150Open CGATOxford/cgat/CGAT/GFF3.py/Entry.parseInfo
1,598
def action(self): """ This class overrides this method """ commandline = "{0} {1}".format(self.command, " ".join(self.arguments)) try: completed_process = subprocess.run(commandline, shell=True) self.exit_status = completed_process.returncode except __HOLE__: self.exit_status = subprocess.call(commandline, shell=True)
AttributeError
dataset/ETHPy150Open pmbarrett314/curses-menu/cursesmenu/items/command_item.py/CommandItem.action
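Record 1,598 feature-detects the interpreter rather than checking a version string: subprocess.run() only exists from Python 3.5 on, so the AttributeError on older interpreters drops through to subprocess.call(). Reduced to just that fallback:

import subprocess

def run_shell(commandline):
    try:
        completed = subprocess.run(commandline, shell=True)    # Python >= 3.5
        return completed.returncode
    except AttributeError:
        return subprocess.call(commandline, shell=True)        # older interpreters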
1,599
def _randrange(seed=None): """Return a randrange generator. ``seed`` can be o None - return randomly seeded generator o int - return a generator seeded with the int o list - the values to be returned will be taken from the list in the order given; the provided list is not modified. Examples ======== >>> from sympy.utilities.randtest import _randrange >>> rr = _randrange() >>> rr(1000) # doctest: +SKIP 999 >>> rr = _randrange(3) >>> rr(1000) # doctest: +SKIP 238 >>> rr = _randrange([0, 5, 1, 3, 4]) >>> rr(3), rr(3) (0, 1) """ if seed is None: return random.randrange elif isinstance(seed, int): return random.Random(seed).randrange elif is_sequence(seed): seed = list(seed) # make a copy seed.reverse() def give(a, b=None, seq=seed): if b is None: a, b = 0, a a, b = as_int(a), as_int(b) w = b - a if w < 1: raise ValueError('_randrange got empty range') try: x = seq.pop() except __HOLE__: raise ValueError('_randrange expects a list-like sequence') except IndexError: raise ValueError('_randrange sequence was too short') if a <= x < b: return x else: return give(a, b, seq) return give else: raise ValueError('_randrange got an unexpected seed')
AttributeError
dataset/ETHPy150Open sympy/sympy/sympy/utilities/randtest.py/_randrange
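Record 1,599 closes the batch with an error-translation pattern: the low-level AttributeError (the seed has no .pop()) and IndexError (the seed ran out) are both re-raised as ValueErrors phrased in the caller's vocabulary. Its core, with the sympy-specific wording dropped:

def take_next(seq):
    try:
        return seq.pop()
    except AttributeError:
        raise ValueError('expected a list-like sequence')
    except IndexError:
        raise ValueError('sequence was too short')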