| Unnamed: 0 (int64, 0-10k) | function (string, 79-138k chars) | label (string, 20 classes) | info (string, 42-261 chars) |
|---|---|---|---|
6,900
|
def _get_scope(self, scope=None):
    if scope is None:
        try:
            return self.parent.get_expr_scope()
        except __HOLE__:
            pass
    return scope
|
AttributeError
|
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/hasobjective.py/HasObjectives._get_scope
|
6,901
|
def test_default_expiry(self):
    try:
        self.assertEqual(self.context.get_expiry_age(), settings.WECHAT_CONTEXT_AGE)
    except AttributeError:
        self.assertEqual(self.context.get_expiry_age(), DEFAULT_WECHAT_CONTEXT_AGE)
    self.context.set_expiry(0)
    try:
        self.assertEqual(self.context.get_expiry_age(), settings.WECHAT_CONTEXT_AGE)
    except __HOLE__:
        self.assertEqual(self.context.get_expiry_age(), DEFAULT_WECHAT_CONTEXT_AGE)
|
AttributeError
|
dataset/ETHPy150Open wechat-python-sdk/wechat-python-sdk/wechat_sdk/context/framework/django/tests.py/ContextTestsMixin.test_default_expiry
|
6,902
|
def test_custom_expiry_reset(self):
    self.context.set_expiry(None)
    self.context.set_expiry(10)
    self.context.set_expiry(None)
    try:
        self.assertEqual(self.context.get_expiry_age(), settings.WECHAT_CONTEXT_AGE)
    except __HOLE__:
        self.assertEqual(self.context.get_expiry_age(), DEFAULT_WECHAT_CONTEXT_AGE)
|
AttributeError
|
dataset/ETHPy150Open wechat-python-sdk/wechat-python-sdk/wechat_sdk/context/framework/django/tests.py/ContextTestsMixin.test_custom_expiry_reset
|
6,903
|
def test_exit_arg(self):
    "sys.exit can be called with args"
    try:
        sys.exit("leaving now")
    except __HOLE__, e:
        self.assertEquals(str(e), "leaving now")
|
SystemExit
|
dataset/ETHPy150Open babble/babble/include/jython/Lib/test/test_sys_jy.py/SysTest.test_exit_arg
|
6,904
|
def test_tuple_args(self):
    "Exceptions raised unpacking tuple args have right line number"
    def tuple_args( (x,y) ): pass
    try:
        tuple_args( 10 )
    except __HOLE__:
        tb = sys.exc_info()[2]
        if tb.tb_lineno == 0:
            self.fail("Traceback lineno was zero")
|
TypeError
|
dataset/ETHPy150Open babble/babble/include/jython/Lib/test/test_sys_jy.py/SysTest.test_tuple_args
|
6,905
|
def rename(source, target):
    try:
        _rename(source, target)
    except __HOLE__:
        e = extract_exception()
        if e.errno == errno.EXDEV:
            shutil.move(source, target)
        else:
            raise
|
OSError
|
dataset/ETHPy150Open cournape/Bento/bento/utils/os2.py/rename
|
6,906
|
def connection(self):
    """ get a cached connection from the pool """
    self._condition.acquire()
    try:
        if (self._maxconnections and self._connections >= self._maxconnections):
            raise TooManyConnections("%d connections are already equal to the max: %d" % (self._connections, self._maxconnections))
        # connection limit not reached, get a dedicated connection
        try: # first try to get it from the idle cache
            con = self._idle_cache.pop(0)
        except __HOLE__: # else get a fresh connection
            con = self.new_connection()
        self._connections += 1
    finally:
        self._condition.release()
    return con
|
IndexError
|
dataset/ETHPy150Open bitly/asyncmongo/asyncmongo/pool.py/ConnectionPool.connection
|
6,907
|
def qualmap(qualities, filename, fig_kw):
    fig = plt.figure(**fig_kw)
    ax = fig.add_subplot(111)
    values = map(Counter, tuple(qualities.values()))
    counts = Counter()
    for value in values:
        counts = counts + value
    max_qual = max(tuple(counts.keys()))
    max_pos = max(tuple(qualities.keys()))
    heat_map = np.zeros((max_qual, max_pos))
    for p in range(max_pos):
        for q in range(max_qual):
            try:
                heat_map[q][p] = qualities[p+1][q+1]
            except __HOLE__:
                pass
    imax = ax.imshow(np.array(heat_map), cmap=viridis_cm, origin='lower', interpolation='none', aspect='auto')
    ax.axhline(y=10, linestyle=':', color='gray')
    ax.axhline(y=20, linestyle=':', color='gray')
    ax.axhline(y=30, linestyle=':', color='gray')
    cbar = fig.colorbar(imax, orientation='horizontal', shrink=0.5)
    cbar_labels = [item.get_text() for item in cbar.ax.get_xticklabels()]
    cbar.ax.set_xticklabels(cbar_labels, rotation=45)
    cbar.ax.set_title('')
    ax.set_title('Quality score heatmap')
    ax.set_xlabel('Cycle')
    ax.set_ylabel('Sum of Phred qualities')
    add_figure_to_archive(fig, filename, 'quality_score_heatmap.png')
|
KeyError
|
dataset/ETHPy150Open mdshw5/fastqp/fastqp/plots.py/qualmap
|
6,908
|
def main():
    import sys
    import warnings
    try:
        filename = sys.argv[1]
        word_number = int(sys.argv[2])
    except (__HOLE__, ValueError):
        print('Usage: %s <file-name> <word-number>' % sys.argv[0])
        sys.exit(1)
    with open(filename, 'rt', encoding='utf-8') as text_file:
        s = Sentence(text_file.read())
    for n, word in enumerate(s, 1):
        if n == word_number:
            print(word)
            break
    else:
        warnings.warn('last word is #%d, "%s"' % (n, word))
|
IndexError
|
dataset/ETHPy150Open fluentpython/example-code/14-it-generator/sentence_genexp.py/main
|
6,909
|
def __call__(self, environ, start_response):
    req = Request(environ)
    action = req.params.get('action', 'view')
    page = self.get_page(req.path_info)
    try:
        try:
            meth = getattr(self, 'action_%s_%s' % (action, req.method))
        except __HOLE__:
            raise exc.HTTPBadRequest('No such action %r' % action).exception
        resp = meth(req, page)
    except exc.HTTPException, e:
        resp = e
    return resp(environ, start_response)
|
AttributeError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/webob_0_9/docs/wiki-example-code/example.py/WikiApp.__call__
|
6,910
|
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the Panasonic Viera TV platform."""
    from panasonic_viera import DEFAULT_PORT, RemoteControl
    name = config.get(CONF_NAME, 'Panasonic Viera TV')
    port = config.get(CONF_PORT, DEFAULT_PORT)
    if discovery_info:
        _LOGGER.debug('%s', discovery_info)
        vals = discovery_info.split(':')
        if len(vals) > 1:
            port = vals[1]
        host = vals[0]
        remote = RemoteControl(host, port)
        add_devices([PanasonicVieraTVDevice(name, remote)])
        return True
    # Validate that all required config options are given
    if not validate_config({DOMAIN: config}, {DOMAIN: [CONF_HOST]}, _LOGGER):
        return False
    host = config.get(CONF_HOST, None)
    remote = RemoteControl(host, port)
    try:
        remote.get_mute()
    except (socket.timeout, TimeoutError, __HOLE__):
        _LOGGER.error('Panasonic Viera TV is not available at %s:%d',
                      host, port)
        return False
    add_devices([PanasonicVieraTVDevice(name, remote)])
    return True
# pylint: disable=abstract-method
|
OSError
|
dataset/ETHPy150Open home-assistant/home-assistant/homeassistant/components/media_player/panasonic_viera.py/setup_platform
|
6,911
|
def update(self):
    """Retrieve the latest data."""
    try:
        self._muted = self._remote.get_mute()
        self._state = STATE_ON
    except (socket.timeout, TimeoutError, __HOLE__):
        self._state = STATE_OFF
        return False
    return True
|
OSError
|
dataset/ETHPy150Open home-assistant/home-assistant/homeassistant/components/media_player/panasonic_viera.py/PanasonicVieraTVDevice.update
|
6,912
|
def send_key(self, key):
    """Send a key to the tv and handles exceptions."""
    try:
        self._remote.send_key(key)
        self._state = STATE_ON
    except (socket.timeout, TimeoutError, __HOLE__):
        self._state = STATE_OFF
        return False
    return True
|
OSError
|
dataset/ETHPy150Open home-assistant/home-assistant/homeassistant/components/media_player/panasonic_viera.py/PanasonicVieraTVDevice.send_key
|
6,913
|
@property
def volume_level(self):
    """Volume level of the media player (0..1)."""
    volume = 0
    try:
        volume = self._remote.get_volume() / 100
        self._state = STATE_ON
    except (socket.timeout, TimeoutError, __HOLE__):
        self._state = STATE_OFF
    return volume
|
OSError
|
dataset/ETHPy150Open home-assistant/home-assistant/homeassistant/components/media_player/panasonic_viera.py/PanasonicVieraTVDevice.volume_level
|
6,914
|
def set_volume_level(self, volume):
    """Set volume level, range 0..1."""
    volume = int(volume * 100)
    try:
        self._remote.set_volume(volume)
        self._state = STATE_ON
    except (socket.timeout, TimeoutError, __HOLE__):
        self._state = STATE_OFF
|
OSError
|
dataset/ETHPy150Open home-assistant/home-assistant/homeassistant/components/media_player/panasonic_viera.py/PanasonicVieraTVDevice.set_volume_level
|
6,915
|
def coerce_types(content):
    '''
    Convert types in csv-like content.
    The idea is that when translating to and
    from csv, everything is converted to strings. So, we need to undo that
    conversion for things like counts.
    '''
    if len(content) == 0:
        return content
    column_types = get_column_types(content)
    coerced_content = []
    for row in content:
        c_row = []
        for col_ind, col in enumerate(row):
            try:
                col = CONVERTERS[column_types[col_ind]](col)
            except __HOLE__:
                col = col
            c_row.append(col)
        coerced_content.append(c_row)
    return coerced_content
|
ValueError
|
dataset/ETHPy150Open learntextvis/textkit/textkit/coerce.py/coerce_types
|
6,916
|
@classmethod
def check_repository(cls, path, username=None, password=None,
                     local_site_name=None):
    """
    Performs checks on a repository to test its validity.
    This should check if a repository exists and can be connected to.
    This will also check if the repository requires an HTTPS certificate.
    The result is returned as an exception. The exception may contain
    extra information, such as a human-readable description of the problem.
    If the repository is valid and can be connected to, no exception
    will be thrown.
    """
    super(BZRTool, cls).check_repository(path, username, password,
                                         local_site_name)
    if local_site_name and sshutils.is_ssh_uri(path):
        path += '?rb-local-site-name=%s' % local_site_name
    try:
        tree, branch, repository, relpath = \
            bzrdir.BzrDir.open_containing_tree_branch_or_repository(
                path.encode('ascii'))
    except __HOLE__:
        raise RepositoryNotFoundError()
    except NotBranchError:
        raise RepositoryNotFoundError()
    except Exception as e:
        raise SCMError(e)
|
AttributeError
|
dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/scmtools/bzr.py/BZRTool.check_repository
|
6,917
|
def sample(self,wig=None,resolution=100):
    """Doing sampling"""
    try:
        chroms=wig.get_chroms()
    except __HOLE__:
        raise Exception("Argument 'wig' must be given")
    coordinates={}
    for chrom in chroms:
        try:
            standardchrom=self.standard_chroms[chrom]
        except KeyError:
            standardchrom=chrom
        wigcoord=wig[chrom][0]
        coordinates[standardchrom]=[]
        for wc in wigcoord:
            coordinate=(int(round(1.0*wc/resolution)))*resolution+1
            if not coordinates[standardchrom] or coordinate!=coordinates[standardchrom][-1]:
                coordinates[standardchrom].append(coordinate)
    return coordinates
|
AttributeError
|
dataset/ETHPy150Open taoliu/taolib/Assoc/sampler.py/GenomeSampler.sample
|
6,918
|
def sample(self,bed=None,resolution=600):
    """Doing sampling"""
    try:
        chroms=bed.get_chroms()
    except AttributeError:
        raise Exception("Argument 'bed' must be given")
    coordinates={}
    for chrom in chroms:
        try:
            standardchrom=self.standard_chroms[chrom]
        except __HOLE__:
            standardchrom=chrom
        coordinates[standardchrom]=[]
        ChIP=zip(bed[chrom]['start'],bed[chrom]['end'],map(lambda x,y,:(x+y)/2,bed[chrom]['start'],bed[chrom]['end']))
        howmanyChIPs=len(ChIP)
        for i in xrange(0,howmanyChIPs):
            # get the begining, end, and center of a peak
            beg,end,center=ChIP[i]
            ### When using the real locations of beg
            Ns=range(center,max(0,beg-1),-1*resolution)
            Ns.reverse()
            Ns+=range(center+resolution,end+1,resolution)
            if Ns: coordinates[standardchrom].extend(Ns)
        coordinates[standardchrom].sort()
    return coordinates
|
KeyError
|
dataset/ETHPy150Open taoliu/taolib/Assoc/sampler.py/ChIPSampler.sample
|
6,919
|
def sample(self, wig, resolution):
    """Sample a wig file at the given resolution.
    Parameters:
    1. wig: a wig object (see inout.py)
    2. resolution: sampling resolution
    Return:
    sampWig: the sampled wig object
    """
    # parameter checking
    try:
        chroms=wig.get_chroms()
    except __HOLE__:
        raise Exception("Argument 'wig' must be given")
    sampWig=Wig()
    for chrom in chroms:
        try:
            standardchrom=self.standard_chroms[chrom]
        except KeyError:
            standardchrom=chrom
        samp=[array('l',[]),array('d',[])]
        for wc,val in itertools.izip(wig[chrom][0],wig[chrom][1]):
            coordinate=(int(round(1.0*wc/resolution)))*resolution+1
            if len(samp[0])==0:
                samp[0].append(coordinate)
                samp[1].append(val)
                continue
            if coordinate!=samp[0][-1]:
                samp[0].append(coordinate)
                samp[1].append(val)
        #added to sampWig only if there are some point(s)
        if samp[0]: sampWig.wig[standardchrom]=samp
    return sampWig
|
AttributeError
|
dataset/ETHPy150Open taoliu/taolib/Assoc/sampler.py/WigSampler.sample
|
6,920
|
def sample(self, wig, resolution):
    """Sample a wig file at the given resolution.
    Parameters:
    1. wig: a wig object (see inout.py)
    2. resolution: sampling resolution
    Return:
    sampWig: the sampled wig object
    """
    # parameter checking
    try:
        chroms=wig.get_chroms()
    except AttributeError:
        raise Exception("Argument 'wig' must be given")
    sampWig=Wig()
    for chrom in chroms:
        try:
            standardchrom=self.standard_chroms[chrom]
        except KeyError:
            standardchrom=chrom
        try:
            start = wig[chrom][0][0]
            end = wig[chrom][0][-1]
        except IndexError:
            continue
        samp=[array('l',[]),array('d',[])]
        cor = wig[chrom][0]
        val = wig[chrom][1]
        init = 0
        prev = -1000
        for sc in xrange(start, end, resolution):
            # get the closest one to the sampled point and save
            gotya = bisect_left(cor[init:], sc)
            if prev == (init+gotya): continue
            else: prev = (init+gotya)
            try:
                samp[0].append(cor[init+gotya])
                samp[1].append(val[init+gotya])
                init += gotya
            except __HOLE__:
                continue
        #added to sampWig only if there are some point(s)
        if samp[0]: sampWig.wig[standardchrom]=samp
    return sampWig
|
IndexError
|
dataset/ETHPy150Open taoliu/taolib/Assoc/sampler.py/WigSamplerFast.sample
|
6,921
|
def tail(self, argv):
    """tail <fname> [<filter>]
    tail a file, using the optional filter regular expression to filter lines."""
    fname = argv[1]
    if len(argv) > 2:
        filt = argv[2]
    else:
        filt = None
    s = self._obj
    s.tail(fname, filt)
    try:
        while 1:
            l = s.readline()
            self._print(l)
    except __HOLE__:
        s.interrupt()
|
KeyboardInterrupt
|
dataset/ETHPy150Open kdart/pycopia/QA/pycopia/QA/controller_cli.py/ConfiguratorShellCLI.tail
|
6,922
|
def configurator_cli(argv):
    """configurator_cli [-s <script>] [-g] <device>
    Interact with a DUT configurator. If no device is specified use the testbed DUT.
    Options:
    -g Use paged output (like 'more')
    -s <script> Run a CLI script from the given file instead of entering
    interactive mode.
    """
    import os
    from pycopia import getopt
    from pycopia.QA import configurator
    from pycopia.QA import config
    paged = False
    script = None
    try:
        optlist, longopts, args = getopt.getopt(argv[1:], "s:?g")
    except GetoptError:
        print configurator_cli.__doc__
        return
    for opt, val in optlist:
        if opt == "-?":
            print configurator_cli.__doc__
            return
        elif opt == "-g":
            paged = True
        elif opt == "-s":
            script = val
    if not args:
        print configurator_cli.__doc__
        return
    if paged:
        from pycopia import tty
        io = tty.PagedIO()
    else:
        io = IO.ConsoleIO()
    # do runtime setup
    cf = config.get_config(initdict=longopts)
    cf.reportfile = "configurator_cli"
    cf.logbasename = "configurator_cli.log"
    cf.arguments = argv
    dev = cf.devices[args[0]]
    ctor = configurator.get_configurator(dev, logfile=cf.logfile)
    # construct the CLI
    theme = ConfiguratorTheme("Configurator> ")
    ui = UI.UserInterface(io, cf, theme)
    cmd = CLI.get_generic_cmd(ctor, ui, ConfiguratorShellCLI)
    cmd.device = dev # stash actual device for future reference
    parser = CLI.CommandParser(cmd, historyfile=os.path.expandvars("$HOME/.hist_configurator"))
    if script:
        try:
            parser.parse(script)
        except __HOLE__:
            pass
    else:
        parser.interact()
    try:
        ctor.exit()
    except:
        pass
|
KeyboardInterrupt
|
dataset/ETHPy150Open kdart/pycopia/QA/pycopia/QA/controller_cli.py/configurator_cli
|
6,923
|
def translate(self, instruction):
    """Return IR representation of an instruction.
    """
    try:
        trans_instrs = self._translate(instruction)
    except __HOLE__ as e:
        unkn_instr = self._builder.gen_unkn()
        unkn_instr.address = instruction.address << 8 | (0x0 & 0xff)
        trans_instrs = [unkn_instr]
        self._log_not_supported_instruction(instruction, str(e))
    except Exception:
        self._log_translation_exception(instruction)
        raise
    # Some sanity check....
    for instr in trans_instrs:
        try:
            check_operands_size(instr, self._arch_info.architecture_size)
        except:
            logger.error(
                "Invalid operand size: %s (%s)",
                instr,
                instruction
            )
            raise
    return trans_instrs
|
NotImplementedError
|
dataset/ETHPy150Open programa-stic/barf-project/barf/barf/arch/arm/armtranslator.py/ArmTranslator.translate
|
6,924
|
@classmethod
def request(cls, method, path, body=None, attach_host_name=False, response_formatter=None,
            error_formatter=None, **params):
    """
    Make an HTTP API request
    :param method: HTTP method to use to contact API endpoint
    :type method: HTTP method string
    :param path: API endpoint url
    :type path: url
    :param body: dictionary to be sent in the body of the request
    :type body: dictionary
    :param response_formatter: function to format JSON response from HTTP API request
    :type response_formatter: JSON input function
    :param error_formatter: function to format JSON error response from HTTP API request
    :type error_formatter: JSON input function
    :param attach_host_name: link the new resource object to the host name
    :type attach_host_name: bool
    :param params: dictionary to be sent in the query string of the request
    :type params: dictionary
    :returns: JSON or formated response from HTTP API request
    """
    try:
        # Check if it's ok to submit
        if not cls._should_submit():
            raise HttpBackoff("Too many timeouts. Won't try again for {1} seconds."
                              .format(*cls._backoff_status()))
        # Import API, User and HTTP settings
        from datadog.api import _api_key, _application_key, _api_host, \
            _mute, _host_name, _proxies, _max_retries, _timeout, \
            _cacert
        # Check keys and add then to params
        if _api_key is None:
            raise ApiNotInitialized("API key is not set."
                                    " Please run 'initialize' method first.")
        params['api_key'] = _api_key
        if _application_key:
            params['application_key'] = _application_key
        # Construct the url
        url = "%s/api/%s/%s" % (_api_host, cls._api_version, path.lstrip("/"))
        # Attach host name to body
        if attach_host_name and body:
            # Is it a 'series' list of objects ?
            if 'series' in body:
                # Adding the host name to all objects
                for obj_params in body['series']:
                    if obj_params.get('host', "") == "":
                        obj_params['host'] = _host_name
            else:
                if body.get('host', "") == "":
                    body['host'] = _host_name
        # If defined, make sure tags are defined as a comma-separated string
        if 'tags' in params and isinstance(params['tags'], list):
            params['tags'] = ','.join(params['tags'])
        # Process the body, if necessary
        headers = {}
        if isinstance(body, dict):
            body = json.dumps(body)
            headers['Content-Type'] = 'application/json'
        # Process requesting
        start_time = time.time()
        try:
            # Use a session to set a max_retries parameters
            s = requests.Session()
            http_adapter = requests.adapters.HTTPAdapter(max_retries=_max_retries)
            s.mount('https://', http_adapter)
            # Request
            result = s.request(
                method,
                url,
                headers=headers,
                params=params,
                data=body,
                timeout=_timeout,
                proxies=_proxies,
                verify=_cacert)
            result.raise_for_status()
        except requests.ConnectionError as e:
            raise ClientError("Could not request %s %s%s: %s" % (method, _api_host, url, e))
        except requests.exceptions.Timeout as e:
            cls._timeout_counter += 1
            raise HttpTimeout('%s %s timed out after %d seconds.' % (method, url, _timeout))
        except requests.exceptions.HTTPError as e:
            if e.response.status_code in (400, 403, 404, 409):
                # This gets caught afterwards and raises an ApiError exception
                pass
            else:
                raise
        except TypeError as e:
            raise TypeError(
                "Your installed version of 'requests' library seems not compatible with"
                "Datadog's usage. We recommand upgrading it ('pip install -U requests')."
                "If you need help or have any question, please contact support@datadoghq.com")
        # Request succeeded: log it and reset the timeout counter
        duration = round((time.time() - start_time) * 1000., 4)
        log.info("%s %s %s (%sms)" % (result.status_code, method, url, duration))
        cls._timeout_counter = 0
        # Format response content
        content = result.content
        if content:
            try:
                if is_p3k():
                    response_obj = json.loads(content.decode('utf-8'))
                else:
                    response_obj = json.loads(content)
            except __HOLE__:
                raise ValueError('Invalid JSON response: {0}'.format(content))
            if response_obj and 'errors' in response_obj:
                raise ApiError(response_obj)
        else:
            response_obj = None
        if response_formatter is None:
            return response_obj
        else:
            return response_formatter(response_obj)
    except ClientError as e:
        if _mute:
            log.error(str(e))
            if error_formatter is None:
                return {'errors': e.args[0]}
            else:
                return error_formatter({'errors': e.args[0]})
        else:
            raise
    except ApiError as e:
        if _mute:
            for error in e.args[0]['errors']:
                log.error(str(error))
            if error_formatter is None:
                return e.args[0]
            else:
                return error_formatter(e.args[0])
        else:
            raise
# Private functions
|
ValueError
|
dataset/ETHPy150Open DataDog/datadogpy/datadog/api/base.py/HTTPClient.request
|
6,925
|
def memoize(fun):
    """A simple memoize decorator for functions supporting positional args."""
    @wraps(fun)
    def wrapper(*args, **kwargs):
        key = (args, frozenset(sorted(kwargs.items())))
        try:
            return cache[key]
        except __HOLE__:
            ret = cache[key] = fun(*args, **kwargs)
            return ret
    cache = {}
    return wrapper
|
KeyError
|
dataset/ETHPy150Open graphql-python/graphene/graphene/utils/caching.py/memoize
|
6,926
|
def read_corpus_list_file(self, file_name):
    """Read a corpus list file.
    Each line has the format:
    <count> <compound>
    Yield tuples (count, compound, compound_atoms) for each compound.
    """
    _logger.info("Reading corpus from list '%s'..." % file_name)
    for line in self._read_text_file(file_name):
        try:
            count, compound = line.split(None, 1)
            yield int(count), compound, self._split_atoms(compound)
        except __HOLE__:
            yield 1, line, self._split_atoms(line)
    _logger.info("Done.")
|
ValueError
|
dataset/ETHPy150Open aalto-speech/morfessor/morfessor/io.py/MorfessorIO.read_corpus_list_file
|
6,927
|
def read_parameter_file(self, file_name):
    """Read learned or estimated parameters from a file"""
    params = {}
    line_re = re.compile(r'^(.*)\s*:\s*(.*)$')
    for line in self._read_text_file(file_name):
        m = line_re.match(line.rstrip())
        if m:
            key = m.group(1)
            val = m.group(2)
            try:
                val = float(val)
            except __HOLE__:
                pass
            params[key] = val
    return params
|
ValueError
|
dataset/ETHPy150Open aalto-speech/morfessor/morfessor/io.py/MorfessorIO.read_parameter_file
|
6,928
|
def _read_text_file(self, file_name, raw=False):
    """Read a text file with the appropriate compression and encoding.
    Comments and empty lines are skipped unless raw is True.
    """
    inp = self._open_text_file_read(file_name)
    try:
        for line in inp:
            line = line.rstrip()
            if not raw and \
                    (len(line) == 0 or line.startswith(self.comment_start)):
                continue
            if self.lowercase:
                yield line.lower()
            else:
                yield line
    except __HOLE__:
        if file_name == '-':
            _logger.info("Finished reading from stdin")
            return
        else:
            raise
|
KeyboardInterrupt
|
dataset/ETHPy150Open aalto-speech/morfessor/morfessor/io.py/MorfessorIO._read_text_file
|
6,929
|
def _find_encoding(self, *files):
    """Test default encodings on reading files.
    If no encoding is given, this method can be used to test which
    of the default encodings would work.
    """
    test_encodings = ['utf-8', locale.getpreferredencoding()]
    for encoding in test_encodings:
        ok = True
        for f in files:
            if f == '-':
                continue
            try:
                if f.endswith('.gz'):
                    file_obj = gzip.open(f, 'rb')
                elif f.endswith('.bz2'):
                    file_obj = bz2.BZ2File(f, 'rb')
                else:
                    file_obj = open(f, 'rb')
                for _ in codecs.getreader(encoding)(file_obj):
                    pass
            except __HOLE__:
                ok = False
                break
        if ok:
            _logger.info("Detected %s encoding" % encoding)
            return encoding
    raise UnicodeError("Can not determine encoding of input files")
|
UnicodeDecodeError
|
dataset/ETHPy150Open aalto-speech/morfessor/morfessor/io.py/MorfessorIO._find_encoding
|
6,930
|
def __init__(self, public_key_string, version_byte=None, verify=True):
    """ Takes in a public key in hex format.
    """
    # set the version byte
    if version_byte:
        self._version_byte = version_byte
    self._charencoding, self._type = get_public_key_format(
        public_key_string)
    # extract the binary bitcoin key (compressed/uncompressed w magic byte)
    self._bin_public_key = extract_bin_bitcoin_pubkey(public_key_string)
    # extract the bin ecdsa public key (uncompressed, w/out a magic byte)
    bin_ecdsa_public_key = extract_bin_ecdsa_pubkey(public_key_string)
    if verify:
        try:
            # create the ecdsa key object
            self._ecdsa_public_key = VerifyingKey.from_string(
                bin_ecdsa_public_key, self._curve)
        except __HOLE__ as e:
            raise ValueError(_errors['IMPROPER_PUBLIC_KEY_FORMAT'])
|
AssertionError
|
dataset/ETHPy150Open blockstack/pybitcoin/pybitcoin/publickey.py/BitcoinPublicKey.__init__
|
6,931
|
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(DOES_NOT_EXIST, INVALID_FORM_DATA,
                        PERMISSION_DENIED, NOT_LOGGED_IN)
@webapi_request_fields(
    required=BaseReviewGeneralCommentResource.REQUIRED_CREATE_FIELDS,
    optional=BaseReviewGeneralCommentResource.OPTIONAL_CREATE_FIELDS,
    allow_unknown=True
)
def create(self, request, *args, **kwargs):
    """Creates a general comment on a review.
    This will create a new comment on a review. The comment contains text
    only.
    """
    try:
        review = resources.review.get_object(request, *args, **kwargs)
    except __HOLE__:
        return DOES_NOT_EXIST
    if not resources.review.has_modify_permissions(request, review):
        return self.get_no_access_error(request.user)
    new_comment = self.create_comment(fields=(), review=review, **kwargs)
    review.general_comments.add(new_comment)
    return 201, {
        self.item_result_key: new_comment,
    }
|
ObjectDoesNotExist
|
dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/webapi/resources/review_general_comment.py/ReviewGeneralCommentResource.create
|
6,932
|
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED)
@webapi_request_fields(
    optional=BaseReviewGeneralCommentResource.OPTIONAL_UPDATE_FIELDS,
    allow_unknown=True
)
def update(self, request, *args, **kwargs):
    """Updates a general comment.
    This can update the text or region of an existing comment. It
    can only be done for comments that are part of a draft review.
    """
    try:
        resources.review_request.get_object(request, *args, **kwargs)
        review = resources.review.get_object(request, *args, **kwargs)
        general_comment = self.get_object(request, *args, **kwargs)
    except __HOLE__:
        return DOES_NOT_EXIST
    # Determine whether or not we're updating the issue status.
    if self.should_update_issue_status(general_comment, **kwargs):
        return self.update_issue_status(request, self, *args, **kwargs)
    if not resources.review.has_modify_permissions(request, review):
        return self.get_no_access_error(request)
    self.update_comment(general_comment, **kwargs)
    return 200, {
        self.item_result_key: general_comment,
    }
|
ObjectDoesNotExist
|
dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/webapi/resources/review_general_comment.py/ReviewGeneralCommentResource.update
|
6,933
|
def test_oracle_raises_original_error(self):
    class BadFormElement(TextBoxElement):
        def value(self, layer=None, view=None):
            raise AttributeError('Inner Error')
    oracle = SettingsOracle({'bad_form': BadFormElement('str("text")')})
    try:
        oracle('bad_form')
        assert False
    except __HOLE__ as err:
        assert 'Inner Error' in err.args
|
AttributeError
|
dataset/ETHPy150Open glue-viz/glue/glue/viewers/custom/qt/tests/test_custom_viewer.py/TestSettingsOracle.test_oracle_raises_original_error
|
6,934
|
def _host_power_action(self, req, host, action):
    """Reboots, shuts down or powers up the host."""
    context = req.environ['nova.context']
    try:
        result = self.compute_api.host_power_action(context, host=host,
                                                    action=action)
    except __HOLE__ as e:
        raise webob.exc.HTTPBadRequest(explanation=e.msg)
    return {"host": host, "power_action": result}
|
NotImplementedError
|
dataset/ETHPy150Open nii-cloud/dodai-compute/nova/api/openstack/contrib/hosts.py/HostController._host_power_action
|
6,935
|
def get_real_instance_class(self):
    """
    Normally not needed.
    If a non-polymorphic manager (like base_objects) has been used to
    retrieve objects, then the real class/type of these objects may be
    determined using this method.
    """
    # the following line would be the easiest way to do this, but it produces sql queries
    # return self.polymorphic_ctype.model_class()
    # so we use the following version, which uses the ContentType manager cache.
    # Note that model_class() can return None for stale content types;
    # when the content type record still exists but no longer refers to an existing model.
    try:
        model = ContentType.objects.get_for_id(self.polymorphic_ctype_id).model_class()
    except __HOLE__:
        # Django <1.6 workaround
        return None
    # Protect against bad imports (dumpdata without --natural) or other
    # issues missing with the ContentType models.
    if model is not None \
            and not issubclass(model, self.__class__) \
            and not issubclass(model, self.__class__._meta.proxy_for_model):
        raise RuntimeError("ContentType {0} for {1} #{2} does not point to a subclass!".format(
            self.polymorphic_ctype_id, model, self.pk,
        ))
    return model
|
AttributeError
|
dataset/ETHPy150Open chrisglass/django_polymorphic/polymorphic/models.py/PolymorphicModel.get_real_instance_class
|
6,936
|
def __init__(self, * args, ** kwargs):
    """Replace Django's inheritance accessor member functions for our model
    (self.__class__) with our own versions.
    We monkey patch them until a patch can be added to Django
    (which would probably be very small and make all of this obsolete).
    If we have inheritance of the form ModelA -> ModelB ->ModelC then
    Django creates accessors like this:
    - ModelA: modelb
    - ModelB: modela_ptr, modelb, modelc
    - ModelC: modela_ptr, modelb, modelb_ptr, modelc
    These accessors allow Django (and everyone else) to travel up and down
    the inheritance tree for the db object at hand.
    The original Django accessors use our polymorphic manager.
    But they should not. So we replace them with our own accessors that use
    our appropriate base_objects manager.
    """
    super(PolymorphicModel, self).__init__(*args, ** kwargs)
    if self.__class__.polymorphic_super_sub_accessors_replaced:
        return
    self.__class__.polymorphic_super_sub_accessors_replaced = True
    def create_accessor_function_for_model(model, accessor_name):
        def accessor_function(self):
            attr = model.base_objects.get(pk=self.pk)
            return attr
        return accessor_function
    subclasses_and_superclasses_accessors = self._get_inheritance_relation_fields_and_models()
    try:
        from django.db.models.fields.related import ReverseOneToOneDescriptor, ForwardManyToOneDescriptor
    except __HOLE__:
        # django < 1.9
        from django.db.models.fields.related import (
            SingleRelatedObjectDescriptor as ReverseOneToOneDescriptor,
            ReverseSingleRelatedObjectDescriptor as ForwardManyToOneDescriptor,
        )
    for name, model in subclasses_and_superclasses_accessors.items():
        orig_accessor = getattr(self.__class__, name, None)
        if type(orig_accessor) in [ReverseOneToOneDescriptor, ForwardManyToOneDescriptor]:
            #print >>sys.stderr, '---------- replacing', name, orig_accessor, '->', model
            setattr(self.__class__, name, property(create_accessor_function_for_model(model, name)))
|
ImportError
|
dataset/ETHPy150Open chrisglass/django_polymorphic/polymorphic/models.py/PolymorphicModel.__init__
|
6,937
|
def __init__(self, *xyz):
    l = len(xyz)
    if l == 1:
        obj = xyz[0]
        try:
            xyz = obj.x, obj.y, obj.z
        except AttributeError:
            try:
                xyz = obj['x'], obj['y'], obj['z']
            except __HOLE__:
                xyz = tuple(obj[:3])
    elif l == 0:
        xyz = (0, 0, 0)
    elif l != 3:
        raise ValueError('Wrong length: expected 3, got %s' % xyz)
    super(Vector3, self).__init__(*xyz)
|
TypeError
|
dataset/ETHPy150Open SpockBotMC/SpockBot/spockbot/vector.py/Vector3.__init__
|
6,938
|
def push_data(self, item, key, data):
    if self.postprocessor is not None:
        result = self.postprocessor(self.path, key, data)
        if result is None:
            return item
        key, data = result
    if item is None:
        item = self.dict_constructor()
    try:
        value = item[key]
        if isinstance(value, list):
            value.append(data)
        else:
            item[key] = [value, data]
    except __HOLE__:
        item[key] = data
    return item
|
KeyError
|
dataset/ETHPy150Open AppScale/appscale/lib/xmltodict.py/_DictSAXHandler.push_data
|
6,939
|
def unparse(item, output=None, encoding='utf-8', **kwargs):
    ((key, value),) = item.items()
    must_return = False
    if output == None:
        output = StringIO()
        must_return = True
    content_handler = XMLGenerator(output, encoding)
    content_handler.startDocument()
    _emit(key, value, content_handler, **kwargs)
    content_handler.endDocument()
    if must_return:
        value = output.getvalue()
        try: # pragma no cover
            value = value.decode(encoding)
        except __HOLE__: # pragma no cover
            pass
        return value
|
AttributeError
|
dataset/ETHPy150Open AppScale/appscale/lib/xmltodict.py/unparse
|
6,940
|
def archive_month(request, year, month, queryset, date_field,
        month_format='%b', template_name=None, template_loader=loader,
        extra_context=None, allow_empty=False, context_processors=None,
        template_object_name='object', mimetype=None, allow_future=False):
    """
    Generic monthly archive view.
    Templates: ``<app_label>/<model_name>_archive_month.html``
    Context:
        date_list:
            List of days in this month with objects
        month:
            (date) this month
        next_month:
            (date) the first day of the next month, or None if the next month is in the future
        previous_month:
            (date) the first day of the previous month
        object_list:
            list of objects published in the given month
    """
    if extra_context is None: extra_context = {}
    try:
        tt = time.strptime("%s-%s" % (year, month), '%s-%s' % ('%Y', month_format))
        date = datetime.date(*tt[:3])
    except __HOLE__:
        raise Http404
    model = queryset.model
    now = timezone.now()
    # Calculate first and last day of month, for use in a date-range lookup.
    first_day = date.replace(day=1)
    if first_day.month == 12:
        last_day = first_day.replace(year=first_day.year + 1, month=1)
    else:
        last_day = first_day.replace(month=first_day.month + 1)
    lookup_kwargs = {
        '%s__gte' % date_field: first_day,
        '%s__lt' % date_field: last_day,
    }
    # Only bother to check current date if the month isn't in the past and future objects are requested.
    if last_day >= now.date() and not allow_future:
        lookup_kwargs['%s__lte' % date_field] = now
    object_list = queryset.filter(**lookup_kwargs)
    date_list = object_list.dates(date_field, 'day')
    if not object_list and not allow_empty:
        raise Http404
    # Calculate the next month, if applicable.
    if allow_future:
        next_month = last_day
    elif last_day <= datetime.date.today():
        next_month = last_day
    else:
        next_month = None
    # Calculate the previous month
    if first_day.month == 1:
        previous_month = first_day.replace(year=first_day.year-1,month=12)
    else:
        previous_month = first_day.replace(month=first_day.month-1)
    if not template_name:
        template_name = "%s/%s_archive_month.html" % (model._meta.app_label, model._meta.object_name.lower())
    t = template_loader.get_template(template_name)
    c = RequestContext(request, {
        'date_list': date_list,
        '%s_list' % template_object_name: object_list,
        'month': date,
        'next_month': next_month,
        'previous_month': previous_month,
    }, context_processors)
    for key, value in extra_context.items():
        if callable(value):
            c[key] = value()
        else:
            c[key] = value
    return HttpResponse(t.render(c), mimetype=mimetype)
|
ValueError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/django/views/generic/date_based.py/archive_month
|
6,941
|
def archive_week(request, year, week, queryset, date_field,
        template_name=None, template_loader=loader,
        extra_context=None, allow_empty=True, context_processors=None,
        template_object_name='object', mimetype=None, allow_future=False):
    """
    Generic weekly archive view.
    Templates: ``<app_label>/<model_name>_archive_week.html``
    Context:
        week:
            (date) this week
        object_list:
            list of objects published in the given week
    """
    if extra_context is None: extra_context = {}
    try:
        tt = time.strptime(year+'-0-'+week, '%Y-%w-%U')
        date = datetime.date(*tt[:3])
    except __HOLE__:
        raise Http404
    model = queryset.model
    now = timezone.now()
    # Calculate first and last day of week, for use in a date-range lookup.
    first_day = date
    last_day = date + datetime.timedelta(days=7)
    lookup_kwargs = {
        '%s__gte' % date_field: first_day,
        '%s__lt' % date_field: last_day,
    }
    # Only bother to check current date if the week isn't in the past and future objects aren't requested.
    if last_day >= now.date() and not allow_future:
        lookup_kwargs['%s__lte' % date_field] = now
    object_list = queryset.filter(**lookup_kwargs)
    if not object_list and not allow_empty:
        raise Http404
    if not template_name:
        template_name = "%s/%s_archive_week.html" % (model._meta.app_label, model._meta.object_name.lower())
    t = template_loader.get_template(template_name)
    c = RequestContext(request, {
        '%s_list' % template_object_name: object_list,
        'week': date,
    })
    for key, value in extra_context.items():
        if callable(value):
            c[key] = value()
        else:
            c[key] = value
    return HttpResponse(t.render(c), mimetype=mimetype)
|
ValueError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/django/views/generic/date_based.py/archive_week
|
6,942
|
def archive_day(request, year, month, day, queryset, date_field,
        month_format='%b', day_format='%d', template_name=None,
        template_loader=loader, extra_context=None, allow_empty=False,
        context_processors=None, template_object_name='object',
        mimetype=None, allow_future=False):
    """
    Generic daily archive view.
    Templates: ``<app_label>/<model_name>_archive_day.html``
    Context:
        object_list:
            list of objects published that day
        day:
            (datetime) the day
        previous_day
            (datetime) the previous day
        next_day
            (datetime) the next day, or None if the current day is today
    """
    if extra_context is None: extra_context = {}
    try:
        tt = time.strptime('%s-%s-%s' % (year, month, day),
                           '%s-%s-%s' % ('%Y', month_format, day_format))
        date = datetime.date(*tt[:3])
    except __HOLE__:
        raise Http404
    model = queryset.model
    now = timezone.now()
    if isinstance(model._meta.get_field(date_field), DateTimeField):
        lookup_kwargs = {'%s__range' % date_field: (datetime.datetime.combine(date, datetime.time.min), datetime.datetime.combine(date, datetime.time.max))}
    else:
        lookup_kwargs = {date_field: date}
    # Only bother to check current date if the date isn't in the past and future objects aren't requested.
    if date >= now.date() and not allow_future:
        lookup_kwargs['%s__lte' % date_field] = now
    object_list = queryset.filter(**lookup_kwargs)
    if not allow_empty and not object_list:
        raise Http404
    # Calculate the next day, if applicable.
    if allow_future:
        next_day = date + datetime.timedelta(days=1)
    elif date < datetime.date.today():
        next_day = date + datetime.timedelta(days=1)
    else:
        next_day = None
    if not template_name:
        template_name = "%s/%s_archive_day.html" % (model._meta.app_label, model._meta.object_name.lower())
    t = template_loader.get_template(template_name)
    c = RequestContext(request, {
        '%s_list' % template_object_name: object_list,
        'day': date,
        'previous_day': date - datetime.timedelta(days=1),
        'next_day': next_day,
    }, context_processors)
    for key, value in extra_context.items():
        if callable(value):
            c[key] = value()
        else:
            c[key] = value
    return HttpResponse(t.render(c), mimetype=mimetype)
|
ValueError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/django/views/generic/date_based.py/archive_day
|
6,943
|
def object_detail(request, year, month, day, queryset, date_field,
        month_format='%b', day_format='%d', object_id=None, slug=None,
        slug_field='slug', template_name=None, template_name_field=None,
        template_loader=loader, extra_context=None, context_processors=None,
        template_object_name='object', mimetype=None, allow_future=False):
    """
    Generic detail view from year/month/day/slug or year/month/day/id structure.
    Templates: ``<app_label>/<model_name>_detail.html``
    Context:
        object:
            the object to be detailed
    """
    if extra_context is None: extra_context = {}
    try:
        tt = time.strptime('%s-%s-%s' % (year, month, day),
                           '%s-%s-%s' % ('%Y', month_format, day_format))
        date = datetime.date(*tt[:3])
    except ValueError:
        raise Http404
    model = queryset.model
    now = timezone.now()
    if isinstance(model._meta.get_field(date_field), DateTimeField):
        lookup_kwargs = {'%s__range' % date_field: (datetime.datetime.combine(date, datetime.time.min), datetime.datetime.combine(date, datetime.time.max))}
    else:
        lookup_kwargs = {date_field: date}
    # Only bother to check current date if the date isn't in the past and future objects aren't requested.
    if date >= now.date() and not allow_future:
        lookup_kwargs['%s__lte' % date_field] = now
    if object_id:
        lookup_kwargs['%s__exact' % model._meta.pk.name] = object_id
    elif slug and slug_field:
        lookup_kwargs['%s__exact' % slug_field] = slug
    else:
        raise AttributeError("Generic detail view must be called with either an object_id or a slug/slugfield")
    try:
        obj = queryset.get(**lookup_kwargs)
    except __HOLE__:
        raise Http404("No %s found for" % model._meta.verbose_name)
    if not template_name:
        template_name = "%s/%s_detail.html" % (model._meta.app_label, model._meta.object_name.lower())
    if template_name_field:
        template_name_list = [getattr(obj, template_name_field), template_name]
        t = template_loader.select_template(template_name_list)
    else:
        t = template_loader.get_template(template_name)
    c = RequestContext(request, {
        template_object_name: obj,
    }, context_processors)
    for key, value in extra_context.items():
        if callable(value):
            c[key] = value()
        else:
            c[key] = value
    response = HttpResponse(t.render(c), mimetype=mimetype)
    populate_xheaders(request, response, model, getattr(obj, obj._meta.pk.name))
    return response
|
ObjectDoesNotExist
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/django/views/generic/date_based.py/object_detail
|
6,944
|
def tearDown(self):
    # Attempt to drop any test databases that may have been created.
    if self.dbname1:
        dbutil.TESTSERVER.drop(self.dbname1)
    if self.dbname2:
        dbutil.TESTSERVER.drop(self.dbname2)
    # Attempt to remove any test save files that may have been created.
    if self.filename1:
        filepath1 = os.path.join(self.SAVE_DIR, self.filename1)
        try:
            os.remove(filepath1)
        except __HOLE__:
            # Don't worry if it is not there: test may have bombed
            # before creating the file.
            pass
|
OSError
|
dataset/ETHPy150Open GeoscienceAustralia/agdc/tests/test_dbutil.py/TestServer.tearDown
|
6,945
|
def describe(self, source_field, desc_fields, desc_format, wrap_chars):
    # Get strings for description fields.
    desc_strings = []
    for desc_field in desc_fields:
        try:
            getattr(self, desc_field)
        except __HOLE__:
            return 'Error at "{}" field of g:unite_bibtex_description_fields. Check your vimrc.'.format(desc_field)
        desc_strings.append(getattr(self, desc_field))
    # Insert the source field if not present in the description,
    # and put brackets around it wherever it is.
    source_string = u""
    if source_field in desc_fields:
        source_index = desc_fields.index(source_field)
        desc_strings[source_index] = u'%s%s%s' % (wrap_chars[0], desc_strings[source_index], wrap_chars[1])
    else:
        if not source_field in ["combined","file"]:
            source_string = u'%s%s%s' % (wrap_chars[0], getattr(self, source_field), wrap_chars[1])
    return desc_format.format(*desc_strings) + source_string
|
AttributeError
|
dataset/ETHPy150Open rafaqz/citation.vim/python/citation_vim/item.py/Item.describe
|
6,946
|
def explore(mp, maxTransitions):
    # some globals may not be needed; code only mutates collection *contents*,
    # as in finished, deadend
    global anames, states, graph, accepting, frontier, unsafe
    anames = mp.anames
    explored = list()
    fsm = list() # list of transitions with mp states not state numbers
    more_runs = True # TestSuite might have multiple runs
    while more_runs:
        initialState = mp.Current()
        frontier.append(initialState)
        states.append(initialState) # includes initial state even if no trans'ns
        iInitial = states.index(initialState) # might already be there
        runstarts.append(iInitial)
        if mp.Accepting(): # initial state might be accepting even if no trans'ns
            accepting.append(iInitial)
        if not mp.StateInvariant():
            unsafe.append(iInitial)
        while frontier:
            if len(graph) == maxTransitions:
                break
            current = frontier[0] # head, keep in mind current might lead nowhere
            frontier = frontier[1:] # tail
            icurrent = states.index(current) # might already be there
            #print 'current %s' % current # DEBUG
            #print ' frontier %s' % frontier # DEBUG
            explored.append(current) # states we checked, some might lead nowhere
            mp.Restore(deepcopy(current)) # assign state in mp, need deepcopy here
            transitions = mp.EnabledTransitions(list()) # all actions, not cleanup
            if not transitions: # terminal state, no enabled transitions
                if icurrent in accepting:
                    finished.append(icurrent)
                else:
                    deadend.append(icurrent)
            # print 'current %s, transitions %s' % (current, transitions) # DEBUG
            for (aname, args, result, next, next_properties) in transitions:
                # EnabledTransitions doesn't return transitions where not statefilter
                # if next_properties['statefilter']:
                if len(graph) < maxTransitions:
                    if next not in explored and next not in frontier:
                        # append for breadth-first, push on head for depth-first
                        frontier.append(next) # frontier contents are already copies
                    transition = (current, (aname, args, result), next)
                    if transition not in fsm:
                        fsm.append(transition)
                        if current not in states:
                            states.append(current)
                        if next not in states:
                            states.append(next) # next might never be in explored
                        # icurrent = states.index(current) # might already be there
                        inext = states.index(next) # ditto
                        graph.append((icurrent, (aname,args,result), inext)) #tuple
                        if mp.Accepting() and icurrent not in accepting:
                            accepting.append(icurrent)
                        if not mp.StateInvariant() and icurrent not in unsafe:
                            unsafe.append(icurrent)
                        if next_properties['accepting'] and inext not in accepting:
                            accepting.append(inext)
                        if not next_properties['stateinvariant'] and inext not in unsafe:
                            unsafe.append(inext)
                        # TK likewise dead states ... ?
                else: # found transition that will not be included in graph
                    frontier.insert(0,current) # not completely explored after all
                    # explored.remove(current) # not necessary
                    break
                # end if < ntransitions else ...
            # end for transitions
        # end while frontier
        # continue exploring test suite with multiple runs
        more_runs = False
        if mp.TestSuite:
            try:
                mp.Reset()
                more_runs = True
            except __HOLE__: # raised by TestSuite Reset after last run
                pass # no more runs, we're done
    # end while more_runs
|
StopIteration
|
dataset/ETHPy150Open jon-jacky/PyModel/pymodel/Analyzer.py/explore
|
6,947
|
def compare_dict(answer, result, decimal=7):
    '''Returns true if two dictionaries are approximately equal. Returns false otherwise.'''
    flat_answer = bc.utils.flatten(answer)
    flat_result = bc.utils.flatten(result)
    for key in flat_answer.keys():
        if key not in flat_result.keys():
            return False, "The key {} was not there.".format(key)
        answer_v, result_v = flat_answer[key], flat_result[key]
        if isinstance(answer_v, (float, int)) and isinstance(result_v, (float, int)):
            try:
                np.testing.assert_almost_equal(answer_v, result_v, decimal=decimal)
            except __HOLE__:
                err_msg = "The key {} produced a different result: expected {}, got {}.".format(key, answer_v, result_v)
                return False, err_msg
        elif answer_v != result_v:
            return False, "The key {} produced a different result: expected {}, got {}.".format(key, answer_v, result_v)
    return True, ""
|
AssertionError
|
dataset/ETHPy150Open yvesalexandre/bandicoot/bandicoot/tests/testing_tools.py/compare_dict
|
6,948
|
def main(self):
    source = ogr.Open(self.args.input, False)
    source_layer = source.GetLayer(0)
    try:
        shutil.rmtree(self.args.output)
    except __HOLE__:
        pass
    driver = ogr.GetDriverByName('ESRI Shapefile')
    dest = driver.CreateDataSource(self.args.output)
    # TODO: should work with source geom_type
    dest_layer = dest.CreateLayer('intersection', geom_type=ogr.wkbMultiPolygon)
    for i in range(source_layer.GetLayerDefn().GetFieldCount()):
        dest_layer.CreateField(source_layer.GetLayerDefn().GetFieldDefn(i))
    mask_features = []
    mask_boxes = []
    geo = ogr.Open(self.args.mask, False)
    layer = geo.GetLayer(0)
    for feature in layer:
        mask_features.append(feature)
        mask_boxes.append(get_bounding_box(feature.GetGeometryRef()))
    for feature in source_layer:
        # Skip features that don't have any geometry
        if not feature.GetGeometryRef():
            continue
        masked_feature = ogr.Feature(feature_def=source_layer.GetLayerDefn())
        masked_feature.SetFrom(feature)
        masked_geometry = None
        for (i, mask_feature) in enumerate(mask_features):
            bounding_box = mask_boxes[i]
            if not feature.GetGeometryRef().Intersects(bounding_box):
                continue
            new_piece = feature.GetGeometryRef().Intersection(mask_feature.GetGeometryRef())
            if new_piece:
                if masked_geometry:
                    masked_geometry = masked_geometry.Union(new_piece)
                else:
                    masked_geometry = new_piece
        # Don't create features which have been completely excluded
        if not masked_geometry:
            continue
        masked_feature.SetGeometryDirectly(masked_geometry)
        dest_layer.CreateFeature(masked_feature)
|
OSError
|
dataset/ETHPy150Open onyxfish/ogrkit/ogrkit/utilities/intersection.py/OGRIntersection.main
|
6,949
|
def get_testcase(rowid):
    try:
        dbrow = webhelpers.dbsession.query(models.TestCase).get(int(rowid))
        if dbrow is None:
            raise framework.HttpErrorNotFound("No such testcase id.")
    except __HOLE__:
        raise framework.HttpErrorNotFound("Bad id value.")
    return dbrow
|
ValueError
|
dataset/ETHPy150Open kdart/pycopia/QA/pycopia/QA/webui/testcases.py/get_testcase
|
6,950
|
def genomic_signal(fn, kind):
    """
    Factory function that makes the right class for the file format.
    Typically you'll only need this function to create a new genomic signal
    object.
    :param fn: Filename
    :param kind:
        String. Format of the file; see
        metaseq.genomic_signal._registry.keys()
    """
    try:
        klass = _registry[kind.lower()]
    except __HOLE__:
        raise ValueError(
            'No support for %s format, choices are %s'
            % (kind, _registry.keys()))
    m = klass(fn)
    m.kind = kind
    return m
|
KeyError
|
dataset/ETHPy150Open daler/metaseq/metaseq/_genomic_signal.py/genomic_signal
|
6,951
|
def enter_env(self, new_env):
    if self.quantity:
        try:
            existing = [item for item in new_env.inven if item.template == self.template][0]
            existing.quantity += self.quantity
            return
        except __HOLE__:
            pass
    new_env.add_inven(self)
|
IndexError
|
dataset/ETHPy150Open genzgd/Lampost-Mud/lampost/model/article.py/Article.enter_env
|
6,952
|
def filter_value_to_python(self, value, field_name, filters, filter_expr,
                           filter_type):
    value = super(ModelResource, self).filter_value_to_python(
        value, field_name, filters, filter_expr, filter_type)
    # If we are filtering on a GeometryApiField then we should try
    # and convert this to a GEOSGeometry object. The conversion
    # will fail if we don't have value JSON, so in that case we'll
    # just return ``value`` as normal.
    if isinstance(self.fields[field_name], GeometryApiField):
        try:
            value = GEOSGeometry(unquote(value))
        except __HOLE__:
            pass
    return value
|
ValueError
|
dataset/ETHPy150Open django-tastypie/django-tastypie/tastypie/contrib/gis/resources.py/ModelResource.filter_value_to_python
|
6,953
|
def process_all(self):
    """
    process all desired_results in the database
    """
    self.lap_timer() # reset timer
    q = self.query_pending_desired_results()
    if self.interface.parallel_compile:
        desired_results = []
        thread_args = []
        def compile_result(args):
            interface, data, result_id = args
            return interface.compile(data, result_id)
        for dr in q.all():
            if self.claim_desired_result(dr):
                desired_results.append(dr)
                thread_args.append((self.interface, dr.configuration.data, dr.id))
        if len(desired_results) == 0:
            return
        thread_pool = ThreadPool(len(desired_results))
        # print 'Compiling %d results' % len(thread_args)
        try:
            # Use map_async instead of map because of bug where keyboardinterrupts are ignored
            # See http://stackoverflow.com/questions/1408356/keyboard-interrupts-with-pythons-multiprocessing-pool
            compile_results = thread_pool.map_async(compile_result,
                                                    thread_args).get(9999999)
        except Exception:
            # Need to kill other processes because only one thread receives
            # exception
            self.interface.kill_all()
            raise
        # print 'Running %d results' % len(thread_args)
        for dr, compile_result in zip(desired_results, compile_results):
            # Make sure compile was successful
            self.run_desired_result(dr, compile_result, dr.id)
            try:
                self.interface.cleanup(dr.id)
            except __HOLE__, e:
                print e
        # print 'Done!'
    else:
        for dr in q.all():
            if self.claim_desired_result(dr):
                self.run_desired_result(dr)
|
RuntimeError
|
dataset/ETHPy150Open jansel/opentuner/opentuner/measurement/driver.py/MeasurementDriver.process_all
|
6,954
|
def embed(self, url, **kwargs):
    """
    The heart of the matter
    """
    try:
        # first figure out the provider
        provider = self.provider_for_url(url)
    except OEmbedMissingEndpoint:
        raise
    else:
        try:
            # check the database for a cached response, because of certain
            # race conditions that exist with get_or_create(), do a filter
            # lookup and just grab the first item
            stored_match = StoredOEmbed.objects.filter(
                match=url,
                maxwidth=kwargs.get('maxwidth', None),
                maxheight=kwargs.get('maxheight', None),
                date_expires__gte=datetime.datetime.now())[0]
            return OEmbedResource.create_json(stored_match.response_json)
        except __HOLE__:
            # query the endpoint and cache response in db
            # prevent None from being passed in as a GET param
            params = dict([(k, v) for k, v in kwargs.items() if v])
            # request an oembed resource for the url
            resource = provider.request_resource(url, **params)
            try:
                cache_age = int(resource.cache_age)
                if cache_age < MIN_OEMBED_TTL:
                    cache_age = MIN_OEMBED_TTL
            except:
                cache_age = DEFAULT_OEMBED_TTL
            date_expires = datetime.datetime.now() + datetime.timedelta(seconds=cache_age)
            stored_oembed, created = StoredOEmbed.objects.get_or_create(
                match=url,
                maxwidth=kwargs.get('maxwidth', None),
                maxheight=kwargs.get('maxheight', None))
            stored_oembed.response_json = resource.json
            stored_oembed.resource_type = resource.type
            stored_oembed.date_expires = date_expires
            if resource.content_object:
                stored_oembed.content_object = resource.content_object
            stored_oembed.save()
            return resource
|
IndexError
|
dataset/ETHPy150Open worldcompany/djangoembed/oembed/sites.py/ProviderSite.embed
|
6,955
|
@register.filter
def monthname(value):
    """Returns the name of a month with the supplied numeric value."""
    try:
        value = int(value)
    except:
        pass
    try:
        return MONTHS[value]
    except __HOLE__:
        return value
|
KeyError
|
dataset/ETHPy150Open ithinksw/philo/philo/contrib/penfield/templatetags/penfield.py/monthname
|
6,956
|
@register.filter
def apmonthname(value):
    """Returns the Associated Press abbreviated month name for the supplied numeric value."""
    try:
        value = int(value)
    except:
        pass
    try:
        return MONTHS_AP[value]
    except __HOLE__:
        return value
|
KeyError
|
dataset/ETHPy150Open ithinksw/philo/philo/contrib/penfield/templatetags/penfield.py/apmonthname
|
6,957
|
def handle_noargs(self, **options):
verbosity = int(options.get('verbosity', 1))
interactive = options.get('interactive')
show_traceback = options.get('traceback', False)
# Stealth option -- 'load_initial_data' is used by the testing setup
# process to disable initial fixture loading.
load_initial_data = options.get('load_initial_data', True)
self.style = no_style()
# Import the 'management' module within each installed app, to register
# dispatcher events.
for app_name in settings.INSTALLED_APPS:
try:
import_module('.management', app_name)
except __HOLE__, exc:
# This is slightly hackish. We want to ignore ImportErrors
# if the "management" module itself is missing -- but we don't
# want to ignore the exception if the management module exists
# but raises an ImportError for some reason. The only way we
# can do this is to check the text of the exception. Note that
# we're a bit broad in how we check the text, because different
# Python implementations may not use the same text.
# CPython uses the text "No module named management"
# PyPy uses "No module named myproject.myapp.management"
msg = exc.args[0]
if not msg.startswith('No module named') or 'management' not in msg:
raise
db = options.get('database', DEFAULT_DB_ALIAS)
connection = connections[db]
cursor = connection.cursor()
# Get a list of already installed *models* so that references work right.
tables = connection.introspection.table_names()
seen_models = connection.introspection.installed_models(tables)
created_models = set()
pending_references = {}
# Build the manifest of apps and models that are to be synchronized
all_models = [
(app.__name__.split('.')[-2],
[m for m in models.get_models(app, include_auto_created=True)
if router.allow_syncdb(db, m)])
for app in models.get_apps()
]
def model_installed(model):
opts = model._meta
converter = connection.introspection.table_name_converter
return not ((converter(opts.db_table) in tables) or
(opts.auto_created and converter(opts.auto_created._meta.db_table) in tables))
manifest = SortedDict(
(app_name, filter(model_installed, model_list))
for app_name, model_list in all_models
)
# Create the tables for each model
for app_name, model_list in manifest.items():
for model in model_list:
# Create the model's database table, if it doesn't already exist.
if verbosity >= 2:
print "Processing %s.%s model" % (app_name, model._meta.object_name)
sql, references = connection.creation.sql_create_model(model, self.style, seen_models)
seen_models.add(model)
created_models.add(model)
for refto, refs in references.items():
pending_references.setdefault(refto, []).extend(refs)
if refto in seen_models:
sql.extend(connection.creation.sql_for_pending_references(refto, self.style, pending_references))
sql.extend(connection.creation.sql_for_pending_references(model, self.style, pending_references))
if verbosity >= 1 and sql:
print "Creating table %s" % model._meta.db_table
for statement in sql:
cursor.execute(statement)
tables.append(connection.introspection.table_name_converter(model._meta.db_table))
transaction.commit_unless_managed(using=db)
# Send the post_syncdb signal, so individual apps can do whatever they need
# to do at this point.
emit_post_sync_signal(created_models, verbosity, interactive, db)
# The connection may have been closed by a syncdb handler.
cursor = connection.cursor()
# Install custom SQL for the app (but only if this
# is a model we've just created)
for app_name, model_list in manifest.items():
for model in model_list:
if model in created_models:
custom_sql = custom_sql_for_model(model, self.style, connection)
if custom_sql:
if verbosity >= 1:
print "Installing custom SQL for %s.%s model" % (app_name, model._meta.object_name)
try:
for sql in custom_sql:
cursor.execute(sql)
except Exception, e:
sys.stderr.write("Failed to install custom SQL for %s.%s model: %s\n" % \
(app_name, model._meta.object_name, e))
if show_traceback:
import traceback
traceback.print_exc()
transaction.rollback_unless_managed(using=db)
else:
transaction.commit_unless_managed(using=db)
else:
if verbosity >= 2:
print "No custom SQL for %s.%s model" % (app_name, model._meta.object_name)
# Install SQL indices for all newly created models
for app_name, model_list in manifest.items():
for model in model_list:
if model in created_models:
index_sql = connection.creation.sql_indexes_for_model(model, self.style)
if index_sql:
if verbosity >= 1:
print "Installing index for %s.%s model" % (app_name, model._meta.object_name)
try:
for sql in index_sql:
cursor.execute(sql)
except Exception, e:
sys.stderr.write("Failed to install index for %s.%s model: %s\n" % \
(app_name, model._meta.object_name, e))
transaction.rollback_unless_managed(using=db)
else:
transaction.commit_unless_managed(using=db)
# Load initial_data fixtures (unless that has been disabled)
if load_initial_data:
from django.core.management import call_command
call_command('loaddata', 'initial_data', verbosity=verbosity, database=db)
|
ImportError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/core/management/commands/syncdb.py/Command.handle_noargs
|
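The ImportError handling in record 6,957 distinguishes a genuinely missing optional submodule from a submodule that exists but fails while importing. A minimal sketch of that discrimination, with hypothetical module names:

from importlib import import_module

def load_management(app_name):
    try:
        return import_module('.management', app_name)
    except ImportError as exc:
        # Swallow only "module not found"; re-raise if the module
        # exists but blew up during import. Message-sniffing mirrors
        # the record; on Python 3.3+ checking exc.name is cleaner.
        msg = exc.args[0]
        if not msg.startswith('No module named') or 'management' not in msg:
            raise
        return None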
6,958
|
def set_parameter_bound(param, bound_option, extra_config=None):
"""
Update the default value of bounds.
.. warning:: This function mutates the input values.
Parameters
----------
param : dict
saving all the fitting values and their bounds
bound_option : str
define bound type
extra_config : dict
strategy-specific configuration
"""
strat_dict = _STRATEGY_REGISTRY[bound_option]
if extra_config is None:
extra_config = dict()
for k, v in six.iteritems(param):
if k == 'non_fitting_values':
continue
try:
v['bound_type'] = strat_dict[k]
except __HOLE__:
v['bound_type'] = extra_config.get(k, 'fixed')
# This dict is used to update the current parameter dict to dynamically change
# the input data and do the fitting. The user can adjust parameters such as
# position, width, area or branching ratio.
|
KeyError
|
dataset/ETHPy150Open scikit-beam/scikit-beam/skbeam/core/fitting/xrf_model.py/set_parameter_bound
|
6,959
|
def sign(*records):
""" Signs objects or data dictionary with your Recurly.js private key."""
if PRIVATE_KEY is None:
raise ValueError("Recurly.js private key is not set.")
records = list(records)
try:
data = records.pop() if type(records[-1]) is dict else {}
except __HOLE__:
data = {}
for record in records:
data[record.__class__.nodename] = record.__dict__
if 'timestamp' not in data:
data['timestamp'] = int(time.time())
if 'nonce' not in data:
data['nonce'] = re.sub(six.b('\W+'), six.b(''), base64.b64encode(os.urandom(32)))
unsigned = to_query(data)
signed = hmac.new(six.b(PRIVATE_KEY), six.b(unsigned), hashlib.sha1).hexdigest()
return '|'.join([signed, unsigned])
|
IndexError
|
dataset/ETHPy150Open recurly/recurly-client-python/recurly/js.py/sign
|
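The IndexError in record 6,959 comes from probing records[-1] on an empty argument list. A small sketch of the pop-trailing-options pattern:

def split_trailing_options(*records):
    records = list(records)
    try:
        # records[-1] raises IndexError when nothing was passed
        options = records.pop() if isinstance(records[-1], dict) else {}
    except IndexError:
        options = {}
    return records, options

assert split_trailing_options() == ([], {})
assert split_trailing_options(1, {'a': 2}) == ([1], {'a': 2})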
6,960
|
def get_key_pair(user_host, logger=None,
overwrite_cache=False, ignore_ssh=False):
"""
Returns RSA key containing both public and private keys for the user
identified in `user_host`. This can be an expensive operation, so
we avoid generating a new key pair whenever possible.
If ``~/.ssh/id_rsa`` exists and is private, that key is returned.
user_host: string
Format ``user@host``.
logger: :class:`logging.Logger`
Used for debug messages.
overwrite_cache: bool
If True, a new key is generated and forced into the cache of existing
known keys. Used for testing.
ignore_ssh: bool
If True, ignore any existing ssh id_rsa key file. Used for testing.
.. note::
To avoid unnecessary key generation, the public/private key pair for
the current user is stored in the private file ``~/.openmdao/keys``.
On Windows this requires the pywin32 extension. Also, the public
key is stored in ssh form in ``~/.openmdao/id_rsa.pub``.
"""
logger = logger or NullLogger()
with _KEY_CACHE_LOCK:
if overwrite_cache:
key_pair = _generate(user_host, logger)
_KEY_CACHE[user_host] = key_pair
return key_pair
# Look in previously generated keys.
try:
key_pair = _KEY_CACHE[user_host]
except __HOLE__:
# If key for current user (typical), check filesystem.
# TODO: file lock to protect from separate processes.
user, host = user_host.split('@')
if user == getpass.getuser():
current_user = True
key_pair = None
# Try to re-use SSH key. Exceptions should *never* be exercised!
if not ignore_ssh:
id_rsa = \
os.path.expanduser(os.path.join('~', '.ssh', 'id_rsa'))
if is_private(id_rsa):
try:
with open(id_rsa, 'r') as inp:
key_pair = RSA.importKey(inp.read())
except Exception as exc: #pragma no cover
logger.warning('ssh id_rsa import: %r', exc)
else:
generate = False
else: #pragma no cover
logger.warning('Ignoring insecure ssh id_rsa.')
if key_pair is None:
# Look for OpenMDAO key.
key_file = \
os.path.expanduser(os.path.join('~', '.openmdao', 'keys'))
if is_private(key_file):
try:
with open(key_file, 'rb') as inp:
key_pair = cPickle.load(inp)
except Exception:
generate = True
else:
generate = False
else:
logger.warning('Insecure keyfile! Regenerating keys.')
os.remove(key_file)
generate = True
# Difficult to run test as non-current user.
else: #pragma no cover
current_user = False
generate = True
if generate:
key_pair = _generate(user_host, logger)
if current_user:
key_dir = os.path.dirname(key_file)
if not os.path.exists(key_dir):
os.mkdir(key_dir)
# Save key pair in protected file.
if sys.platform == 'win32' and not HAVE_PYWIN32: #pragma no cover
logger.debug('No pywin32, not saving keyfile')
else:
make_private(key_dir) # Private while writing keyfile.
with open(key_file, 'wb') as out:
cPickle.dump(key_pair, out,
cPickle.HIGHEST_PROTOCOL)
try:
make_private(key_file)
# Hard to cause (recoverable) error here.
except Exception: #pragma no cover
os.remove(key_file) # Remove unsecured file.
raise
# Save public key in ssh form.
users = {user_host: key_pair.publickey()}
filename = os.path.join(key_dir, 'id_rsa.pub')
write_authorized_keys(users, filename, logger)
_KEY_CACHE[user_host] = key_pair
return key_pair
|
KeyError
|
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.util/src/openmdao/util/publickey.py/get_key_pair
|
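Record 6,960 is a look-aside cache: try the dict first and do the expensive work only on a KeyError miss, all under a lock. A stripped-down sketch with hypothetical names:

import threading

_CACHE = {}
_CACHE_LOCK = threading.Lock()

def expensive_build(key):
    return key.upper()  # stand-in for real key-pair generation

def get_cached(key):
    with _CACHE_LOCK:
        try:
            return _CACHE[key]
        except KeyError:
            value = _CACHE[key] = expensive_build(key)
            return value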
6,961
|
def make_private(path):
"""
Make `path` accessible only by 'owner'.
path: string
Path to file or directory to be made private.
.. note::
On Windows this requires the pywin32 extension.
"""
if sys.platform == 'win32': #pragma no cover
if not HAVE_PYWIN32:
raise ImportError('No pywin32')
# Find the SIDs for user and system.
username = win32api.GetUserNameEx(win32con.NameSamCompatible)
# Map Cygwin 'root' to 'Administrator'. Typically these are intended
# to be identical, but /etc/passwd might configure them differently.
if username.endswith('\\root'):
username = username.replace('\\root', '\\Administrator')
user, domain, type = win32security.LookupAccountName('', username)
system, domain, type = win32security.LookupAccountName('', 'System')
# Find the DACL part of the Security Descriptor for the file
sd = win32security.GetFileSecurity(path,
win32security.DACL_SECURITY_INFORMATION)
# Create a blank DACL and add the ACEs we want.
dacl = win32security.ACL()
dacl.AddAccessAllowedAce(win32security.ACL_REVISION,
ntsecuritycon.FILE_ALL_ACCESS, user)
dacl.AddAccessAllowedAce(win32security.ACL_REVISION,
ntsecuritycon.FILE_ALL_ACCESS, system)
# Put our new DACL into the Security Descriptor and update the file
# with the updated SD.
sd.SetSecurityDescriptorDacl(1, dacl, 0)
win32security.SetFileSecurity(path,
win32security.DACL_SECURITY_INFORMATION,
sd)
else:
# Normal chmod() works on test machines with ACLs enabled, but a user
# in the field reported a situation where it didn't. This code tries
# using libacl if it can. Doesn't seem to cause any problems, not
# verified that it helps though.
try:
# From pylibacl, which requires 'libacl1-dev'.
import posix1e
except __HOLE__:
mode = 0700 if os.path.isdir(path) else 0600
os.chmod(path, mode) # Read/Write/Execute
else:
if os.path.isdir(path):
acl = posix1e.ACL(text='u::rwx,g::-,o::-')
else:
acl = posix1e.ACL(text='u::rw,g::-,o::-')
acl.applyto(path)
if not is_private(path):
raise RuntimeError("Can't make %r private" % path)
|
ImportError
|
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.util/src/openmdao/util/publickey.py/make_private
|
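The try/except/else around the import in record 6,961 selects between an optional backend (pylibacl) and a plain chmod() fallback. A condensed sketch of the same shape:

import os

try:
    import posix1e  # optional dependency; often absent
except ImportError:
    posix1e = None

def make_private(path):
    if posix1e is None:
        os.chmod(path, 0o700 if os.path.isdir(path) else 0o600)
    else:
        text = 'u::rwx,g::-,o::-' if os.path.isdir(path) else 'u::rw,g::-,o::-'
        posix1e.ACL(text=text).applyto(path)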
6,962
|
def read_authorized_keys(filename=None, logger=None):
"""
Return dictionary of public keys, indexed by user, read from `filename`.
The file must be in ssh format, and only RSA keys are processed.
If the file is not private, then no keys are returned.
filename: string
File to read from. The default is ``~/.ssh/authorized_keys``.
logger: :class:`logging.Logger`
Used for log messages.
"""
if not filename:
filename = \
os.path.expanduser(os.path.join('~', '.ssh', 'authorized_keys'))
logger = logger or NullLogger()
if not os.path.exists(filename):
raise RuntimeError('%r does not exist' % filename)
if not is_private(filename):
if sys.platform != 'win32' or HAVE_PYWIN32:
raise RuntimeError('%r is not private' % filename)
else: #pragma no cover
logger.warning('Allowed users file %r is not private', filename)
errors = 0
keys = {}
with open(filename, 'r') as inp:
for line in inp:
line = line.rstrip()
sharp = line.find('#')
if sharp >= 0:
line = line[:sharp]
if not line:
continue
key_type, blank, rest = line.partition(' ')
if key_type != 'ssh-rsa':
logger.error('unsupported key type: %r', key_type)
errors += 1
continue
key_data, blank, user_host = rest.partition(' ')
if not key_data:
logger.error('bad line (missing key data):')
logger.error(line)
errors += 1
continue
try:
user, host = user_host.split('@')
except __HOLE__:
logger.error('bad line (require user@host):')
logger.error(line)
errors += 1
continue
logger.debug('user %r, host %r', user, host)
try:
ip_addr = socket.gethostbyname(host)
except socket.gaierror:
logger.warning('unknown host %r', host)
logger.warning(line)
data = base64.b64decode(key_data)
start = 0
name_len = _longint(data, start, 4)
start += 4
name = data[start:start+name_len]
if name != 'ssh-rsa':
logger.error('name error: %r vs. ssh-rsa', name)
logger.error(line)
errors += 1
continue
start += name_len
e_len = _longint(data, start, 4)
start += 4
e = _longint(data, start, e_len)
start += e_len
n_len = _longint(data, start, 4)
start += 4
n = _longint(data, start, n_len)
start += n_len
if start != len(data):
logger.error('length error: %d vs. %d', start, len(data))
logger.error(line)
errors += 1
continue
try:
pubkey = RSA.construct((n, e))
except Exception as exc:
logger.error('key construct error: %r', exc)
errors += 1
else:
keys[user_host] = pubkey
if errors:
raise RuntimeError('%d errors in %r, check log for details'
% (errors, filename))
return keys
|
ValueError
|
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.util/src/openmdao/util/publickey.py/read_authorized_keys
|
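The ValueError in record 6,962 is raised by tuple unpacking, not by parsing: split('@') yields the wrong number of pieces for any malformed entry. In isolation:

def parse_user_host(line):
    try:
        user, host = line.split('@')
    except ValueError:  # zero or more than one '@'
        return None
    return user, host

assert parse_user_host('alice@example.org') == ('alice', 'example.org')
assert parse_user_host('no-at-sign') is None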
6,963
|
def RenderObject(self, hunt, args):
runner = hunt.GetRunner()
context = runner.context
untyped_summary_part = dict(
state=hunt.Get(hunt.Schema.STATE),
hunt_name=context.args.hunt_name,
create_time=context.create_time,
expires=context.expires,
client_limit=context.args.client_limit,
client_rate=context.args.client_rate,
creator=context.creator,
description=context.args.description)
typed_summary_part = {}
if args.with_full_summary:
all_clients_count, completed_clients_count, _ = hunt.GetClientsCounts()
untyped_summary_part.update(dict(
stats=context.usage_stats,
all_clients_count=all_clients_count,
completed_clients_count=completed_clients_count,
outstanding_clients_count=(
all_clients_count - completed_clients_count)))
typed_summary_part = dict(
regex_rules=runner.args.regex_rules or [],
integer_rules=runner.args.integer_rules or [],
args=hunt.state.args)
try:
typed_summary_part["client_rule_set"] = runner.args.client_rule_set
except __HOLE__:
typed_summary_part["client_rule_set"] = []
for k, v in untyped_summary_part.items():
untyped_summary_part[k] = api_value_renderers.RenderValue(v)
for k, v in typed_summary_part.items():
typed_summary_part[k] = api_value_renderers.RenderValue(v)
rendered_object = {
"summary": dict(untyped_summary_part.items() +
typed_summary_part.items())
}
return rendered_object
|
AttributeError
|
dataset/ETHPy150Open google/grr/grr/gui/api_plugins/hunt.py/ApiGRRHuntRenderer.RenderObject
|
6,964
|
def RenderFiltered(self, filter_func, args, token):
fd = aff4.FACTORY.Open("aff4:/hunts", mode="r", token=token)
children = list(fd.ListChildren())
children.sort(key=operator.attrgetter("age"), reverse=True)
if not args.active_within:
raise ValueError("active_within filter has to be used when "
"any kind of filtering is done (to prevent "
"queries of death)")
min_age = rdfvalue.RDFDatetime().Now() - args.active_within
active_children = []
for child in children:
if child.age > min_age:
active_children.append(child)
else:
break
index = 0
hunt_list = []
active_children_map = {}
for hunt in fd.OpenChildren(children=active_children):
if (not isinstance(hunt, hunts.GRRHunt) or not hunt.state
or not filter_func(hunt)):
continue
active_children_map[hunt.urn] = hunt
for urn in active_children:
try:
hunt = active_children_map[urn]
except __HOLE__:
continue
if index >= args.offset:
hunt_list.append(hunt)
index += 1
if args.count and len(hunt_list) >= args.count:
break
return ApiListHuntsResult(items=self._RenderHuntList(hunt_list))
|
KeyError
|
dataset/ETHPy150Open google/grr/grr/gui/api_plugins/hunt.py/ApiListHuntsHandler.RenderFiltered
|
6,965
|
def Handle(self, args, token=None):
# TODO(user): handle cases when the hunt doesn't exist.
# TODO(user): Use hunt's logs_collection_urn to open logs collection.
try:
logs_collection = aff4.FACTORY.Open(
HUNTS_ROOT_PATH.Add(args.hunt_id).Add("Logs"),
aff4_type=flow_runner.FlowLogCollection.__name__, mode="r",
token=token)
except __HOLE__:
logs_collection = aff4.FACTORY.Create(
HUNTS_ROOT_PATH.Add(args.hunt_id).Add("Logs"),
aff4_type="RDFValueCollection", mode="r", token=token)
result = api_call_handler_utils.FilterAff4Collection(
logs_collection, args.offset, args.count, args.filter)
return ApiListHuntLogsResult(
items=result, total_count=len(logs_collection))
|
IOError
|
dataset/ETHPy150Open google/grr/grr/gui/api_plugins/hunt.py/ApiListHuntLogsHandler.Handle
|
6,966
|
def Handle(self, args, token=None):
"""Retrieves the clients for a hunt."""
hunt_urn = HUNTS_ROOT_PATH.Add(args.hunt_id)
hunt = aff4.FACTORY.Open(hunt_urn, aff4_type="GRRHunt", token=token)
clients_by_status = hunt.GetClientsByStatus()
hunt_clients = clients_by_status[args.client_status.name]
total_count = len(hunt_clients)
if args.count:
hunt_clients = sorted(hunt_clients)[args.offset:args.offset+args.count]
else:
hunt_clients = sorted(hunt_clients)[args.offset:]
all_flow_urns = hunts.GRRHunt.GetAllSubflowUrns(
hunt_urn, hunt_clients, token)
flow_requests = flow.GRRFlow.GetFlowRequests(all_flow_urns, token)
client_requests = aff4_grr.VFSGRRClient.GetClientRequests(
hunt_clients, token)
waitingfor = {}
status_by_request = {}
for flow_urn in flow_requests:
for obj in flow_requests[flow_urn]:
if isinstance(obj, rdf_flows.RequestState):
waitingfor.setdefault(flow_urn, obj)
if waitingfor[flow_urn].id > obj.id:
waitingfor[flow_urn] = obj
elif isinstance(obj, rdf_flows.GrrMessage):
status_by_request.setdefault(flow_urn, {})[obj.request_id] = obj
response_urns = []
for request_base_urn, request in waitingfor.iteritems():
response_urns.append(rdfvalue.RDFURN(request_base_urn).Add(
"request:%08X" % request.id))
response_dict = dict(data_store.DB.MultiResolvePrefix(
response_urns, "flow:", token=token))
result_items = []
for flow_urn in sorted(all_flow_urns):
request_urn = flow_urn.Add("state")
client_id = flow_urn.Split()[2]
item = ApiHuntClient()
item.client_id = client_id
item.flow_urn = flow_urn
try:
request_obj = waitingfor[request_urn]
except __HOLE__:
request_obj = None
if request_obj:
response_urn = rdfvalue.RDFURN(request_urn).Add(
"request:%08X" % request_obj.id)
responses_available = len(response_dict.setdefault(response_urn, []))
status_available = False
responses_expected = "Unknown"
if request_obj.id in status_by_request.setdefault(request_urn, {}):
status_available = True
status = status_by_request[request_urn][request_obj.id]
responses_expected = status.response_id
client_requests_available = 0
for client_req in client_requests.setdefault(client_id, []):
if request_obj.request.session_id == client_req.session_id:
client_requests_available += 1
item.incomplete_request_id = str(request_obj.id)
item.next_state = request_obj.next_state
item.expected_args = request_obj.request.args_rdf_name
item.available_responses_count = responses_available
item.expected_responses = responses_expected
item.is_status_available = status_available
item.available_client_requests_count = client_requests_available
result_items.append(item)
return ApiGetHuntClientsResult(
items=result_items, total_count=total_count)
|
KeyError
|
dataset/ETHPy150Open google/grr/grr/gui/api_plugins/hunt.py/ApiGetHuntClientsHandler.Handle
|
6,967
|
def prepare(self):
'''
Run the preparation sequence required to start a salt-api daemon.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).prepare()
'''
super(SaltAPI, self).prepare()
try:
if self.config['verify_env']:
logfile = self.config['log_file']
if logfile is not None and not logfile.startswith(('tcp://',
'udp://',
'file://')):
# Logfile is not using Syslog, verify
current_umask = os.umask(0o027)
verify_files([logfile], self.config['user'])
os.umask(current_umask)
except __HOLE__ as err:
log.exception('Failed to prepare salt environment')
self.shutdown(err.errno)
self.setup_logfile_logger()
verify_log(self.config)
log.info('Setting up the Salt API')
self.api = salt.client.netapi.NetapiClient(self.config)
self.daemonize_if_required()
self.set_pidfile()
|
OSError
|
dataset/ETHPy150Open saltstack/salt/salt/cli/api.py/SaltAPI.prepare
|
6,968
|
def _find_all(self, name, attrs, text, limit, generator, **kwargs):
"Iterates over a generator looking for things that match."
if isinstance(name, SoupStrainer):
strainer = name
elif text is None and not limit and not attrs and not kwargs:
# Optimization to find all tags.
if name is True or name is None:
return [element for element in generator
if isinstance(element, Tag)]
# Optimization to find all tags with a given name.
elif isinstance(name, basestring):
return [element for element in generator
if isinstance(element, Tag) and element.name == name]
else:
strainer = SoupStrainer(name, attrs, text, **kwargs)
else:
# Build a SoupStrainer
strainer = SoupStrainer(name, attrs, text, **kwargs)
results = ResultSet(strainer)
while True:
try:
i = next(generator)
except __HOLE__:
break
if i:
found = strainer.search(i)
if found:
results.append(found)
if limit and len(results) >= limit:
break
return results
#These generators can be used to navigate starting from both
#NavigableStrings and Tags.
|
StopIteration
|
dataset/ETHPy150Open mtlevolio/pylinkchecker/pylinkchecker/bs4/element.py/PageElement._find_all
|
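Record 6,968 drives a generator by hand, so StopIteration is the loop's exit condition rather than an error. A minimal sketch of the same loop:

def take_matching(iterator, predicate, limit=None):
    results = []
    while True:
        try:
            item = next(iterator)
        except StopIteration:  # iterator exhausted
            break
        if predicate(item):
            results.append(item)
            if limit and len(results) >= limit:
                break
    return results

assert take_matching(iter(range(10)), lambda n: n % 2 == 0, 3) == [0, 2, 4]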
6,969
|
def select(self, selector, _candidate_generator=None):
"""Perform a CSS selection operation on the current element."""
tokens = selector.split()
current_context = [self]
if tokens[-1] in self._selector_combinators:
raise ValueError(
'Final combinator "%s" is missing an argument.' % tokens[-1])
if self._select_debug:
print 'Running CSS selector "%s"' % selector
for index, token in enumerate(tokens):
if self._select_debug:
print ' Considering token "%s"' % token
recursive_candidate_generator = None
tag_name = None
if tokens[index-1] in self._selector_combinators:
# This token was consumed by the previous combinator. Skip it.
if self._select_debug:
print ' Token was consumed by the previous combinator.'
continue
# Each operation corresponds to a checker function, a rule
# for determining whether a candidate matches the
# selector. Candidates are generated by the active
# iterator.
checker = None
m = self.attribselect_re.match(token)
if m is not None:
# Attribute selector
tag_name, attribute, operator, value = m.groups()
checker = self._attribute_checker(operator, attribute, value)
elif '#' in token:
# ID selector
tag_name, tag_id = token.split('#', 1)
def id_matches(tag):
return tag.get('id', None) == tag_id
checker = id_matches
elif '.' in token:
# Class selector
tag_name, klass = token.split('.', 1)
classes = set(klass.split('.'))
def classes_match(candidate):
return classes.issubset(candidate.get('class', []))
checker = classes_match
elif ':' in token:
# Pseudo-class
tag_name, pseudo = token.split(':', 1)
if tag_name == '':
raise ValueError(
"A pseudo-class must be prefixed with a tag name.")
pseudo_attributes = re.match('([a-zA-Z\d-]+)\(([a-zA-Z\d]+)\)', pseudo)
found = []
if pseudo_attributes is not None:
pseudo_type, pseudo_value = pseudo_attributes.groups()
if pseudo_type == 'nth-of-type':
try:
pseudo_value = int(pseudo_value)
except:
raise NotImplementedError(
'Only numeric values are currently supported for the nth-of-type pseudo-class.')
if pseudo_value < 1:
raise ValueError(
'nth-of-type pseudo-class value must be at least 1.')
class Counter(object):
def __init__(self, destination):
self.count = 0
self.destination = destination
def nth_child_of_type(self, tag):
self.count += 1
if self.count == self.destination:
return True
if self.count > self.destination:
# Stop the generator that's sending us
# these things.
raise StopIteration()
return False
checker = Counter(pseudo_value).nth_child_of_type
else:
raise NotImplementedError(
'Only the following pseudo-classes are implemented: nth-of-type.')
elif token == '*':
# Star selector -- matches everything
pass
elif token == '>':
# Run the next token as a CSS selector against the
# direct children of each tag in the current context.
recursive_candidate_generator = lambda tag: tag.children
elif token == '~':
# Run the next token as a CSS selector against the
# siblings of each tag in the current context.
recursive_candidate_generator = lambda tag: tag.next_siblings
elif token == '+':
# For each tag in the current context, run the next
# token as a CSS selector against the tag's next
# sibling that's a tag.
def next_tag_sibling(tag):
yield tag.find_next_sibling(True)
recursive_candidate_generator = next_tag_sibling
elif self.tag_name_re.match(token):
# Just a tag name.
tag_name = token
else:
raise ValueError(
'Unsupported or invalid CSS selector: "%s"' % token)
if recursive_candidate_generator:
# This happens when the selector looks like "> foo".
#
# The generator calls select() recursively on every
# member of the current context, passing in a different
# candidate generator and a different selector.
#
# In the case of "> foo", the candidate generator is
# one that yields a tag's direct children (">"), and
# the selector is "foo".
next_token = tokens[index+1]
def recursive_select(tag):
if self._select_debug:
print ' Calling select("%s") recursively on %s %s' % (next_token, tag.name, tag.attrs)
print '-' * 40
for i in tag.select(next_token, recursive_candidate_generator):
if self._select_debug:
print '(Recursive select picked up candidate %s %s)' % (i.name, i.attrs)
yield i
if self._select_debug:
print '-' * 40
_use_candidate_generator = recursive_select
elif _candidate_generator is None:
# By default, a tag's candidates are all of its
# children. If tag_name is defined, only yield tags
# with that name.
if self._select_debug:
if tag_name:
check = "[any]"
else:
check = "[any]"
print ' Default candidate generator, tag name="%s"' % check
if self._select_debug:
# This is redundant with later code, but it stops
# a bunch of bogus tags from cluttering up the
# debug log.
def default_candidate_generator(tag):
for child in tag.descendants:
if not isinstance(child, Tag):
continue
if tag_name and not child.name == tag_name:
continue
yield child
_use_candidate_generator = default_candidate_generator
else:
_use_candidate_generator = lambda tag: tag.descendants
else:
_use_candidate_generator = _candidate_generator
new_context = []
new_context_ids = set([])
for tag in current_context:
if self._select_debug:
print " Running candidate generator on %s %s" % (
tag.name, repr(tag.attrs))
for candidate in _use_candidate_generator(tag):
if not isinstance(candidate, Tag):
continue
if tag_name and candidate.name != tag_name:
continue
if checker is not None:
try:
result = checker(candidate)
except __HOLE__:
# The checker has decided we should no longer
# run the generator.
break
if checker is None or result:
if self._select_debug:
print " SUCCESS %s %s" % (candidate.name, repr(candidate.attrs))
if id(candidate) not in new_context_ids:
# If a tag matches a selector more than once,
# don't include it in the context more than once.
new_context.append(candidate)
new_context_ids.add(id(candidate))
elif self._select_debug:
print " FAILURE %s %s" % (candidate.name, repr(candidate.attrs))
current_context = new_context
if self._select_debug:
print "Final verdict:"
for i in current_context:
print " %s %s" % (i.name, i.attrs)
return current_context
# Old names for backwards compatibility
|
StopIteration
|
dataset/ETHPy150Open mtlevolio/pylinkchecker/pylinkchecker/bs4/element.py/Tag.select
|
6,970
|
def _ssh_mkdir(self, *args):
directory = join(*args)
self.debug('Create remote directory {}'.format(directory))
try:
self._sftp_client.mkdir(directory)
except IOError:
# already created?
try:
self._sftp_client.stat(directory)
except __HOLE__:
self.error('Unable to create remote directory {}'.format(directory))
raise
|
IOError
|
dataset/ETHPy150Open kivy/buildozer/buildozer/scripts/remote.py/BuildozerRemote._ssh_mkdir
|
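Record 6,970 uses create-then-probe over SFTP: if mkdir fails, a stat() decides whether the directory already existed (the remote IOError may not carry a usable errno). Locally the same idea is usually written against errno; a sketch:

import errno
import os

def mkdir_idempotent(path):
    try:
        os.mkdir(path)
    except OSError as exc:
        # Tolerate "already exists"; anything else is a real failure.
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise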
6,971
|
def handle_noargs(self, **options):
# XXX: (Temporary) workaround for ticket #1796: force early loading of all
# models from installed apps. (this is fixed by now, but leaving it here
# for people using 0.96 or older trunk (pre [5919]) versions.)
from django.db.models.loading import get_models, get_apps
loaded_models = get_models()
use_ipython = options.get('ipython', False)
use_plain = options.get('plain', False)
use_pythonrc = not options.get('no_pythonrc', True)
if options.get("print_sql", False):
# Code from http://gist.github.com/118990
from django.db.backends import util
try:
import sqlparse
except ImportError:
sqlparse = None
class PrintQueryWrapper(util.CursorDebugWrapper):
def execute(self, sql, params=()):
starttime = time.time()
try:
return self.cursor.execute(sql, params)
finally:
raw_sql = self.db.ops.last_executed_query(self.cursor, sql, params)
execution_time = time.time() - starttime
if sqlparse:
print sqlparse.format(raw_sql, reindent=True)
else:
print raw_sql
print
print 'Execution time: %.6fs' % execution_time
print
util.CursorDebugWrapper = PrintQueryWrapper
# Set up a dictionary to serve as the environment for the shell, so
# that tab completion works on objects that are imported at runtime.
# See ticket 5082.
from django.conf import settings
imported_objects = {'settings': settings}
dont_load_cli = options.get('dont_load') # optparse will set this to [] if it doesn't exist
dont_load_conf = getattr(settings, 'SHELL_PLUS_DONT_LOAD', [])
dont_load = dont_load_cli + dont_load_conf
model_aliases = getattr(settings, 'SHELL_PLUS_MODEL_ALIASES', {})
for app_mod in get_apps():
app_models = get_models(app_mod)
if not app_models:
continue
app_name = app_mod.__name__.split('.')[-2]
if app_name in dont_load:
continue
app_aliases = model_aliases.get(app_name, {})
model_labels = []
for model in app_models:
try:
imported_object = getattr(__import__(app_mod.__name__, {}, {}, model.__name__), model.__name__)
model_name = model.__name__
if "%s.%s" % (app_name, model_name) in dont_load:
continue
alias = app_aliases.get(model_name, model_name)
imported_objects[alias] = imported_object
if model_name == alias:
model_labels.append(model_name)
else:
model_labels.append("%s (as %s)" % (model_name, alias))
except AttributeError, e:
print self.style.ERROR("Failed to import '%s' from '%s' reason: %s" % (model.__name__, app_name, str(e)))
continue
print self.style.SQL_COLTYPE("From '%s' autoload: %s" % (app_mod.__name__.split('.')[-2], ", ".join(model_labels)))
try:
if use_plain:
# Don't bother loading B/IPython, because the user wants plain Python.
raise ImportError
try:
if use_ipython:
# User wants IPython
raise ImportError
from bpython import embed
embed(imported_objects)
except ImportError:
try:
from IPython import embed
embed(user_ns=imported_objects)
except ImportError:
# IPython < 0.11
# Explicitly pass an empty list as arguments, because otherwise
# IPython would use sys.argv from this script.
try:
from IPython.Shell import IPShell
shell = IPShell(argv=[], user_ns=imported_objects)
shell.mainloop()
except __HOLE__:
# IPython not found at all, raise ImportError
raise
except ImportError:
# Using normal Python shell
import code
try:
# Try activating rlcompleter, because it's handy.
import readline
except ImportError:
pass
else:
# We don't have to wrap the following import in a 'try', because
# we already know 'readline' was imported successfully.
import rlcompleter
readline.set_completer(rlcompleter.Completer(imported_objects).complete)
readline.parse_and_bind("tab:complete")
# We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system
# conventions and get $PYTHONSTARTUP first then import user.
if use_pythonrc:
pythonrc = os.environ.get("PYTHONSTARTUP")
if pythonrc and os.path.isfile(pythonrc):
try:
execfile(pythonrc)
except NameError:
pass
# This will import .pythonrc.py as a side-effect
import user
code.interact(local=imported_objects)
|
ImportError
|
dataset/ETHPy150Open mozilla/inventory/vendor-local/src/django-extensions/build/lib/django_extensions/management/commands/shell_plus.py/Command.handle_noargs
|
6,972
|
def __iter__(self):
"""
Iterate through the records by pulling them off the server a page
at a time.
Currently set to do DEFAULT_PAGE_SIZE records at a time as per spec.
"""
x = 0
while True:
print "__iter__: ", self.detail, x, DEFAULT_PAGE_SIZE
theList = self.manager.createListP(self.detail, x, DEFAULT_PAGE_SIZE)
if theList:
i = 0
while True:
try:
yield theList[i]
i += 1
except __HOLE__:
x += i
break
else:
break
raise StopIteration
|
IndexError
|
dataset/ETHPy150Open rackerlabs/python-cloudservers/com/rackspace/cloud/servers/api/client/entitylist.py/EntityList.__iter__
|
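Record 6,972 walks a page with an index until items[i] raises IndexError, then fetches the next page. A Python 3 generator sketch (a plain return replaces the record's explicit raise StopIteration, which PEP 479 forbids inside generators):

PAGE = 3

def paged(fetch_page):
    offset = 0
    while True:
        items = fetch_page(offset, PAGE)
        if not items:
            return  # empty page: done
        i = 0
        while True:
            try:
                yield items[i]
                i += 1
            except IndexError:  # walked off the end of this page
                offset += i
                break

data = list(range(7))
assert list(paged(lambda off, n: data[off:off + n])) == data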
6,973
|
def _shelve(self, obj, name='1.html'):
"""Writes given object into a file on desktop. (For debug purposes only ;) """
fo = False
path = os.path.expanduser(os.path.join('~', 'Desktop', name))
# Cleaning existing file
try:
with open(path):
os.remove(path)
pass
except __HOLE__:
pass
# Dumping object into file
try:
fo = open(path, 'w')
except Exception, e:
print e
pass
if fo:
fo.writelines(obj)
print 'file %s written' % name
|
IOError
|
dataset/ETHPy150Open adlibre/Adlibre-DMS/adlibre_dms/libraries/adlibre/dms/base_test.py/DMSTestCase._shelve
|
6,974
|
def _get_conn(self, timeout=None):
"""
Get a connection. Will return a pooled connection if one is available.
If no connections are available and :prop:`.block` is ``False``, then a
fresh connection is returned.
:param timeout:
Seconds to wait before giving up and raising
:class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
:prop:`.block` is ``True``.
"""
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except __HOLE__: # self.pool is None
raise ClosedPoolError(self, "Pool is closed.")
except Empty:
if self.block:
raise EmptyPoolError(self,
"Pool reached maximum size and no more "
"connections are allowed.")
pass # Oh well, we'll create a new connection then
# If this is a persistent connection, check if it got disconnected
if conn and is_connection_dropped(conn):
log.info("Resetting dropped connection: %s", self.host)
conn.close()
if getattr(conn, 'auto_open', 1) == 0:
# This is a proxied connection that has been mutated by
# httplib._tunnel() and cannot be reused (since it would
# attempt to bypass the proxy)
conn = None
return conn or self._new_conn()
|
AttributeError
|
dataset/ETHPy150Open kennethreitz/requests/requests/packages/urllib3/connectionpool.py/HTTPConnectionPool._get_conn
|
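Records 6,974 and 6,975 null out self.pool on close, so the AttributeError from None.get()/None.put() doubles as the closed-pool signal. A stdlib-queue sketch:

import queue

class Pool(object):
    def __init__(self, maxsize=2):
        self.pool = queue.LifoQueue(maxsize)

    def close(self):
        self.pool = None  # subsequent get()/put() raise AttributeError

    def get_conn(self):
        try:
            return self.pool.get(block=False)
        except AttributeError:      # pool was closed
            raise RuntimeError('pool is closed')
        except queue.Empty:
            return 'fresh-connection'  # hypothetical new connection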
6,975
|
def _put_conn(self, conn):
"""
Put a connection back into the pool.
:param conn:
Connection object for the current host and port as returned by
:meth:`._new_conn` or :meth:`._get_conn`.
If the pool is already full, the connection is closed and discarded
because we exceeded maxsize. If connections are discarded frequently,
then maxsize should be increased.
If the pool is closed, then the connection will be closed and discarded.
"""
try:
self.pool.put(conn, block=False)
return # Everything is dandy, done.
except __HOLE__:
# self.pool is None.
pass
except Full:
# This should never happen if self.block == True
log.warning(
"Connection pool is full, discarding connection: %s",
self.host)
# Connection never got put back into the pool, close it.
if conn:
conn.close()
|
AttributeError
|
dataset/ETHPy150Open kennethreitz/requests/requests/packages/urllib3/connectionpool.py/HTTPConnectionPool._put_conn
|
6,976
|
def _make_request(self, conn, method, url, timeout=_Default, chunked=False,
**httplib_request_kw):
"""
Perform a request on a given urllib connection object taken from our
pool.
:param conn:
a connection from one of our connection pools
:param timeout:
Socket timeout in seconds for the request. This can be a
float or integer, which will set the same timeout value for
the socket connect and the socket read, or an instance of
:class:`urllib3.util.Timeout`, which gives you more fine-grained
control over your timeouts.
"""
self.num_requests += 1
timeout_obj = self._get_timeout(timeout)
timeout_obj.start_connect()
conn.timeout = timeout_obj.connect_timeout
# Trigger any extra validation we need to do.
try:
self._validate_conn(conn)
except (SocketTimeout, BaseSSLError) as e:
# Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
raise
# conn.request() calls httplib.*.request, not the method in
# urllib3.request. It also calls makefile (recv) on the socket.
if chunked:
conn.request_chunked(method, url, **httplib_request_kw)
else:
conn.request(method, url, **httplib_request_kw)
# Reset the timeout for the recv() on the socket
read_timeout = timeout_obj.read_timeout
# App Engine doesn't have a sock attr
if getattr(conn, 'sock', None):
# In Python 3 socket.py will catch EAGAIN and return None when you
# try and read into the file pointer created by http.client, which
# instead raises a BadStatusLine exception. Instead of catching
# the exception and assuming all BadStatusLine exceptions are read
# timeouts, check for a zero timeout before making the request.
if read_timeout == 0:
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % read_timeout)
if read_timeout is Timeout.DEFAULT_TIMEOUT:
conn.sock.settimeout(socket.getdefaulttimeout())
else: # None or a value
conn.sock.settimeout(read_timeout)
# Receive the response from the server
try:
try: # Python 2.7, use buffering of HTTP responses
httplib_response = conn.getresponse(buffering=True)
except __HOLE__: # Python 2.6 and older
httplib_response = conn.getresponse()
except (SocketTimeout, BaseSSLError, SocketError) as e:
self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
raise
# AppEngine doesn't have a version attr.
http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
log.debug("\"%s %s %s\" %s %s", method, url, http_version,
httplib_response.status, httplib_response.length)
try:
assert_header_parsing(httplib_response.msg)
except HeaderParsingError as hpe: # Platform-specific: Python 3
log.warning(
'Failed to parse headers (url=%s): %s',
self._absolute_url(url), hpe, exc_info=True)
return httplib_response
|
TypeError
|
dataset/ETHPy150Open kennethreitz/requests/requests/packages/urllib3/connectionpool.py/HTTPConnectionPool._make_request
|
6,977
|
def _prepare_proxy(self, conn):
"""
Establish tunnel connection early, because otherwise httplib
would improperly set Host: header to proxy's IP:port.
"""
# Python 2.7+
try:
set_tunnel = conn.set_tunnel
except __HOLE__: # Platform-specific: Python 2.6
set_tunnel = conn._set_tunnel
if sys.version_info <= (2, 6, 4) and not self.proxy_headers: # Python 2.6.4 and older
set_tunnel(self.host, self.port)
else:
set_tunnel(self.host, self.port, self.proxy_headers)
conn.connect()
|
AttributeError
|
dataset/ETHPy150Open kennethreitz/requests/requests/packages/urllib3/connectionpool.py/HTTPSConnectionPool._prepare_proxy
|
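Record 6,977 does feature detection by attribute lookup: prefer the public name, fall back to the older private spelling on AttributeError. In miniature:

def resolve_tunnel(conn):
    try:
        return conn.set_tunnel      # Python 2.7+ public API
    except AttributeError:
        return conn._set_tunnel     # older private spelling

class Old(object):  # hypothetical pre-2.7-style connection
    def _set_tunnel(self, host, port):
        return (host, port)

assert resolve_tunnel(Old())('proxy', 8080) == ('proxy', 8080)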
6,978
|
def _bootstrapSPIDev(self):
import os.path
import sys
if self.use_py_spi:
try:
import spidev
except:
error = "Unable to import spidev. Please install. pip install spidev"
log.error(error)
raise ImportError(error)
if not os.path.exists(self.dev):
error = "Cannot find SPI device. Please see https://github.com/maniacallabs/bibliopixel/wiki/SPI-Setup for details."
log.error(error)
raise IOError(error)
# permissions check
try:
open(self.dev)
except __HOLE__ as e:
if e.errno == 13:
error = "Cannot find SPI device. Please see https://github.com/maniacallabs/bibliopixel/wiki/SPI-Setup for details."
log.error(error)
raise IOError(error)
else:
raise e
|
IOError
|
dataset/ETHPy150Open ManiacalLabs/BiblioPixel/bibliopixel/drivers/spi_driver_base.py/DriverSPIBase._bootstrapSPIDev
|
6,979
|
def getBuilderWindow():
""" getBuilderWindow() -> QMainWindow
Return the current builder window of VisTrails if exists
"""
try:
return QtCore.QCoreApplication.instance().builderWindow
except __HOLE__:
return None
|
AttributeError
|
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/gui/utils.py/getBuilderWindow
|
6,980
|
def test_send_error(_socket, conn):
conn.connect()
assert conn.state is ConnectionStates.CONNECTED
req = MetadataRequest[0]([])
try:
_socket.send.side_effect = ConnectionError
except __HOLE__:
_socket.send.side_effect = socket.error
f = conn.send(req)
assert f.failed() is True
assert isinstance(f.exception, Errors.ConnectionError)
assert _socket.close.call_count == 1
assert conn.state is ConnectionStates.DISCONNECTED
|
NameError
|
dataset/ETHPy150Open dpkp/kafka-python/test/test_conn.py/test_send_error
|
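Record 6,980 probes for a builtin by evaluating its bare name; an unbound name raises NameError, which makes a compact Python 2/3 switch:

import socket

try:
    ConnectionError            # builtin since Python 3.3
except NameError:              # Python 2: alias the closest equivalent
    ConnectionError = socket.error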
6,981
|
@mock.patch('uvcclient.store.get_info_store')
@mock.patch('uvcclient.camera.UVCCameraClient')
def test_login_tries_both_addrs_and_caches(self, mock_camera, mock_store):
""""Test the login tries."""
responses = [0]
def fake_login(*a):
try:
responses.pop(0)
raise socket.error
except __HOLE__:
pass
mock_store.return_value.get_camera_password.return_value = None
mock_camera.return_value.login.side_effect = fake_login
self.uvc._login()
self.assertEqual(2, mock_camera.call_count)
self.assertEqual('host-b', self.uvc._connect_addr)
mock_camera.reset_mock()
self.uvc._login()
mock_camera.assert_called_once_with('host-b', 'admin', 'ubnt')
mock_camera.return_value.login.assert_called_once_with()
|
IndexError
|
dataset/ETHPy150Open home-assistant/home-assistant/tests/components/camera/test_uvc.py/TestUVC.test_login_tries_both_addrs_and_caches
|
6,982
|
def test_camera_image_reauths(self):
""""Test the re-authentication."""
responses = [0]
def fake_snapshot():
try:
responses.pop()
raise camera.CameraAuthError()
except __HOLE__:
pass
return 'image'
self.uvc._camera = mock.MagicMock()
self.uvc._camera.get_snapshot.side_effect = fake_snapshot
with mock.patch.object(self.uvc, '_login') as mock_login:
self.assertEqual('image', self.uvc.camera_image())
mock_login.assert_called_once_with()
self.assertEqual([], responses)
|
IndexError
|
dataset/ETHPy150Open home-assistant/home-assistant/tests/components/camera/test_uvc.py/TestUVC.test_camera_image_reauths
|
6,983
|
@click.command(short_help='Mask in raster using features.')
@cligj.files_inout_arg
@options.output_opt
@click.option('-j', '--geojson-mask', 'geojson_mask',
type=click.Path(), default=None,
help='GeoJSON file to use for masking raster. Use "-" to read '
'from stdin. If not provided, original raster will be '
'returned')
@cligj.format_opt
@options.all_touched_opt
@click.option('--crop', is_flag=True, default=False,
help='Crop output raster to the extent of the geometries. '
'GeoJSON must overlap input raster to use --crop')
@click.option('-i', '--invert', is_flag=True, default=False,
help='Inverts the mask, so that areas covered by features are '
'masked out and areas not covered are retained. Ignored '
'if using --crop')
@options.force_overwrite_opt
@options.creation_options
@click.pass_context
def mask(
ctx,
files,
output,
geojson_mask,
driver,
all_touched,
crop,
invert,
force_overwrite,
creation_options):
"""Masks in raster using GeoJSON features (masks out all areas not covered
by features), and optionally crops the output raster to the extent of the
features. Features are assumed to be in the same coordinate reference
system as the input raster.
GeoJSON must be the first input file or provided from stdin:
> rio mask input.tif output.tif --geojson-mask features.json
> rio mask input.tif output.tif --geojson-mask - < features.json
If the output raster exists, it will be completely overwritten with the
results of this operation.
The result is always equal to or within the bounds of the input raster.
--crop and --invert options are mutually exclusive.
--crop option is not valid if features are completely outside extent of
input raster.
"""
from rasterio.mask import mask as mask_tool
from rasterio.features import bounds as calculate_bounds
verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1
output, files = resolve_inout(
files=files, output=output, force_overwrite=force_overwrite)
input = files[0]
if geojson_mask is None:
click.echo('No GeoJSON provided, INPUT will be copied to OUTPUT',
err=True)
shutil.copy(input, output)
return
if crop and invert:
click.echo('Invert option ignored when using --crop', err=True)
invert = False
with Env(CPL_DEBUG=verbosity > 2) as env:
try:
with click.open_file(geojson_mask) as fh:
geojson = json.loads(fh.read())
except ValueError:
raise click.BadParameter('GeoJSON could not be read from '
'--geojson-mask or stdin',
param_hint='--geojson-mask')
if 'features' in geojson:
geometries = [f['geometry'] for f in geojson['features']]
elif 'geometry' in geojson:
geometries = (geojson['geometry'], )
else:
raise click.BadParameter('Invalid GeoJSON', param=input,
param_hint='input')
with rasterio.open(input) as src:
try:
out_image, out_transform = mask_tool(src, geometries,
crop=crop, invert=invert,
all_touched=all_touched)
except __HOLE__ as e:
if e.args[0] == 'Input shapes do not overlap raster.':
if crop:
raise click.BadParameter('not allowed for GeoJSON '
'outside the extent of the '
'input raster',
param=crop,
param_hint='--crop')
meta = src.meta.copy()
meta.update(**creation_options)
meta.update({
'driver': driver,
'height': out_image.shape[1],
'width': out_image.shape[2],
'transform': out_transform
})
with rasterio.open(output, 'w', **meta) as out:
out.write(out_image)
|
ValueError
|
dataset/ETHPy150Open mapbox/rasterio/rasterio/rio/mask.py/mask
|
6,984
|
def __call__(self, name):
try:
return self.mapping[name]
except __HOLE__:
raise KeyError(
'missing key. add type for "{0}" in self.mapping'.format(
name))
|
KeyError
|
dataset/ETHPy150Open ActiveState/applib/applib/_simpledb.py/_get_best_column_type.__call__
|
6,985
|
def classifier_test(self):
player = self.player()
classifier = dict()
for key in ['stochastic',
'inspects_source', 'manipulates_source',
'manipulates_state']:
classifier[key] = (any(t.classifier[key] for t in player.team))
classifier['memory_depth'] = float('inf')
for t in player.team:
try:
classifier['makes_use_of'].update(t.classifier['makes_use_of'])
except __HOLE__:
pass
for key in classifier:
self.assertEqual(player.classifier[key],
classifier[key],
msg="%s - Behaviour: %s != Expected Behaviour: %s" %
(key, player.classifier[key], classifier[key]))
|
KeyError
|
dataset/ETHPy150Open Axelrod-Python/Axelrod/axelrod/tests/unit/test_meta.py/TestMetaPlayer.classifier_test
|
6,986
|
def post_process(self, response):
try:
rjson = response.json()
except __HOLE__:
print response.text
raise
if rjson and ("exc" in rjson) and rjson["exc"]:
raise FrappeException(rjson["exc"])
if 'message' in rjson:
return rjson['message']
elif 'data' in rjson:
return rjson['data']
else:
return None
|
ValueError
|
dataset/ETHPy150Open frappe/frappe/frappe/frappeclient.py/FrappeClient.post_process
|
6,987
|
def __hash__(self):
"""
Allows states to be keys of dictionaries.
"""
for i, state in enumerate(self.agentStates):
try:
int(hash(state))
except __HOLE__ as e:
print e
# hash(state)
return (
int((hash(tuple(self.agentStates)) + 13 * hash(self.food) + 113 *
hash(tuple(self.capsules)) + 7 * hash(self.score)) % 1048575)
)
|
TypeError
|
dataset/ETHPy150Open rlpy/rlpy/rlpy/Domains/PacmanPackage/game.py/GameStateData.__hash__
|
6,988
|
def test_cluster_add_drop_recount(self):
client = pyorient.OrientDB("localhost", 2424) # TEST COMMANDS
client.connect( "root", "root" )
db_name = 'test_commands'
exists = client.db_exists(db_name, pyorient.STORAGE_TYPE_MEMORY)
print("Before %r" % exists)
try:
client.db_drop(db_name)
assert True
except pyorient.PyOrientCommandException as e:
print(str(e))
finally:
client.db_create(db_name, pyorient.DB_TYPE_GRAPH,
pyorient.STORAGE_TYPE_MEMORY)
cluster_info = client.db_open(
db_name, "admin", "admin", pyorient.DB_TYPE_GRAPH, ""
)
# CLUSTERS
new_cluster_id = client.data_cluster_add(
'my_cluster_1234567', pyorient.CLUSTER_TYPE_PHYSICAL
)
assert new_cluster_id > 0
new_cluster_list = client.db_reload()
new_cluster_list.sort(key=lambda cluster: cluster.id)
_list = []
for cluster in new_cluster_list:
print("Cluster Name: %s, ID: %u " % (cluster.name, cluster.id))
value = client.data_cluster_data_range(cluster.id)
print("Value: %s " % value)
_list.append(cluster.id)
assert value is not []
assert value is not None
# check for new cluster in database
try:
_list.index(new_cluster_id)
print("New cluster %r found in reload." % new_cluster_id)
assert True
except __HOLE__:
assert False
# delete the new cluster TODO: broken test
print("Drop Cluster ID: %r" % new_cluster_id)
drop_cluster = client.data_cluster_drop(new_cluster_id)
assert drop_cluster is True
|
ValueError
|
dataset/ETHPy150Open mogui/pyorient/tests/test_new_Iface.py/CommandTestCase.test_cluster_add_drop_recount
|
6,989
|
def post(self, request, *args, **kwargs):
try:
obj = super(BaseCreateView, self).post(request, *args, **kwargs)
# redirect back to form if errors
except (IntegrityError, __HOLE__), e:
messages.error(request, str(e))
request.method = 'GET'
return super(BaseCreateView, self).get(request, *args, **kwargs)
return obj
|
ValidationError
|
dataset/ETHPy150Open mozilla/inventory/base/views.py/BaseCreateView.post
|
6,990
|
def post(self, request, *args, **kwargs):
try:
obj = super(BaseUpdateView, self).post(request, *args, **kwargs)
except __HOLE__, e:
messages.error(request, str(e))
request.method = 'GET'
return super(BaseUpdateView, self).get(request, *args, **kwargs)
return obj
|
ValidationError
|
dataset/ETHPy150Open mozilla/inventory/base/views.py/BaseUpdateView.post
|
6,991
|
def delete(self, request, *args, **kwargs):
# Get the object to delete
obj = get_object_or_404(
self.form_class.Meta.model, pk=kwargs.get('pk', 0)
)
try:
view = super(BaseDeleteView, self).delete(request, *args, **kwargs)
except __HOLE__, e:
messages.error(request, "Error: {0}".format(' '.join(e.messages)))
return redirect(obj)
messages.success(request, "Deletion Successful")
return view
|
ValidationError
|
dataset/ETHPy150Open mozilla/inventory/base/views.py/BaseDeleteView.delete
|
6,992
|
def complete(self, text, state):
try:
return self.instance_type_names[state]
except __HOLE__:
return None
|
IndexError
|
dataset/ETHPy150Open dnanexus/dx-toolkit/src/python/dxpy/utils/completer.py/InstanceTypesCompleter.complete
|
6,993
|
def interpolation_dict(self):
subs = html4css1.Writer.interpolation_dict(self)
settings = self.document.settings
pyhome = settings.python_home
subs['pyhome'] = pyhome
subs['pephome'] = settings.pep_home
if pyhome == '..':
subs['pepindex'] = '.'
else:
subs['pepindex'] = pyhome + '/dev/peps'
index = self.document.first_child_matching_class(nodes.field_list)
header = self.document[index]
self.pepnum = header[0][1].astext()
subs['pep'] = self.pepnum
if settings.no_random:
subs['banner'] = 0
else:
import random
subs['banner'] = random.randrange(64)
try:
subs['pepnum'] = '%04i' % int(self.pepnum)
except __HOLE__:
subs['pepnum'] = self.pepnum
self.title = header[1][1].astext()
subs['title'] = self.title
subs['body'] = ''.join(
self.body_pre_docinfo + self.docinfo + self.body)
return subs
|
ValueError
|
dataset/ETHPy150Open adieu/allbuttonspressed/docutils/writers/pep_html/__init__.py/Writer.interpolation_dict
|
6,994
|
def delete(self):
for favorite in Vote.all().filter("monster = ",self).run():
favorite.delete()
try:
search.Index(name=_MONSTER_INDEX).delete(str(self.key().id()))
except __HOLE__ as e:
if e.message == 'No api proxy found for service "search"':
pass
else:
raise e
db.Model.delete(self)
|
AssertionError
|
dataset/ETHPy150Open Sagelt/dungeon-world-codex/data/models.py/Monster.delete
|
6,995
|
def equalityHash(elt, equalMode=S_EQUAL, excludeIDs=NO_IDs_EXCLUDED):
if isinstance(elt, ModelObject):
try:
if equalMode == S_EQUAL:
return elt._hashSEqual
else:
return elt._hashXpathEqual
except __HOLE__:
dts = elt.modelXbrl
if not hasattr(elt,"xValid"):
xmlValidate(dts, elt)
hashableValue = elt.sValue if equalMode == S_EQUAL else elt.xValue
if isinstance(hashableValue,float) and math.isnan(hashableValue):
hashableValue = (hashableValue,elt) # ensure this NaN only compares to itself and no other NaN
_hash = hash((elt.elementQname,
hashableValue,
tuple(attributeDict(dts, elt, (), equalMode, excludeIDs, distinguishNaNs=True).items()),
tuple(equalityHash(child,equalMode,excludeIDs) for child in childElements(elt))
))
if equalMode == S_EQUAL:
elt._hashSEqual = _hash
else:
elt._hashXpathEqual = _hash
return _hash
elif isinstance(elt, (tuple,list,set)):
return hash( tuple(equalityHash(i) for i in elt) )
else:
return hash(None)
|
AttributeError
|
dataset/ETHPy150Open Arelle/Arelle/arelle/XbrlUtil.py/equalityHash
|
6,996
|
def attributeDict(modelXbrl, elt, exclusions=set(), equalMode=S_EQUAL, excludeIDs=NO_IDs_EXCLUDED, ns2ns1Tbl=None, keyByTag=False, distinguishNaNs=False):
if not hasattr(elt,"xValid"):
xmlValidate(modelXbrl, elt)
attrs = {}
# TBD: replace with validated attributes
for modelAttribute in elt.xAttributes.values():
attrTag = modelAttribute.attrTag
ns, sep, localName = attrTag.partition('}')
attrNsURI = ns[1:] if sep else None
if ns2ns1Tbl and attrNsURI in ns2ns1Tbl:
attrNsURI = ns2ns1Tbl[attrNsURI]
if (attrTag not in exclusions and
(attrNsURI is None or attrNsURI not in exclusions)):
if keyByTag:
qname = attrTag
elif attrNsURI is not None:
qname = QName(None, attrNsURI, localName)
else:
qname = QName(None, None, attrTag)
try:
if excludeIDs and getattr(modelAttribute, "xValid", 0) == VALID_ID:
continue
if modelAttribute.xValid != UNKNOWN:
value = modelAttribute.sValue if equalMode <= S_EQUAL2 else modelAttribute.xValue
else: # unable to validate, no schema definition, use string value of attribute
value = modelAttribute.text
if distinguishNaNs and isinstance(value,float) and math.isnan(value):
value = (value,elt)
attrs[qname] = value
except __HOLE__:
pass # nothing useful to do if the attribute failed to get a PSVI value
return attrs
|
KeyError
|
dataset/ETHPy150Open Arelle/Arelle/arelle/XbrlUtil.py/attributeDict
|
6,997
|
def typedValue(dts, element, attrQname=None):
try:
if attrQname: # PSVI attribute value
modelAttribute = element.xAttributes[attrQname.clarkNotation]
if modelAttribute.xValid >= VALID:
return modelAttribute.xValue
else: # PSVI element value (of text)
if element.xValid >= VALID:
return element.xValue
except (AttributeError, __HOLE__):
if dts:
xmlValidate(dts, element, recurse=False, attrQname=attrQname)
return typedValue(None, element, attrQname=attrQname)
return None
|
KeyError
|
dataset/ETHPy150Open Arelle/Arelle/arelle/XbrlUtil.py/typedValue
|
6,998
|
def handle(self, *fixture_labels, **options):
from django.db.models import get_apps
from django.core import serializers
from django.db import connection, transaction
from django.conf import settings
self.style = no_style()
verbosity = int(options.get('verbosity', 1))
show_traceback = options.get('traceback', False)
    # commit is a stealth option - it isn't really useful as
    # a command line option, but it can be useful when invoking
    # loaddata from within another script.
    # If commit=True, loaddata will use its own transaction;
    # if commit=False, the data load SQL will become part of
    # the transaction in place when loaddata was invoked.
    commit = options.get('commit', True)

    # Keep a count of the installed objects and fixtures
    fixture_count = 0
    object_count = 0
    models = set()

    humanize = lambda dirname: dirname and "'%s'" % dirname or 'absolute path'

    # Get a cursor (even though we don't need one yet). This has
    # the side effect of initializing the test database (if
    # it isn't already initialized).
    cursor = connection.cursor()

    # Start transaction management. All fixtures are installed in a
    # single transaction to ensure that all references are resolved.
    if commit:
        transaction.commit_unless_managed()
        transaction.enter_transaction_management()
        transaction.managed(True)

    app_fixtures = [os.path.join(os.path.dirname(app.__file__), 'fixtures') for app in get_apps()]
    for fixture_label in fixture_labels:
        parts = fixture_label.split('.')
        if len(parts) == 1:
            fixture_name = fixture_label
            formats = serializers.get_public_serializer_formats()
        else:
            fixture_name, format = '.'.join(parts[:-1]), parts[-1]
            if format in serializers.get_public_serializer_formats():
                formats = [format]
            else:
                formats = []

        if formats:
            if verbosity > 1:
                print "Loading '%s' fixtures..." % fixture_name
        else:
            sys.stderr.write(
                self.style.ERROR("Problem installing fixture '%s': %s is not a known serialization format." %
                    (fixture_name, format)))
            transaction.rollback()
            transaction.leave_transaction_management()
            return

        if os.path.isabs(fixture_name):
            fixture_dirs = [fixture_name]
        else:
            fixture_dirs = app_fixtures + list(settings.FIXTURE_DIRS) + ['']

        for fixture_dir in fixture_dirs:
            if verbosity > 1:
                print "Checking %s for fixtures..." % humanize(fixture_dir)

            label_found = False
            for format in formats:
                serializer = serializers.get_serializer(format)
                if verbosity > 1:
                    print "Trying %s for %s fixture '%s'..." % \
                        (humanize(fixture_dir), format, fixture_name)
                try:
                    full_path = os.path.join(fixture_dir, '.'.join([fixture_name, format]))
                    fixture = open(full_path, 'r')
                    if label_found:
                        fixture.close()
                        print self.style.ERROR("Multiple fixtures named '%s' in %s. Aborting." %
                            (fixture_name, humanize(fixture_dir)))
                        transaction.rollback()
                        transaction.leave_transaction_management()
                        return
                    else:
                        fixture_count += 1
                        objects_in_fixture = 0
                        if verbosity > 0:
                            print "Installing %s fixture '%s' from %s." % \
                                (format, fixture_name, humanize(fixture_dir))
                        try:
                            objects = serializers.deserialize(format, fixture)
                            for obj in objects:
                                objects_in_fixture += 1
                                models.add(obj.object.__class__)
                                obj.save()
                            object_count += objects_in_fixture
                            label_found = True
                        except (__HOLE__, KeyboardInterrupt):
                            raise
                        except Exception:
                            import traceback
                            fixture.close()
                            transaction.rollback()
                            transaction.leave_transaction_management()
                            if show_traceback:
                                import traceback
                                traceback.print_exc()
                            else:
                                sys.stderr.write(
                                    self.style.ERROR("Problem installing fixture '%s': %s\n" %
                                        (full_path, traceback.format_exc())))
                            return
                        fixture.close()

                        # If the fixture we loaded contains 0 objects, assume that an
                        # error was encountered during fixture loading.
                        if objects_in_fixture == 0:
                            sys.stderr.write(
                                self.style.ERROR("No fixture data found for '%s'. (File format may be invalid.)" %
                                    (fixture_name)))
                            transaction.rollback()
                            transaction.leave_transaction_management()
                            return
                except:
                    if verbosity > 1:
                        print "No %s fixture '%s' in %s." % \
                            (format, fixture_name, humanize(fixture_dir))

    # If we found even one object in a fixture, we need to reset the
    # database sequences.
    if object_count > 0:
        sequence_sql = connection.ops.sequence_reset_sql(self.style, models)
        if sequence_sql:
            if verbosity > 1:
                print "Resetting sequences"
            for line in sequence_sql:
                cursor.execute(line)

    if commit:
        transaction.commit()
        transaction.leave_transaction_management()

    if object_count == 0:
        if verbosity > 1:
            print "No fixtures found."
    else:
        if verbosity > 0:
            print "Installed %d object(s) from %d fixture(s)" % (object_count, fixture_count)

    # Close the DB connection. This is required as a workaround for an
    # edge case in MySQL: if the same connection is used to
    # create tables, load data, and query, the query can return
    # incorrect results. See Django #7572, MySQL #37735.
    if commit:
        connection.close()
|
SystemExit
|
dataset/ETHPy150Open dcramer/django-compositepks/django/core/management/commands/loaddata.py/Command.handle
|
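A minimal usage sketch for the `commit` stealth option documented in the comments above, showing how another script might make the fixture's SQL part of its own transaction. This assumes a pre-1.6 Django (the era of this code), where manual transaction management is still available; the fixture name 'initial_data' is hypothetical.

    # Sketch: invoke loaddata with commit=False so its SQL joins our
    # transaction (assumes pre-1.6 Django transaction APIs; the fixture
    # name 'initial_data' is hypothetical).
    from django.core.management import call_command
    from django.db import transaction

    transaction.enter_transaction_management()
    transaction.managed(True)
    try:
        # call_command forwards keyword options, so commit=False reaches
        # options.get('commit', True) inside the handle() shown above.
        call_command('loaddata', 'initial_data', commit=False, verbosity=0)
        transaction.commit()
    except Exception:
        transaction.rollback()
        raise
    finally:
        transaction.leave_transaction_management()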
6,999
|
def load_model(self, rules=None, ignore=None, single_group=-1):
    """Loads the current deck into an OpenMDAO component.

    Args
    ----
    rules : dict of lists of strings, optional
        An optional dictionary of rules can be passed if the component has a
        hierarchy of variable trees for its input variables. If no rules dictionary
        is passed, ``load_model`` will attempt to find each namelist variable in the
        top level of the model hierarchy.

    ignore : list of strings, optional
        List of variable names that can safely be ignored.

    single_group : integer, optional
        Group id number to use for processing one single namelist group. Useful
        if extra processing is needed or if multiple groups have the same name.

    Returns
    -------
    tuple
        Returns a tuple containing the following values:
        (empty_groups, unlisted_groups, unlinked_vars).
        These need to be examined after calling ``load_model`` to make sure you
        loaded every variable into your model.

        empty_groups : ordereddict( integer : string )
            Names and ID number of groups that don't have cards. This includes
            strings found at the top level that aren't comments; these need to
            be processed by your wrapper to determine how the information fits
            into your component's variable hierarchy.

        unlisted_groups : ordereddict( integer : string )
            This dictionary includes the names and ID number of groups that have
            variables that couldn't be loaded because the group wasn't mentioned
            in the rules dictionary.

        unlinked_vars : list
            List of all variable names that weren't found in the component.
    """
    # We support loading a model before setup so that we can interact with it.
    if hasattr(self.comp.params, 'keys'):
        params = self.comp.params
    else:
        params = self.comp._init_params_dict

    # See Pylint W0102 for why we do this
    if not ignore:
        ignore = []

    if not self.groups:
        msg = "Input file must be read with parse_file before " \
              "load_model can be executed."
        raise RuntimeError(msg)

    if single_group > -1:
        use_group = iteritems({single_group: self.groups[single_group]})
    else:
        use_group = enumerate(self.groups)

    empty_groups = OrderedDict()
    unlisted_groups = OrderedDict()
    unlinked_vars = []
    used_groups = []

    for i, group_name in use_group:

        # Report all groups with no cards
        if len(self.cards[i]) == 0:
            empty_groups[i] = group_name
            continue

        # If a group_name appears twice, we really don't know where to
        # stick the variables, and there are potential data overwrite
        # issues. Those cases have to be handled individually.
        if group_name in used_groups:
            unlisted_groups[i] = group_name
            continue
        else:
            used_groups.append(group_name)

        # Process the cards in this group
        for card in self.cards[i]:

            name = card.name
            value = card.value
            found = False

            if rules:
                # If the group isn't in the rules dict, we can't handle
                # it now. A separate function call will be required.
                try:
                    containers = rules[group_name]
                except __HOLE__:
                    unlisted_groups[i] = group_name
                    break

                for container in containers:
                    # Note: FORTRAN is case-insensitive, OpenMDAO is not
                    varpath1 = "%s:%s" % (container, name)
                    varpath2 = "%s:%s" % (container, name.lower())

                    for item in [varpath1, varpath2]:
                        if item in params:
                            found = True
                            varpath = item
                            break
            else:
                for item in [name, name.lower()]:
                    if item in params:
                        found = True
                        varpath = item
                        break

            if not found:
                if name not in ignore and name.lower() not in ignore:
                    unlinked_vars.append(name)
            else:
                # 1D arrays must become ndarrays
                target = params[varpath]

                # Variables that are passed by array are arrays.
                # Everything else is an object.
                if isinstance(target, ndarray) and \
                   isinstance(value, (float, int)):
                    value = array([value])

                if hasattr(self.comp.params, 'keys'):
                    params[varpath] = value
                else:
                    params[varpath]['val'] = value

    return empty_groups, unlisted_groups, unlinked_vars
|
KeyError
|
dataset/ETHPy150Open OpenMDAO/OpenMDAO/openmdao/util/namelist_util.py/Namelist.load_model
|
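A minimal usage sketch for ``load_model``, assuming a component wrapper ``comp`` has already been built. The deck filename, the namelist group 'FLOWIN', the container path 'flow_conditions', and the ``set_filename`` call are all hypothetical illustrations rather than confirmed details of this codebase; only ``parse_file`` and the returned triple are documented in the docstring above.

    # Sketch: parse a namelist deck, map one group into a variable tree,
    # and check the bookkeeping returned by load_model
    # (names here are hypothetical).
    from openmdao.util.namelist_util import Namelist

    nml = Namelist(comp)
    nml.set_filename('input.nml')    # assumed method for naming the deck
    nml.parse_file()                 # required before load_model

    # rules maps a namelist group name to candidate container paths
    rules = {'FLOWIN': ['flow_conditions']}
    empty, unlisted, unlinked = nml.load_model(rules=rules, ignore=['DEBUG'])

    # Any leftover names mean the deck and the model disagree.
    if unlinked:
        raise RuntimeError("unlinked namelist variables: %s" % unlinked)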