| Unnamed: 0 (int64, 0–10k) | function (stringlengths 79–138k) | label (stringclasses, 20 values) | info (stringlengths 42–261) |
|---|---|---|---|
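Each `function` cell below contains Python source (Python 2 era, from the ETHPy150Open corpus) in which one exception name has been masked as `__HOLE__`; `label` is the exception class that was masked, and `info` is the path of the file the function was extracted from. As a minimal sketch of how such a table might be consumed, assuming it has been exported to a local CSV file (the filename `exception_dataset.csv` and the pandas-based loading are illustrative assumptions, not part of the dataset):

```python
import pandas as pd

# Hypothetical local export of this table; the filename is an assumption.
df = pd.read_csv("exception_dataset.csv")

for _, row in df.iterrows():
    masked_source = row["function"]  # source code with __HOLE__ in place of one exception name
    target = row["label"]            # the exception class that fills __HOLE__
    origin = row["info"]             # repository path of the original function
    print(target, "<-", origin)
```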
7,300
|
def _is_installed(self, fullname):
    try:
        self._import_module(fullname)
        return True
    except __HOLE__:
        return False
|
ImportError
|
dataset/ETHPy150Open nvbn/import_from_github_com/github_com/__init__.py/GithubComLoader._is_installed
|
7,301
|
def cp(src, dst, verbose=True):
    if verbose:
        echo('Copying: %s -> %s' % (os.path.basename(src), os.path.basename(dst)))
    try:
        copy(src, dst)
    except (ShError, __HOLE__) as e:
        error(str(e))
# pylint: enable=C0103

# pylint: disable=C0103
|
IOError
|
dataset/ETHPy150Open turbulenz/turbulenz_local/turbulenz_local/local_server.py/cp
|
7,302
|
def rm(filename, verbose=True):
    if verbose:
        echo('Removing: %s' % filename)
    try:
        os.remove(filename)
    except __HOLE__ as _:
        pass
# pylint: enable=C0103
|
OSError
|
dataset/ETHPy150Open turbulenz/turbulenz_local/turbulenz_local/local_server.py/rm
|
7,303
|
def mkdir(path, verbose=True):
    if verbose:
        echo('Creating: %s' % path)
    try:
        os.makedirs(path)
    except __HOLE__ as exc:
        if exc.errno == errno.EEXIST:
            pass
        else:
            raise
|
OSError
|
dataset/ETHPy150Open turbulenz/turbulenz_local/turbulenz_local/local_server.py/mkdir
|
7,304
|
def rmdir(path, verbose=True):
    def _handle_remove_readonly(func, path, exc):
        excvalue = exc[1]
        if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:
            os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)  # 0777
            func(path)
        else:
            raise
    if verbose:
        echo('Removing: %s' % path)
    try:
        rmtree(path, onerror=_handle_remove_readonly)
    except __HOLE__:
        pass
#######################################################################################################################
|
OSError
|
dataset/ETHPy150Open turbulenz/turbulenz_local/turbulenz_local/local_server.py/rmdir
|
7,305
|
def command_devserver_js(uglifyjs=None):
    uglifyjs = uglifyjs or 'external/uglifyjs/bin/uglifyjs'

    def compactor(dev_filename, rel_filename):
        # Use compactor to generate release version.
        echo('Compacting: %s -> %s' % (dev_filename, rel_filename))
        rc = call('node %s -o %s %s' % (uglifyjs, rel_filename, dev_filename), shell=True)
        if rc != 0:
            error('Failed to run uglifyjs, specify location with --uglifyjs')
            exit(1)

    versions_yaml = os.path.join(TURBULENZ_LOCAL, 'config', 'js_versions.yaml')
    dev_path = os.path.join(TURBULENZ_LOCAL, 'public', 'development')
    rel_path = os.path.join(TURBULENZ_LOCAL, 'public', 'release')
    ext = 'js'
    mkdir(os.path.join(rel_path, ext))
    try:
        compact(dev_path, rel_path, versions_yaml, ext, compactor)
    except __HOLE__ as e:
        error('Failed to save js version details: %s' % str(e))
|
IOError
|
dataset/ETHPy150Open turbulenz/turbulenz_local/turbulenz_local/local_server.py/command_devserver_js
|
7,306
|
def command_devserver_css(yuicompressor=None):
    yuicompressor = yuicompressor or 'external/yuicompressor/yuicompressor-2.4.2/yuicompressor-2.4.2.jar'

    def compactor(dev_filename, rel_filename):
        # Use compactor to generate release version.
        echo('Compacting: %s -> %s' % (dev_filename, rel_filename))
        rc = call('java -jar %s --type css -o %s %s' % (yuicompressor, rel_filename, dev_filename), shell=True)
        if rc != 0:
            error('Failed to run yuicompressor, specify location with --yuicompressor and check java')
            exit(1)

    versions_yaml = os.path.join(TURBULENZ_LOCAL, 'config', 'css_versions.yaml')
    dev_path = os.path.join(TURBULENZ_LOCAL, 'public', 'development')
    rel_path = os.path.join(TURBULENZ_LOCAL, 'public', 'release')
    ext = 'css'
    mkdir(os.path.join(rel_path, ext))
    try:
        compact(dev_path, rel_path, versions_yaml, ext, compactor, True)
    except __HOLE__ as e:
        error('Failed to save css version details: %s' % str(e))
|
IOError
|
dataset/ETHPy150Open turbulenz/turbulenz_local/turbulenz_local/local_server.py/command_devserver_css
|
7,307
|
def command_devserver_html():
    def compactor(dev_filename, rel_filename):
        # Use compactor to generate release version.
        echo('Compacting: %s -> %s' % (dev_filename, rel_filename))
        source_data = open(dev_filename, 'r').read()
        try:
            # Verify that the html file is correct
            htmlparser = HTMLParser(NullFormatter())
            htmlparser.feed(source_data)
            htmlparser.close()
            # Now try to minify
            output_file = open(rel_filename, 'wb')
            compactor = HTMLMinifier(output_file.write, True)
            compactor.feed(source_data)
            compactor.close()
            output_file.close()
        except HTMLParseError as e:
            error(str(e))
            exit(1)

    versions_yaml = os.path.join(TURBULENZ_LOCAL, 'config', 'html_versions.yaml')
    dev_path = os.path.join(TURBULENZ_LOCAL, 'public', 'development')
    rel_path = os.path.join(TURBULENZ_LOCAL, 'public', 'release')
    ext = 'html'
    mkdir(os.path.join(rel_path, ext))
    try:
        compact(dev_path, rel_path, versions_yaml, ext, compactor)
    except __HOLE__ as e:
        error('Failed to save html version details: %s' % str(e))
|
IOError
|
dataset/ETHPy150Open turbulenz/turbulenz_local/turbulenz_local/local_server.py/command_devserver_html
|
7,308
|
def command_devserver(args):
    # Devserver requires release Javascript and CSS
    if args.compile:
        command_devserver_js(args.uglifyjs)
        command_devserver_css(args.yuicompressor)
        command_devserver_html()

    if args.development:
        start_cmd = 'paster serve --reload development.ini'
    else:
        start_cmd = 'paster serve --reload release.ini'
    if args.options:
        start_cmd = '%s %s' % (start_cmd, args.options)

    try:
        call(start_cmd, cwd=args.home, shell=True)
    # We catch this in case we want to close the devserver
    except __HOLE__:
        pass
|
KeyboardInterrupt
|
dataset/ETHPy150Open turbulenz/turbulenz_local/turbulenz_local/local_server.py/command_devserver
|
7,309
|
def _create_sequences(self, func, iterable, chunksize, collector=None):
    """
    Create the WorkUnit objects to process and pushes them on the
    work queue. Each work unit is meant to process a slice of
    iterable of size chunksize. If collector is specified, then
    the ApplyResult objects associated with the jobs will notify
    collector when their result becomes ready.

    \return the list of WorkUnit objects (basically: JobSequences)
    pushed onto the work queue
    """
    assert not self._closed  # No lock here. We assume it's atomic...
    sequences = []
    results = []
    it_ = iter(iterable)
    exit_loop = False
    while not exit_loop:
        seq = []
        for i in xrange(chunksize or 1):
            try:
                arg = it_.next()
            except __HOLE__:
                exit_loop = True
                break
            apply_result = ApplyResult(collector)
            job = Job(func, (arg,), {}, apply_result)
            seq.append(job)
            results.append(apply_result)
        sequences.append(JobSequence(seq))
    for seq in sequences:
        self._workq.put(seq)
    return sequences
|
StopIteration
|
dataset/ETHPy150Open serverdensity/sd-agent/checks/libs/thread_pool.py/Pool._create_sequences
|
7,310
|
def next(self, timeout=None):
    """Return the next result value in the sequence. Raise
    StopIteration at the end. Can raise the exception raised by
    the Job"""
    try:
        apply_result = self._collector._get_result(self._idx, timeout)
    except __HOLE__:
        # Reset for next time
        self._idx = 0
        raise StopIteration
    except:
        self._idx = 0
        raise
    self._idx += 1
    assert apply_result.ready()
    return apply_result.get(0)
|
IndexError
|
dataset/ETHPy150Open serverdensity/sd-agent/checks/libs/thread_pool.py/CollectorIterator.next
|
7,311
|
def _get_result(self, idx, timeout=None):
    """Called by the CollectorIterator object to retrieve the
    result's values one after another, in the order the results have
    become available.

    \param idx The index of the result we want, wrt collector's order
    \param timeout integer telling how long to wait (in seconds)
    for the result at index idx to be available, or None (wait
    forever)
    """
    self._cond.acquire()
    try:
        if idx >= self._expected:
            raise IndexError
        elif idx < len(self._collection):
            return self._collection[idx]
        elif idx != len(self._collection):
            # Violation of the sequence protocol
            raise IndexError()
        else:
            self._cond.wait(timeout=timeout)
            try:
                return self._collection[idx]
            except __HOLE__:
                # Still not added !
                raise TimeoutError("Timeout while waiting for results")
    finally:
        self._cond.release()
|
IndexError
|
dataset/ETHPy150Open serverdensity/sd-agent/checks/libs/thread_pool.py/UnorderedResultCollector._get_result
|
7,312
|
def _test():
    """Some tests"""
    import thread
    import time

    def f(x):
        return x*x

    def work(seconds):
        print "[%d] Start to work for %fs..." % (thread.get_ident(), seconds)
        time.sleep(seconds)
        print "[%d] Work done (%fs)." % (thread.get_ident(), seconds)
        return "%d slept %fs" % (thread.get_ident(), seconds)

    ### Test copy/pasted from multiprocessing
    pool = Pool(9)  # start 9 worker threads
    result = pool.apply_async(f, (10,))  # evaluate "f(10)" asynchronously
    print result.get(timeout=1)  # prints "100" unless slow computer
    print pool.map(f, range(10))  # prints "[0, 1, 4,..., 81]"
    it = pool.imap(f, range(10))
    print it.next()  # prints "0"
    print it.next()  # prints "1"
    print it.next(timeout=1)  # prints "4" unless slow computer

    # Test apply_sync exceptions
    result = pool.apply_async(time.sleep, (3,))
    try:
        print result.get(timeout=1)  # raises `TimeoutError`
    except TimeoutError:
        print "Good. Got expected timeout exception."
    else:
        assert False, "Expected exception !"
    print result.get()

    def cb(s):
        print "Result ready: %s" % s

    # Test imap()
    for res in pool.imap(work, xrange(10, 3, -1), chunksize=4):
        print "Item:", res

    # Test imap_unordered()
    for res in pool.imap_unordered(work, xrange(10, 3, -1)):
        print "Item:", res

    # Test map_async()
    result = pool.map_async(work, xrange(10), callback=cb)
    try:
        print result.get(timeout=1)  # raises `TimeoutError`
    except TimeoutError:
        print "Good. Got expected timeout exception."
    else:
        assert False, "Expected exception !"
    print result.get()

    # Test imap_async()
    result = pool.imap_async(work, xrange(3, 10), callback=cb)
    try:
        print result.get(timeout=1)  # raises `TimeoutError`
    except TimeoutError:
        print "Good. Got expected timeout exception."
    else:
        assert False, "Expected exception !"
    for i in result.get():
        print "Item:", i
    print "### Loop again:"
    for i in result.get():
        print "Item2:", i

    # Test imap_unordered_async()
    result = pool.imap_unordered_async(work, xrange(10, 3, -1), callback=cb)
    try:
        print result.get(timeout=1)  # raises `TimeoutError`
    except TimeoutError:
        print "Good. Got expected timeout exception."
    else:
        assert False, "Expected exception !"
    for i in result.get():
        print "Item1:", i
    for i in result.get():
        print "Item2:", i
    r = result.get()
    for i in r:
        print "Item3:", i
    for i in r:
        print "Item4:", i
    for i in r:
        print "Item5:", i

    #
    # The case for the exceptions
    #

    # Exceptions in imap_unordered_async()
    result = pool.imap_unordered_async(work, xrange(2, -10, -1), callback=cb)
    time.sleep(3)
    try:
        for i in result.get():
            print "Got item:", i
    except __HOLE__:
        print "Good. Got expected exception:"
        traceback.print_exc()

    # Exceptions in imap_async()
    result = pool.imap_async(work, xrange(2, -10, -1), callback=cb)
    time.sleep(3)
    try:
        for i in result.get():
            print "Got item:", i
    except IOError:
        print "Good. Got expected exception:"
        traceback.print_exc()

    # Stop the test: need to stop the pool !!!
    pool.terminate()
    print "End of tests"
|
IOError
|
dataset/ETHPy150Open serverdensity/sd-agent/checks/libs/thread_pool.py/_test
|
7,313
|
def Validate(self, value, key=None):
    """Validates a timezone."""
    if value is None:
        return
    if not isinstance(value, basestring):
        raise TypeError('timezone must be a string, not \'%r\'' % type(value))
    if pytz is None:
        return value
    try:
        pytz.timezone(value)
    except pytz.UnknownTimeZoneError:
        raise validation.ValidationError('timezone \'%s\' is unknown' % value)
    except __HOLE__:
        return value
    except:
        unused_e, v, t = sys.exc_info()
        logging.warning('pytz raised an unexpected error: %s.\n' % (v) +
                        'Traceback:\n' + '\n'.join(traceback.format_tb(t)))
        raise
    return value
|
IOError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/google/appengine/api/croninfo.py/TimezoneValidator.Validate
|
7,314
|
def get_system_username():
    """
    Try to determine the current system user's username.

    :returns: The username as a unicode string, or an empty string if the
        username could not be determined.
    """
    try:
        result = getpass.getuser()
    except (__HOLE__, KeyError):
        # KeyError will be raised by os.getpwuid() (called by getuser())
        # if there is no corresponding entry in the /etc/passwd file
        # (a very restricted chroot environment, for example).
        return ''
    if six.PY2:
        try:
            result = result.decode(DEFAULT_LOCALE_ENCODING)
        except UnicodeDecodeError:
            # UnicodeDecodeError - preventive treatment for non-latin Windows.
            return ''
    return result
|
ImportError
|
dataset/ETHPy150Open django/django/django/contrib/auth/management/__init__.py/get_system_username
|
7,315
|
def get_default_username(check_db=True):
    """
    Try to determine the current system user's username to use as a default.

    :param check_db: If ``True``, requires that the username does not match an
        existing ``auth.User`` (otherwise returns an empty string).
    :returns: The username, or an empty string if no username can be
        determined.
    """
    # This file is used in apps.py, it should not trigger models import.
    from django.contrib.auth import models as auth_app

    # If the User model has been swapped out, we can't make any assumptions
    # about the default user name.
    if auth_app.User._meta.swapped:
        return ''

    default_username = get_system_username()
    try:
        default_username = (
            unicodedata.normalize('NFKD', default_username)
            .encode('ascii', 'ignore').decode('ascii')
            .replace(' ', '').lower()
        )
    except __HOLE__:
        return ''

    # Run the username validator
    try:
        auth_app.User._meta.get_field('username').run_validators(default_username)
    except exceptions.ValidationError:
        return ''

    # Don't return the default username if it is already taken.
    if check_db and default_username:
        try:
            auth_app.User._default_manager.get(username=default_username)
        except auth_app.User.DoesNotExist:
            pass
        else:
            return ''

    return default_username
|
UnicodeDecodeError
|
dataset/ETHPy150Open django/django/django/contrib/auth/management/__init__.py/get_default_username
|
7,316
|
def exists(self, path):
    try:
        self._sftp.lstat(path)
        return True
    except __HOLE__:
        return False
|
IOError
|
dataset/ETHPy150Open openstack/fuel-devops/devops/helpers/ssh_client.py/SSHClient.exists
|
7,317
|
def isfile(self, path):
    try:
        attrs = self._sftp.lstat(path)
        return attrs.st_mode & stat.S_IFREG != 0
    except __HOLE__:
        return False
|
IOError
|
dataset/ETHPy150Open openstack/fuel-devops/devops/helpers/ssh_client.py/SSHClient.isfile
|
7,318
|
def isdir(self, path):
    try:
        attrs = self._sftp.lstat(path)
        return attrs.st_mode & stat.S_IFDIR != 0
    except __HOLE__:
        return False
|
IOError
|
dataset/ETHPy150Open openstack/fuel-devops/devops/helpers/ssh_client.py/SSHClient.isdir
|
7,319
|
def parse(self, bibtex_contents):
    ret = []
    cleaned_string = bibtex_contents.replace("\&", "").replace("%", "").strip()
    entries = ["@"+entry for entry in cleaned_string.split("@") if entry]
    biblio_list = self._parse_bibtex_entries(entries)
    for biblio in biblio_list:
        parsed = {}
        try:
            mykey = biblio.entries.keys()[0]
        except AttributeError:
            # doesn't seem to be a valid biblio object, so skip to the next one
            logger.info(u"%20s NO DOI because no entries attribute in %s" % (self.provider_name, biblio))
            continue
        try:
            parsed["journal"] = self._to_unicode(biblio.entries[mykey].fields["journal"])
        except KeyError:
            parsed["journal"] = ""
        try:
            lnames = [person.get_part_as_text("last") for person in biblio.entries[mykey].persons["author"]]
            parsed["first_author"] = self._to_unicode(lnames[0])
        except (KeyError, AttributeError):
            try:
                parsed["first_author"] = self._to_unicode(biblio.entries[mykey].fields["author"][0].split(",")[0])
            except (KeyError, __HOLE__):
                parsed["first_author"] = ""
        try:
            lnames = [person.get_part_as_text("last") for person in biblio.entries[mykey].persons["author"]]
            parsed["authors"] = self._to_unicode(", ".join(lnames))
        except (KeyError, AttributeError):
            parsed["authors"] = ""
        try:
            parsed["number"] = biblio.entries[mykey].fields["number"]
        except KeyError:
            parsed["number"] = ""
        try:
            parsed["volume"] = biblio.entries[mykey].fields["volume"]
        except KeyError:
            parsed["volume"] = ""
        try:
            pages = biblio.entries[mykey].fields["pages"]
            parsed["first_page"] = pages.split("--")[0]
        except KeyError:
            parsed["first_page"] = ""
        try:
            year_string = biblio.entries[mykey].fields["year"].replace("{}", "")
            parsed["year"] = re.sub("\D", "", year_string)
        except KeyError:
            parsed["year"] = ""
        try:
            parsed["title"] = self._to_unicode(biblio.entries[mykey].fields["title"])
        except KeyError:
            parsed["title"] = ""
        #parsed["key"] = mykey
        ret.append(parsed)
    return ret
|
AttributeError
|
dataset/ETHPy150Open Impactstory/total-impact-core/totalimpact/providers/bibtex.py/Bibtex.parse
|
7,320
|
def forwards(self, orm):
    # Removing unique constraint on 'KeyValue', fields ['language', 'digest']
    try:
        db.delete_unique('datatrans_keyvalue', ['language', 'digest'])
    except __HOLE__:
        print "    WARNING: current index didn't exist"

    # Adding field 'KeyValue.content_type'
    db.add_column('datatrans_keyvalue', 'content_type', self.gf('django.db.models.fields.related.ForeignKey')(default=None, null=True, to=orm['contenttypes.ContentType']), keep_default=False)

    # Adding field 'KeyValue.object_id'
    db.add_column('datatrans_keyvalue', 'object_id', self.gf('django.db.models.fields.PositiveIntegerField')(default=None, null=True), keep_default=False)

    # Adding field 'KeyValue.field'
    db.add_column('datatrans_keyvalue', 'field', self.gf('django.db.models.fields.TextField')(default=""), keep_default=False)
|
ValueError
|
dataset/ETHPy150Open hzlf/openbroadcast/website/apps/datatrans/migrations/0003_auto__add_field_keyvalue_content_type__add_field_keyvalue_object_id__a.py/Migration.forwards
|
7,321
|
def get_owner_username(domain, owner_type, facility_id):
    if not owner_type:
        return ''
    facility_index_by_domain = indexed_facilities()
    try:
        return facility_index_by_domain[domain][facility_id][owner_type]
    except __HOLE__:
        return None
|
KeyError
|
dataset/ETHPy150Open dimagi/commcare-hq/custom/_legacy/hsph/tasks.py/get_owner_username
|
7,322
|
def get_group_id(domain, owner_type, facility_id):
    owner_username = get_owner_username(domain, owner_type, facility_id)
    try:
        return INDEXED_GROUPS[domain][owner_username]._id
    except __HOLE__:
        return None
|
KeyError
|
dataset/ETHPy150Open dimagi/commcare-hq/custom/_legacy/hsph/tasks.py/get_group_id
|
7,323
|
def __add__(self, other):
    try:
        return theano.tensor.basic.add(self, other)
    # We should catch the minimum number of exceptions here.
    # Otherwise this will convert errors when the Theano flag
    # compute_test_value is used.
    # Evidently, we need to catch NotImplementedError.
    # TypeError from as_tensor_variable is caught in Elemwise.make_node;
    # otherwise TensorVariable * SparseVariable won't work!
    except (__HOLE__, AsTensorError):
        # We must return NotImplemented and not a
        # NotImplementedError or raise a NotImplementedError.
        # That way python will give a good error message like this
        # `TypeError: unsupported operand type(s) for +:
        # 'TensorVariable' and 'TensorVariable'`
        return NotImplemented
|
NotImplementedError
|
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/tensor/var.py/_tensor_py_operators.__add__
|
7,324
|
def __sub__(self, other):
    # See explanation in __add__ for the errors caught
    # and the return value in that case
    try:
        return theano.tensor.basic.sub(self, other)
    except (__HOLE__, AsTensorError):
        return NotImplemented
|
NotImplementedError
|
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/tensor/var.py/_tensor_py_operators.__sub__
|
7,325
|
def __mul__(self, other):
    # See explanation in __add__ for the errors caught
    # and the return value in that case
    try:
        return theano.tensor.mul(self, other)
    except (__HOLE__, AsTensorError):
        return NotImplemented
|
NotImplementedError
|
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/tensor/var.py/_tensor_py_operators.__mul__
|
7,326
|
def __div__(self, other):
    # See explanation in __add__ for the errors caught
    # and the return value in that case
    try:
        return theano.tensor.basic.div_proxy(self, other)
    except IntegerDivisionError:
        # This is to raise the exception that occurs when trying to divide
        # two integer arrays (currently forbidden).
        raise
    except (__HOLE__, AsTensorError):
        return NotImplemented
|
NotImplementedError
|
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/tensor/var.py/_tensor_py_operators.__div__
|
7,327
|
def __pow__(self, other):
    # See explanation in __add__ for the errors caught
    # and the return value in that case
    try:
        return theano.tensor.basic.pow(self, other)
    except (__HOLE__, AsTensorError):
        return NotImplemented
|
NotImplementedError
|
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/tensor/var.py/_tensor_py_operators.__pow__
|
7,328
|
def __mod__(self, other):
    # See explanation in __add__ for the errors caught
    # and the return value in that case
    try:
        return theano.tensor.basic.mod_check(self, other)
    except ComplexError:
        # This is to raise the exception that occurs when trying to compute
        # x % y with either x or y a complex number.
        raise
    except (__HOLE__, AsTensorError):
        return NotImplemented
|
NotImplementedError
|
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/tensor/var.py/_tensor_py_operators.__mod__
|
7,329
|
def transpose(self, *axes):
    """
    Returns
    -------
    object
        `tensor.transpose(self, axes)` or `tensor.transpose(self, axes[0])`.

    If only one `axes` argument is provided and it is iterable, then it is
    assumed to be the entire axes tuple, and passed intact to
    tensor.transpose.
    """
    if len(axes) == 0:
        return theano.tensor.basic.transpose(self)
    try:
        iter(axes[0])
        iterable = True
    except __HOLE__:
        iterable = False
    if len(axes) == 1 and iterable:
        return theano.tensor.basic.transpose(self, axes[0])
    else:
        return theano.tensor.basic.transpose(self, axes)
|
TypeError
|
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/tensor/var.py/_tensor_py_operators.transpose
|
7,330
|
def __iter__(self):
    try:
        for i in xrange(theano.tensor.basic.get_vector_length(self)):
            yield self[i]
    except __HOLE__:
        # This prevents accidental iteration via builtin.sum(self)
        raise TypeError(('TensorType does not support iteration. '
                         'Maybe you are using builtin.sum instead of '
                         'theano.tensor.sum? (Maybe .max?)'))

# CONVENIENT ACCESS TO TYPE PROPERTIES
|
TypeError
|
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/tensor/var.py/_tensor_py_operators.__iter__
|
7,331
|
def _get_sum(self):
    """Compute sum of non NaN / Inf values in the array."""
    try:
        return self._sum
    except __HOLE__:
        self._sum = self.no_nan.sum()
        # The following 2 lines are needed as in Python 3.3 with NumPy
        # 1.7.1, numpy.ndarray and numpy.memmap aren't hashable.
        if type(self._sum) is numpy.memmap:
            self._sum = numpy.asarray(self._sum).item()
        if self.has_nan and self.no_nan.mask.all():
            # In this case the sum is not properly computed by numpy.
            self._sum = 0
        if numpy.isinf(self._sum) or numpy.isnan(self._sum):
            # NaN may happen when there are both -inf and +inf values.
            if self.has_nan:
                # Filter both NaN and Inf values.
                mask = self.no_nan.mask + numpy.isinf(self[1])
            else:
                # Filter only Inf values.
                mask = numpy.isinf(self[1])
            if mask.all():
                self._sum = 0
            else:
                self._sum = numpy.ma.masked_array(self[1], mask).sum()
            # At this point there should be no more NaN.
            assert not numpy.isnan(self._sum)
    return self._sum
|
AttributeError
|
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/tensor/var.py/TensorConstantSignature._get_sum
|
7,332
|
def _get_no_nan(self):
    try:
        return self._no_nan
    except __HOLE__:
        nan_mask = numpy.isnan(self[1])
        if nan_mask.any():
            self._no_nan = numpy.ma.masked_array(self[1], nan_mask)
            self.has_nan = True
        else:
            self._no_nan = self[1]
            self.has_nan = False
    return self._no_nan
|
AttributeError
|
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/tensor/var.py/TensorConstantSignature._get_no_nan
|
7,333
|
def get_child_index(self, key):
    try:
        return self.get_child_keys().index(key)
    except __HOLE__:
        errorstring = ("Can't find key %s in ParentNode %s\n" +
                       "ParentNode items: %s")
        raise TreeWidgetError(errorstring % (key, self.get_key(),
                              str(self.get_child_keys())))
|
ValueError
|
dataset/ETHPy150Open AnyMesh/anyMesh-Python/example/urwid/treetools.py/ParentNode.get_child_index
|
7,334
|
def start(self):
    """ Run the server. """
    # Bind to the UDP socket.
    # IPv4 only
    self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    self.socket.setblocking(0)
    try:
        self.socket.bind(self.address)
    except socket.gaierror:
        if self.address[0] == 'localhost':
            log.warning("Warning localhost seems undefined in your host file, using 127.0.0.1 instead")
            self.address = ('127.0.0.1', self.address[1])
            self.socket.bind(self.address)

    log.info('Listening on host & port: %s' % str(self.address))

    # Inline variables for quick look-up.
    buffer_size = self.buffer_size
    aggregator_submit = self.metrics_aggregator.submit_packets
    sock = [self.socket]
    socket_recv = self.socket.recv
    select_select = select.select
    select_error = select.error
    timeout = UDP_SOCKET_TIMEOUT
    should_forward = self.should_forward
    forward_udp_sock = self.forward_udp_sock

    # Run our select loop.
    self.running = True
    while self.running:
        try:
            ready = select_select(sock, [], [], timeout)
            if ready[0]:
                message = socket_recv(buffer_size)
                aggregator_submit(message)
                if should_forward:
                    forward_udp_sock.send(message)
        except select_error, se:
            # Ignore interrupted system calls from sigterm.
            errno = se[0]
            if errno != 4:
                raise
        except (__HOLE__, SystemExit):
            break
        except Exception:
            log.exception('Error receiving datagram')
|
KeyboardInterrupt
|
dataset/ETHPy150Open serverdensity/sd-agent/dogstatsd.py/Server.start
|
7,335
|
def qapply_Mul(e, **options):
    ip_doit = options.get('ip_doit', True)

    args = list(e.args)

    # If we only have 0 or 1 args, we have nothing to do and return.
    if len(args) <= 1 or not isinstance(e, Mul):
        return e
    rhs = args.pop()
    lhs = args.pop()

    # Make sure we have two non-commutative objects before proceeding.
    if (sympify(rhs).is_commutative and not isinstance(rhs, Wavefunction)) or \
            (sympify(lhs).is_commutative and not isinstance(lhs, Wavefunction)):
        return e

    # For a Pow with an integer exponent, apply one of them and reduce the
    # exponent by one.
    if isinstance(lhs, Pow) and lhs.exp.is_Integer:
        args.append(lhs.base**(lhs.exp - 1))
        lhs = lhs.base

    # Pull OuterProduct apart
    if isinstance(lhs, OuterProduct):
        args.append(lhs.ket)
        lhs = lhs.bra

    # Call .doit() on Commutator/AntiCommutator.
    if isinstance(lhs, (Commutator, AntiCommutator)):
        comm = lhs.doit()
        if isinstance(comm, Add):
            return qapply(
                e.func(*(args + [comm.args[0], rhs])) +
                e.func(*(args + [comm.args[1], rhs])),
                **options
            )
        else:
            return qapply(e.func(*args)*comm*rhs, **options)

    # Apply tensor products of operators to states
    if isinstance(lhs, TensorProduct) and all([isinstance(arg, (Operator, State, Mul, Pow)) or arg == 1 for arg in lhs.args]) and \
            isinstance(rhs, TensorProduct) and all([isinstance(arg, (Operator, State, Mul, Pow)) or arg == 1 for arg in rhs.args]) and \
            len(lhs.args) == len(rhs.args):
        result = TensorProduct(*[qapply(lhs.args[n]*rhs.args[n], **options) for n in range(len(lhs.args))]).expand(tensorproduct=True)
        return qapply_Mul(e.func(*args), **options)*result

    # Now try to actually apply the operator and build an inner product.
    try:
        result = lhs._apply_operator(rhs, **options)
    except (NotImplementedError, __HOLE__):
        try:
            result = rhs._apply_operator(lhs, **options)
        except (NotImplementedError, AttributeError):
            if isinstance(lhs, BraBase) and isinstance(rhs, KetBase):
                result = InnerProduct(lhs, rhs)
                if ip_doit:
                    result = result.doit()
            else:
                result = None

    # TODO: I may need to expand before returning the final result.
    if result == 0:
        return S.Zero
    elif result is None:
        if len(args) == 0:
            # We had two args to begin with so args=[].
            return e
        else:
            return qapply_Mul(e.func(*(args + [lhs])), **options)*rhs
    elif isinstance(result, InnerProduct):
        return result*qapply_Mul(e.func(*args), **options)
    else:  # result is a scalar times a Mul, Add or TensorProduct
        return qapply(e.func(*args)*result, **options)
|
AttributeError
|
dataset/ETHPy150Open sympy/sympy/sympy/physics/quantum/qapply.py/qapply_Mul
|
7,336
|
def deploy_service(
    service,
    instance,
    marathon_jobid,
    config,
    client,
    bounce_method,
    drain_method_name,
    drain_method_params,
    nerve_ns,
    bounce_health_params,
    soa_dir,
):
    """Deploy the service to marathon, either directly or via a bounce if needed.
    Called by setup_service when it's time to actually deploy.

    :param service: The name of the service to deploy
    :param instance: The instance of the service to deploy
    :param marathon_jobid: Full id of the marathon job
    :param config: The complete configuration dict to send to marathon
    :param client: A MarathonClient object
    :param bounce_method: The bounce method to use, if needed
    :param drain_method_name: The name of the traffic draining method to use.
    :param nerve_ns: The nerve namespace to look in.
    :param bounce_health_params: A dictionary of options for bounce_lib.get_happy_tasks.
    :returns: A tuple of (status, output) to be used with send_sensu_event"""

    def log_deploy_error(errormsg, level='event'):
        return _log(
            service=service,
            line=errormsg,
            component='deploy',
            level='event',
            cluster=cluster,
            instance=instance
        )

    short_id = marathon_tools.format_job_id(service, instance)

    system_paasta_config = load_system_paasta_config()
    cluster = system_paasta_config.get_cluster()
    existing_apps = marathon_tools.get_matching_apps(service, instance, client, embed_failures=True)
    new_app_list = [a for a in existing_apps if a.id == '/%s' % config['id']]
    other_apps = [a for a in existing_apps if a.id != '/%s' % config['id']]
    serviceinstance = "%s.%s" % (service, instance)

    if new_app_list:
        new_app = new_app_list[0]
        if len(new_app_list) != 1:
            raise ValueError("Only expected one app per ID; found %d" % len(new_app_list))
        new_app_running = True
        happy_new_tasks = bounce_lib.get_happy_tasks(new_app, service, nerve_ns, system_paasta_config,
                                                     **bounce_health_params)
    else:
        new_app_running = False
        happy_new_tasks = []

    try:
        drain_method = drain_lib.get_drain_method(
            drain_method_name,
            service=service,
            instance=instance,
            nerve_ns=nerve_ns,
            drain_method_params=drain_method_params,
        )
    except __HOLE__:
        errormsg = 'ERROR: drain_method not recognized: %s. Must be one of (%s)' % \
            (drain_method_name, ', '.join(drain_lib.list_drain_methods()))
        log_deploy_error(errormsg)
        return (1, errormsg)

    old_app_live_happy_tasks, old_app_live_unhappy_tasks, old_app_draining_tasks = get_old_happy_unhappy_draining_tasks(
        other_apps,
        drain_method,
        service,
        nerve_ns,
        bounce_health_params,
        system_paasta_config,
    )

    if new_app_running:
        protected_draining_tasks = set()
        if new_app.instances < config['instances']:
            client.scale_app(app_id=new_app.id, instances=config['instances'], force=True)
        elif new_app.instances > config['instances']:
            num_tasks_to_scale = max(min(len(new_app.tasks), new_app.instances) - config['instances'], 0)
            task_dict = get_old_happy_unhappy_draining_tasks_for_app(
                new_app,
                drain_method,
                service,
                nerve_ns,
                bounce_health_params,
                system_paasta_config,
            )
            scaling_app_happy_tasks = list(task_dict['happy'])
            scaling_app_unhappy_tasks = list(task_dict['unhappy'])
            scaling_app_draining_tasks = list(task_dict['draining'])

            tasks_to_move_draining = min(len(scaling_app_draining_tasks), num_tasks_to_scale)
            old_app_draining_tasks[new_app.id] = set(scaling_app_draining_tasks[:tasks_to_move_draining])
            protected_draining_tasks.update(scaling_app_draining_tasks[:tasks_to_move_draining])
            num_tasks_to_scale = num_tasks_to_scale - tasks_to_move_draining

            tasks_to_move_unhappy = min(len(scaling_app_unhappy_tasks), num_tasks_to_scale)
            old_app_live_unhappy_tasks[new_app.id] = set(scaling_app_unhappy_tasks[:tasks_to_move_unhappy])
            num_tasks_to_scale = num_tasks_to_scale - tasks_to_move_unhappy

            tasks_to_move_happy = min(len(scaling_app_happy_tasks), num_tasks_to_scale)
            old_app_live_happy_tasks[new_app.id] = set(scaling_app_happy_tasks[:tasks_to_move_happy])
            happy_new_tasks = scaling_app_happy_tasks[tasks_to_move_happy:]

        # If any tasks on the new app happen to be draining (e.g. someone reverts to an older version with
        # `paasta mark-for-deployment`), then we should undrain them.
        for task in new_app.tasks:
            if task not in protected_draining_tasks:
                drain_method.stop_draining(task)

    # Re-drain any already draining tasks on old apps
    for tasks in old_app_draining_tasks.values():
        for task in tasks:
            drain_method.drain(task)

    # log all uncaught exceptions and raise them again
    try:
        try:
            bounce_func = bounce_lib.get_bounce_method_func(bounce_method)
        except KeyError:
            errormsg = 'ERROR: bounce_method not recognized: %s. Must be one of (%s)' % \
                (bounce_method, ', '.join(bounce_lib.list_bounce_methods()))
            log_deploy_error(errormsg)
            return (1, errormsg)

        try:
            with bounce_lib.bounce_lock_zookeeper(short_id):
                do_bounce(
                    bounce_func=bounce_func,
                    drain_method=drain_method,
                    config=config,
                    new_app_running=new_app_running,
                    happy_new_tasks=happy_new_tasks,
                    old_app_live_happy_tasks=old_app_live_happy_tasks,
                    old_app_live_unhappy_tasks=old_app_live_unhappy_tasks,
                    old_app_draining_tasks=old_app_draining_tasks,
                    service=service,
                    bounce_method=bounce_method,
                    serviceinstance=serviceinstance,
                    cluster=cluster,
                    instance=instance,
                    marathon_jobid=marathon_jobid,
                    client=client,
                    soa_dir=soa_dir,
                )
        except bounce_lib.LockHeldException:
            log.error("Instance %s already being bounced. Exiting", short_id)
            return (1, "Instance %s is already being bounced." % short_id)
    except Exception:
        loglines = ['Exception raised during deploy of service %s:' % service]
        loglines.extend(traceback.format_exc().rstrip().split("\n"))
        for logline in loglines:
            log_deploy_error(logline, level='debug')
        raise

    return (0, 'Service deployed.')
|
KeyError
|
dataset/ETHPy150Open Yelp/paasta/paasta_tools/setup_marathon_job.py/deploy_service
|
7,337
|
def main():
    """Attempt to set up the marathon service instance given.
    Exits 1 if the deployment failed.
    This is done in the following order:

    - Load the marathon configuration
    - Connect to marathon
    - Load the service instance's configuration
    - Create the complete marathon job configuration
    - Deploy/bounce the service
    - Emit an event about the deployment to sensu"""
    args = parse_args()
    soa_dir = args.soa_dir
    if args.verbose:
        log.setLevel(logging.DEBUG)
    else:
        log.setLevel(logging.WARNING)
    try:
        service, instance, _, __ = decompose_job_id(args.service_instance)
    except InvalidJobNameError:
        log.error("Invalid service instance specified. Format is service%sinstance." % SPACER)
        sys.exit(1)

    marathon_config = get_main_marathon_config()
    client = marathon_tools.get_marathon_client(marathon_config.get_url(), marathon_config.get_username(),
                                                marathon_config.get_password())

    try:
        service_instance_config = marathon_tools.load_marathon_service_config(
            service,
            instance,
            load_system_paasta_config().get_cluster(),
            soa_dir=soa_dir,
        )
    except NoDeploymentsAvailable:
        log.debug("No deployments found for %s in cluster %s. Skipping." % (args.service_instance,
                                                                            load_system_paasta_config().get_cluster()))
        sys.exit(0)
    except NoConfigurationForServiceError:
        error_msg = "Could not read marathon configuration file for %s in cluster %s" % \
                    (args.service_instance, load_system_paasta_config().get_cluster())
        log.error(error_msg)
        sys.exit(1)

    try:
        status, output = setup_service(service, instance, client, marathon_config,
                                       service_instance_config, soa_dir)
        sensu_status = pysensu_yelp.Status.CRITICAL if status else pysensu_yelp.Status.OK
        send_event(service, instance, soa_dir, sensu_status, output)
        # We exit 0 because the script finished ok and the event was sent to the right team.
        sys.exit(0)
    except (__HOLE__, TypeError, AttributeError, InvalidInstanceConfig):
        import traceback
        error_str = traceback.format_exc()
        log.error(error_str)
        send_event(service, instance, soa_dir, pysensu_yelp.Status.CRITICAL, error_str)
        # We exit 0 because the script finished ok and the event was sent to the right team.
        sys.exit(0)
|
KeyError
|
dataset/ETHPy150Open Yelp/paasta/paasta_tools/setup_marathon_job.py/main
|
7,338
|
def load_floorc_json():
    # Expose a few settings for curious users to tweak
    s = {
        'expert_mode': False,
        'debug': False,
    }
    try:
        with open(G.FLOORC_JSON_PATH, 'r') as fd:
            floorc_json = fd.read()
    except IOError as e:
        if e.errno == errno.ENOENT:
            return s
        raise
    try:
        default_settings = json.loads(floorc_json)
    except __HOLE__:
        return s

    for k, v in default_settings.items():
        s[k.upper()] = v
    return s
|
ValueError
|
dataset/ETHPy150Open Floobits/floobits-sublime/floo/common/utils.py/load_floorc_json
|
7,339
|
def _set_timeout(func, timeout, repeat, *args, **kwargs):
    timeout_id = set_timeout._top_timeout_id
    if timeout_id > 100000:
        set_timeout._top_timeout_id = 0
    else:
        set_timeout._top_timeout_id += 1

    try:
        from . import api
    except __HOLE__:
        import api

    @api.send_errors
    def timeout_func():
        timeout_ids.discard(timeout_id)
        if timeout_id in cancelled_timeouts:
            cancelled_timeouts.remove(timeout_id)
            return
        func(*args, **kwargs)
        if repeat:
            editor.set_timeout(timeout_func, timeout)
            timeout_ids.add(timeout_id)
    editor.set_timeout(timeout_func, timeout)
    timeout_ids.add(timeout_id)
    return timeout_id
|
ImportError
|
dataset/ETHPy150Open Floobits/floobits-sublime/floo/common/utils.py/_set_timeout
|
7,340
|
def is_shared(p):
    if not G.AGENT or not G.AGENT.joined_workspace:
        return False
    p = unfuck_path(p)
    try:
        if to_rel_path(p).find('../') == 0:
            return False
    except __HOLE__:
        return False
    return True
|
ValueError
|
dataset/ETHPy150Open Floobits/floobits-sublime/floo/common/utils.py/is_shared
|
7,341
|
def read_floo_file(path):
    floo_file = os.path.join(path, '.floo')

    info = {}
    try:
        floo_info = open(floo_file, 'rb').read().decode('utf-8')
        info = json.loads(floo_info)
    except (IOError, __HOLE__):
        pass
    except Exception as e:
        msg.warn('Couldn\'t read .floo file: ', floo_file, ': ', str_e(e))
    return info
|
OSError
|
dataset/ETHPy150Open Floobits/floobits-sublime/floo/common/utils.py/read_floo_file
|
7,342
|
def get_persistent_data(per_path=None):
    per_data = {'recent_workspaces': [], 'workspaces': {}}
    per_path = per_path or os.path.join(G.BASE_DIR, 'persistent.json')
    try:
        per = open(per_path, 'rb')
    except (IOError, __HOLE__):
        msg.debug('Failed to open ', per_path, '. Recent workspace list will be empty.')
        return per_data
    try:
        data = per.read().decode('utf-8')
        persistent_data = json.loads(data)
    except Exception as e:
        msg.debug('Failed to parse ', per_path, '. Recent workspace list will be empty.')
        msg.debug(str_e(e))
        msg.debug(data)
        return per_data
    if 'recent_workspaces' not in persistent_data:
        persistent_data['recent_workspaces'] = []
    if 'workspaces' not in persistent_data:
        persistent_data['workspaces'] = {}
    return persistent_data
|
OSError
|
dataset/ETHPy150Open Floobits/floobits-sublime/floo/common/utils.py/get_persistent_data
|
7,343
|
def rm(path):
    """removes path and dirs going up until an OSError"""
    os.remove(path)
    try:
        os.removedirs(os.path.split(path)[0])
    except __HOLE__:
        pass
|
OSError
|
dataset/ETHPy150Open Floobits/floobits-sublime/floo/common/utils.py/rm
|
7,344
|
def mkdir(path):
    try:
        os.makedirs(path)
    except __HOLE__ as e:
        if e.errno != errno.EEXIST:
            editor.error_message('Cannot create directory {0}.\n{1}'.format(path, str_e(e)))
            raise
|
OSError
|
dataset/ETHPy150Open Floobits/floobits-sublime/floo/common/utils.py/mkdir
|
7,345
|
def _unwind_generator(gen_expr, cb=None, res=None):
    try:
        while True:
            maybe_func = res
            args = []
            # if the first arg is callable, we need to call it (and assume the last argument is a callback)
            if type(res) == tuple:
                maybe_func = len(res) and res[0]
            if not callable(maybe_func):
                # send only accepts one argument... this is slightly dangerous if
                # we ever just return a tuple of one element
                # TODO: catch no generator
                if type(res) == tuple and len(res) == 1:
                    res = gen_expr.send(res[0])
                else:
                    res = gen_expr.send(res)
                continue

            def f(*args):
                return _unwind_generator(gen_expr, cb, args)

            try:
                args = list(res)[1:]
            except:
                # assume not iterable
                args = []
            args.append(f)
            return maybe_func(*args)
        # TODO: probably shouldn't catch StopIteration to return since that can occur by accident...
    except __HOLE__:
        pass
    except __StopUnwindingException as e:
        res = e.ret_val
    if cb:
        return cb(res)
    return res
|
StopIteration
|
dataset/ETHPy150Open Floobits/floobits-sublime/floo/common/utils.py/_unwind_generator
|
7,346
|
def read_min_cells(self, ids):
    '''reads cells and caches only the minimum information necessary to compute
    compatibility checks. The reason is to save memory in the server.
    This function is only called by RefTranslator.get_published_min_refs, which is
    only called by CompatibilityClosureBuilder, only in FIND.
    Thus, FIND MUST always use a MemServerStore'''
    missing_ids = set(ids).difference(self.min_cells)
    cells = self._store.read_published_cells(missing_ids)
    for id_, cell in cells.iteritems():
        if isinstance(cell, SimpleCell):
            self.min_cells[id_] = (cell.root, list(cell.dependencies.targets))
        else:
            self.min_cells[id_] = (cell.root, list(cell.resource_leaves))

    result = {}
    for id_ in ids:
        try:
            result[id_] = self.min_cells[id_]
        except __HOLE__:
            pass
    return result
|
KeyError
|
dataset/ETHPy150Open biicode/bii-server/store/mem_server_store.py/MemServerStore.read_min_cells
|
7,347
|
def __init__(self):
    try:
        self.config = yaml.load(open(sys.argv[1]))
    except __HOLE__:
        print('Error: not specify config file')
        exit(1)

    self.dump_cmd = 'mysqldump -h {host} -P {port} -u {user} --password={password} {db} {table} ' \
                    '--default-character-set=utf8 -X'.format(**self.config['mysql'])
    self.binlog_conf = dict(
        [(key, self.config['mysql'][key]) for key in ['host', 'port', 'user', 'password', 'db']]
    )
    self.endpoint = 'http://{host}:{port}/{index}/{type}/_bulk'.format(
        host=self.config['elastic']['host'],
        port=self.config['elastic']['port'],
        index=self.config['elastic']['index'],
        type=self.config['elastic']['type']
    )  # todo: supporting multi-index

    self.mapping = self.config.get('mapping') or {}
    if self.mapping.get('_id'):
        self.id_key = self.mapping.pop('_id')
    else:
        self.id_key = None

    record_path = self.config['binlog_sync']['record_file']
    if os.path.isfile(record_path):
        with open(record_path, 'r') as f:
            record = yaml.load(f)
            self.log_file = record.get('log_file')
            self.log_pos = record.get('log_pos')

    self.bulk_size = self.config.get('elastic').get('bulk_size') or DEFAULT_BULKSIZE
    self.binlog_bulk_size = self.config.get('elastic').get('binlog_bulk_size') or DEFAULT_BINLOG_BULKSIZE

    self._init_logging()
|
IndexError
|
dataset/ETHPy150Open zhongbiaodev/py-mysql-elasticsearch-sync/es_sync/__init__.py/ElasticSync.__init__
|
7,348
|
def _formatter(self, data):
    """
    format every field from xml, according to parsed table structure
    """
    for item in data:
        for field, serializer in self.table_structure.items():
            if item['doc'][field]:
                try:
                    item['doc'][field] = serializer(item['doc'][field])
                except __HOLE__ as e:
                    self.logger.error(
                        "Error occurred during format, ErrorMessage:{msg}, ErrorItem:{item}".format(
                            msg=str(e),
                            item=str(item)))
                    item['doc'][field] = None
        # print(item)
        yield item
|
ValueError
|
dataset/ETHPy150Open zhongbiaodev/py-mysql-elasticsearch-sync/es_sync/__init__.py/ElasticSync._formatter
|
7,349
|
def _parse_and_remove(self, f, path):
    """
    snippet from python cookbook, for parsing large xml file
    """
    path_parts = path.split('/')
    doc = iterparse(f, ('start', 'end'), recover=False, encoding='utf-8', huge_tree=True)
    # Skip the root element
    next(doc)
    tag_stack = []
    elem_stack = []
    for event, elem in doc:
        if event == 'start':
            tag_stack.append(elem.tag)
            elem_stack.append(elem)
        elif event == 'end':
            if tag_stack == path_parts:
                yield elem
                elem_stack[-2].remove(elem)
            if tag_stack == ['database', 'table_structure']:
                # dirty hack for getting the tables structure
                self._parse_table_structure(elem)
                elem_stack[-2].remove(elem)
            try:
                tag_stack.pop()
                elem_stack.pop()
            except __HOLE__:
                pass
|
IndexError
|
dataset/ETHPy150Open zhongbiaodev/py-mysql-elasticsearch-sync/es_sync/__init__.py/ElasticSync._parse_and_remove
|
7,350
|
def got_message(self, connection, message):
    c = self.connections[connection]
    t = message[0]
    if t == BITFIELD and c.got_anything:
        connection.close()
        return
    c.got_anything = True
    if (t in [CHOKE, UNCHOKE, INTERESTED, NOT_INTERESTED] and
            len(message) != 1):
        connection.close()
        return
    if t == CHOKE:
        c.download.got_choke()
    elif t == UNCHOKE:
        c.download.got_unchoke()
    elif t == INTERESTED:
        c.upload.got_interested()
    elif t == NOT_INTERESTED:
        c.upload.got_not_interested()
    elif t == HAVE:
        if len(message) != 5:
            connection.close()
            return
        i = toint(message[1:])
        if i >= self.numpieces:
            connection.close()
            return
        c.download.got_have(i)
    elif t == BITFIELD:
        try:
            b = Bitfield(self.numpieces, message[1:])
        except __HOLE__:
            connection.close()
            return
        c.download.got_have_bitfield(b)
    elif t == REQUEST:
        if len(message) != 13:
            connection.close()
            return
        i = toint(message[1:5])
        if i >= self.numpieces:
            connection.close()
            return
        c.got_request(i, toint(message[5:9]),
                      toint(message[9:]))
    elif t == CANCEL:
        if len(message) != 13:
            connection.close()
            return
        i = toint(message[1:5])
        if i >= self.numpieces:
            connection.close()
            return
        c.upload.got_cancel(i, toint(message[5:9]),
                            toint(message[9:]))
    elif t == PIECE:
        if len(message) <= 9:
            connection.close()
            return
        i = toint(message[1:5])
        if i >= self.numpieces:
            connection.close()
            return
        if c.download.got_piece(i, toint(message[5:9]), message[9:]):
            self.got_piece(i)
    else:
        connection.close()
|
ValueError
|
dataset/ETHPy150Open Piratenfraktion-Berlin/OwnTube/videoportal/BitTornadoABC/BitTornado/BT1/Connecter.py/Connecter.got_message
|
7,351
|
def set_geometry(self, col, drop=False, inplace=False, crs=None):
    """
    Set the GeoDataFrame geometry using either an existing column or
    the specified input. By default yields a new object.

    The original geometry column is replaced with the input.

    Parameters
    ----------
    col : column label or array
    drop : boolean, default False
        Delete column to be used as the new geometry
    inplace : boolean, default False
        Modify the GeoDataFrame in place (do not create a new object)
    crs : str/result of fiona.get_crs (optional)
        Coordinate system to use. If passed, overrides both DataFrame and
        col's crs. Otherwise, tries to get crs from passed col values or
        DataFrame.

    Examples
    --------
    >>> df1 = df.set_geometry([Point(0,0), Point(1,1), Point(2,2)])
    >>> df2 = df.set_geometry('geom1')

    Returns
    -------
    geodataframe : GeoDataFrame
    """
    # Most of the code here is taken from DataFrame.set_index()
    if inplace:
        frame = self
    else:
        frame = self.copy()

    if not crs:
        crs = getattr(col, 'crs', self.crs)

    to_remove = None
    geo_column_name = DEFAULT_GEO_COLUMN_NAME
    if isinstance(col, (Series, list, np.ndarray)):
        level = col
    elif hasattr(col, 'ndim') and col.ndim != 1:
        raise ValueError("Must pass array with one dimension only.")
    else:
        try:
            level = frame[col].values
        except __HOLE__:
            raise ValueError("Unknown column %s" % col)
        except:
            raise
        if drop:
            to_remove = col
            geo_column_name = DEFAULT_GEO_COLUMN_NAME
        else:
            geo_column_name = col

    if to_remove:
        del frame[to_remove]

    if isinstance(level, GeoSeries) and level.crs != crs:
        # Avoids caching issues/crs sharing issues
        level = level.copy()
        level.crs = crs

    # Check that we are using a listlike of geometries
    if not all(isinstance(item, BaseGeometry) or not item for item in level):
        raise TypeError("Input geometry column must contain valid geometry objects.")
    frame[geo_column_name] = level
    frame._geometry_column_name = geo_column_name
    frame.crs = crs
    frame._invalidate_sindex()

    if not inplace:
        return frame
|
KeyError
|
dataset/ETHPy150Open geopandas/geopandas/geopandas/geodataframe.py/GeoDataFrame.set_geometry
|
7,352
|
def __init__(self, *args, **kwargs):
    super(to_dossier_store, self).__init__(*args, **kwargs)
    kvl = kvlayer.client()
    feature_indexes = None
    try:
        conf = yakonfig.get_global_config('dossier.store')
        feature_indexes = conf['feature_indexes']
    except __HOLE__:
        pass
    self.store = Store(kvl,
                       feature_indexes=feature_indexes)
    tfidf_path = self.config.get('tfidf_path')
    self.tfidf = gensim.models.TfidfModel.load(tfidf_path)
|
KeyError
|
dataset/ETHPy150Open dossier/dossier.models/dossier/models/etl/interface.py/to_dossier_store.__init__
|
7,353
|
def uni(s, encoding=None):
    # unicode string feat
    if not isinstance(s, unicode):
        try:
            return unicode(s, encoding)
        except:
            try:
                return unicode(s, 'utf-8')
            except __HOLE__:
                return unicode(s, 'latin-1')
    return s
|
UnicodeDecodeError
|
dataset/ETHPy150Open dossier/dossier.models/dossier/models/etl/interface.py/uni
|
7,354
|
@classmethod
def conf(cls):
    logger.debug("Preparing config for snapshot")
    nodes = db().query(Node).filter(
        Node.status.in_(['ready', 'provisioned', 'deploying', 'error'])
    ).all()

    dump_conf = deepcopy(settings.DUMP)
    for node in nodes:
        if node.cluster is None:
            logger.info("Node {id} is not assigned to an environment, "
                        "falling back to root".format(id=node.id))
            ssh_user = "root"
        else:
            editable_attrs = objects.Cluster.get_editable_attributes(
                node.cluster
            )
            try:
                ssh_user = editable_attrs['service_user']['name']['value']
            except __HOLE__:
                logger.info("Environment {env} doesn't support non-root "
                            "accounts on the slave nodes, falling back "
                            "to root for node-{node}".format(
                                env=node.cluster_id,
                                node=node.id))
                ssh_user = "root"
        host = {
            'hostname': objects.Node.get_slave_name(node),
            'address': node.ip,
            'ssh-user': ssh_user,
            'ssh-key': settings.SHOTGUN_SSH_KEY,
        }
        # save controllers
        if 'controller' in node.roles:
            dump_conf['dump']['controller']['hosts'].append(host)
        # save slaves
        dump_conf['dump']['slave']['hosts'].append(host)

    # render postgres connection data in dump settings
    dump_conf['dump']['local']['objects'].append({
        'type': 'postgres',
        'dbhost': settings.DATABASE['host'],
        'dbname': settings.DATABASE['name'],
        'username': settings.DATABASE['user'],
        'password': settings.DATABASE['passwd'],
    })

    # render cobbler connection data in dump settings
    # NOTE: we don't need user/password for cobbler
    dump_conf['dump']['local']['objects'].append({
        'type': 'xmlrpc',
        'server': settings.COBBLER_URL,
        'methods': [
            'get_distros',
            'get_profiles',
            'get_systems',
        ],
        'to_file': 'cobbler.txt',
    })

    # inject master host
    dump_conf['dump']['master']['hosts'] = [{
        'hostname': socket.gethostname(),
        'address': settings.MASTER_IP,
        'ssh-key': settings.SHOTGUN_SSH_KEY,
    }]

    logger.debug("Dump conf: %s", str(dump_conf))
    return dump_conf
|
KeyError
|
dataset/ETHPy150Open openstack/fuel-web/nailgun/nailgun/task/task.py/DumpTask.conf
|
7,355
|
@cassiopeia.type.core.common.lazyproperty
def runes(self):
    """
    Returns:
        list<Rune>: the runes in this rune page
    """
    runes = {}
    for slot in self.data.slots:
        try:
            runes[slot.runeId] += 1
        except __HOLE__:
            runes[slot.runeId] = 1
    fetched = cassiopeia.riotapi.get_runes(list(runes.keys()))
    return {rune: runes[rune.id] for rune in fetched}
|
KeyError
|
dataset/ETHPy150Open meraki-analytics/cassiopeia/cassiopeia/type/core/summoner.py/RunePage.runes
|
7,356
|
def get_range(self, name, min_value=None, max_value=None, default=0):
    """Parses the given int argument, limiting it to the given range.

    Args:
      name: the name of the argument
      min_value: the minimum int value of the argument (if any)
      max_value: the maximum int value of the argument (if any)
      default: the default value of the argument if it is not given

    Returns:
      An int within the given range for the argument
    """
    value = self.get(name, default)
    if value is None:
        return value
    try:
        value = int(value)
    except __HOLE__:
        value = default
    if value is not None:
        if max_value is not None:
            value = min(value, max_value)
        if min_value is not None:
            value = max(value, min_value)
    return value
|
ValueError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/google/appengine/ext/webapp/mock_webapp.py/MockRequest.get_range
|
7,357
|
def _set_launcher_affinity(self):
    try:
        self._launcher_pid = self.device.get_pids_of('com.android.launcher')[0]
        result = self.device.execute('taskset -p {}'.format(self._launcher_pid), busybox=True, as_root=True)
        self._old_launcher_affinity = int(result.split(':')[1].strip(), 16)

        cpu_ids = [i for i, x in enumerate(self.device.core_names) if x == 'a15']
        if not cpu_ids or len(cpu_ids) == len(self.device.core_names):
            self.logger.debug('Cannot set affinity.')
            return

        new_mask = reduce(lambda x, y: x | y, cpu_ids, 0x0)
        self.device.execute('taskset -p 0x{:X} {}'.format(new_mask, self._launcher_pid), busybox=True, as_root=True)
    except __HOLE__:
        raise WorkloadError('Could not set affinity of launcher: PID not found.')
|
IndexError
|
dataset/ETHPy150Open ARM-software/workload-automation/wlauto/workloads/applaunch/__init__.py/ApplaunchWorkload._set_launcher_affinity
|
7,358
|
def render(self, context, instance, placeholder):
    """
    Return the context to render a DialogFormPlugin
    """
    form_type = instance.glossary.get('form_type')
    if form_type:
        # prevent a malicious database entry from importing an ineligible file
        form_type = AUTH_FORM_TYPES[[ft[0] for ft in AUTH_FORM_TYPES].index(form_type)]
        try:
            FormClass = import_string(form_type[2])
        except (ImportError, __HOLE__):
            # TODO: other unresolvable may need another form name
            context['form_name'] = 'auth_form'
        else:
            context['form_name'] = FormClass.form_name
            context[FormClass.form_name] = FormClass()
    context['action'] = instance.link
    return super(ShopAuthenticationPlugin, self).render(context, instance, placeholder)
|
IndexError
|
dataset/ETHPy150Open awesto/django-shop/shop/cascade/auth.py/ShopAuthenticationPlugin.render
|
7,359
|
@register.tag('bbcode')
def do_bbcode_rendering(parser, token):
    """
    This will render a string containing bbcodes to the corresponding HTML markup.

    Usage::

        {% bbcode "[b]hello world![/b]" %}

    You can use variables instead of constant strings to render bbcode stuff::

        {% bbcode contentvar %}

    It is possible to store the rendered string into a variable::

        {% bbcode "[b]hello world![/b]" as renderedvar %}
    """
    bits = token.split_contents()
    if len(bits) < 2:
        raise TemplateSyntaxError('\'{0}\' takes at least one argument'.format(bits[0]))
    value = parser.compile_filter(bits[1])
    remaining = bits[2:]

    asvar = None
    seen = set()
    while remaining:
        option = remaining.pop(0)
        if option in seen:
            raise TemplateSyntaxError(
                'The \'{0}\' option was specified more than once.'.format(option))
        elif option == 'as':
            try:
                var_value = remaining.pop(0)
            except __HOLE__:
                msg = 'No argument provided to the \'{0}\' tag for the as option.'.format(bits[0])
                six.reraise(TemplateSyntaxError, TemplateSyntaxError(msg), sys.exc_info()[2])
            asvar = var_value
        else:
            raise TemplateSyntaxError(
                'Unknown argument for \'{0}\' tag: \'{1}\'. The only options '
                'available is \'as VAR\'.'.format(bits[0], option))
        seen.add(option)
    return BBCodeNode(value, asvar)
|
IndexError
|
dataset/ETHPy150Open ellmetha/django-precise-bbcode/precise_bbcode/templatetags/bbcode_tags.py/do_bbcode_rendering
|
7,360
|
def request(self, endpoint, method='GET', blog_url=None,
            extra_endpoints=None, params=None):
    params = params or {}
    method = method.lower()
    if not method in ('get', 'post'):
        raise TumblpyError('Method must be of GET or POST')

    url = self.api_url  # http://api.tumblr.com/v2/

    if blog_url is not None:
        # http://api.tumblr.com/v2/blog/blogname.tumblr.com/
        blog_url = blog_url.rstrip('/')
        if blog_url.startswith('http://'):
            blog_url = blog_url[7:]
        url = '%sblog/%s/' % (self.api_url, blog_url)

    url = '%s%s' % (url, endpoint)
    if extra_endpoints is not None:
        # In cases like:
        # http://api.tumblr.com/v2/blog/blogname.tumblr.com/posts/type/
        # 'type' is extra in the url & thought this was the best way
        # Docs: http://www.tumblr.com/docs/en/api/v2#posts
        url = '%s/%s' % (url, '/'.join(extra_endpoints))

    params, files = _split_params_and_files(params)
    params.update(self.default_params)

    func = getattr(self.client, method)
    try:
        if method == 'get':
            response = func(url, params=params, allow_redirects=False)
        else:
            kwargs = {'data': params, 'files': files, 'allow_redirects': False}
            if files:
                kwargs['params'] = params
            response = func(url, **kwargs)
    except requests.exceptions.RequestException:
        raise TumblpyError('An unknown error occurred.')

    if response.status_code == 401:
        raise TumblpyAuthError('Error: %s, Message: %s' % (response.status_code, response.content))

    content = response.content.decode('utf-8')

    try:
        if endpoint == 'avatar':
            content = {
                'response': {
                    'url': response.headers.get('location')
                }
            }
        else:
            content = json.loads(content)
    except ValueError:
        raise TumblpyError('Unable to parse response, invalid JSON.')

    try:
        content = content.get('response', {})
    except __HOLE__:
        raise TumblpyError('Unable to parse response, invalid content returned: %s' % content)

    if response.status_code < 200 or response.status_code > 301:
        error_message = ''
        if content and (content.get('errors') or content.get('error')):
            if 'errors' in content:
                for error in content['errors']:
                    error_message = '%s ' % error
            elif 'error' in content:
                error_message = content['error']
        error_message = (error_message or
                         'There was an error making your request.')
        raise TumblpyError(error_message, error_code=response.status_code)

    return content
|
AttributeError
|
dataset/ETHPy150Open michaelhelmick/python-tumblpy/tumblpy/api.py/Tumblpy.request
|
7,361
|
def test_check_is_fitted():
    # Check is ValueError raised when non estimator instance passed
    assert_raises(ValueError, check_is_fitted, ARDRegression, "coef_")
    assert_raises(TypeError, check_is_fitted, "SVR", "support_")

    ard = ARDRegression()
    svr = SVR()

    try:
        assert_raises(NotFittedError, check_is_fitted, ard, "coef_")
        assert_raises(NotFittedError, check_is_fitted, svr, "support_")
    except ValueError:
        assert False, "check_is_fitted failed with ValueError"

    # NotFittedError is a subclass of both ValueError and AttributeError
    try:
        check_is_fitted(ard, "coef_", "Random message %(name)s, %(name)s")
    except ValueError as e:
        assert_equal(str(e), "Random message ARDRegression, ARDRegression")

    try:
        check_is_fitted(svr, "support_", "Another message %(name)s, %(name)s")
    except __HOLE__ as e:
        assert_equal(str(e), "Another message SVR, SVR")

    ard.fit(*make_blobs())
    svr.fit(*make_blobs())

    assert_equal(None, check_is_fitted(ard, "coef_"))
    assert_equal(None, check_is_fitted(svr, "support_"))
|
AttributeError
|
dataset/ETHPy150Open scikit-learn/scikit-learn/sklearn/utils/tests/test_validation.py/test_check_is_fitted
|
7,362
|
def get_n_rooms(self):
try:
return len(self.db.request("GET", ("/%s_rooms/_design/by_msg_count/"
"_view/by_msg_count/") % self.couchdb_prefix)["rows"])
except __HOLE__:
return 0
|
KeyError
|
dataset/ETHPy150Open sfstpala/Victory-Chat/modules/chat.py/ChatMixIn.get_n_rooms
|
7,363
|
def get(self, ftype, fname):
try:
return self.config_files.read(ftype, fname)
except __HOLE__:
return {}
|
IOError
|
dataset/ETHPy150Open locaweb/haproxy-manager/src/haproxy_manager/manager.py/Manager.get
|
7,364
|
def javascript_catalog(request, domain='djangojs', packages=None):
"""
Returns the selected language catalog as a javascript library.
Receives the list of packages to check for translations in the
packages parameter either from an infodict or as a +-delimited
string from the request. Default is 'django.conf'.
Additionally you can override the gettext domain for this view,
but usually you don't want to do that, as JavaScript messages
go to the djangojs domain. But this might be needed if you
deliver your JavaScript source from Django templates.
"""
if request.GET:
if 'language' in request.GET:
if check_for_language(request.GET['language']):
activate(request.GET['language'])
if packages is None:
packages = ['django.conf']
if type(packages) in (str, unicode):
packages = packages.split('+')
packages = [p for p in packages if p == 'django.conf' or p in settings.INSTALLED_APPS]
default_locale = to_locale(settings.LANGUAGE_CODE)
locale = to_locale(get_language())
t = {}
paths = []
# first load all English language files for defaults
for package in packages:
p = importlib.import_module(package)
path = os.path.join(os.path.dirname(p.__file__), 'locale')
paths.append(path)
try:
catalog = gettext_module.translation(domain, path, ['en'])
t.update(catalog._catalog)
except __HOLE__:
# 'en' catalog was missing. This is harmless.
pass
# next load the settings.LANGUAGE_CODE translations if it isn't english
if default_locale != 'en':
for path in paths:
try:
catalog = gettext_module.translation(domain, path, [default_locale])
except IOError:
catalog = None
if catalog is not None:
t.update(catalog._catalog)
# last load the currently selected language, if it isn't identical to the default.
if locale != default_locale:
for path in paths:
try:
catalog = gettext_module.translation(domain, path, [locale])
except IOError:
catalog = None
if catalog is not None:
t.update(catalog._catalog)
src = [LibHead]
plural = None
if '' in t:
for l in t[''].split('\n'):
if l.startswith('Plural-Forms:'):
plural = l.split(':',1)[1].strip()
if plural is not None:
# this should actually be a compiled function of a typical plural-form:
# Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;
plural = [el.strip() for el in plural.split(';') if el.strip().startswith('plural=')][0].split('=',1)[1]
src.append(PluralIdx % plural)
else:
src.append(SimplePlural)
csrc = []
pdict = {}
for k, v in t.items():
if k == '':
continue
if type(k) in (str, unicode):
csrc.append("catalog['%s'] = '%s';\n" % (javascript_quote(k), javascript_quote(v)))
elif type(k) == tuple:
if k[0] not in pdict:
pdict[k[0]] = k[1]
else:
pdict[k[0]] = max(k[1], pdict[k[0]])
csrc.append("catalog['%s'][%d] = '%s';\n" % (javascript_quote(k[0]), k[1], javascript_quote(v)))
else:
raise TypeError, k
csrc.sort()
for k,v in pdict.items():
src.append("catalog['%s'] = [%s];\n" % (javascript_quote(k), ','.join(["''"]*(v+1))))
src.extend(csrc)
src.append(LibFoot)
src.append(InterPolate)
src = ''.join(src)
return http.HttpResponse(src, 'text/javascript')
|
IOError
|
dataset/ETHPy150Open CollabQ/CollabQ/vendor/django/views/i18n.py/javascript_catalog
|
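For context, a view like the one above was conventionally wired into a Django URLconf of the same era roughly as follows; 'myapp' is a placeholder application name, and the exact urls module varies between Django versions.
from django.conf.urls.defaults import patterns

js_info_dict = {'packages': ('myapp',)}
urlpatterns = patterns('',
    (r'^jsi18n/$', 'django.views.i18n.javascript_catalog', js_info_dict),
)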
7,365
|
def get_terminal_size(fallback=(80, 24)):
"""Get the size of the terminal window.
For each of the two dimensions, the environment variable, COLUMNS
and LINES respectively, is checked. If the variable is defined and
the value is a positive integer, it is used.
When COLUMNS or LINES is not defined, which is the common case,
the terminal connected to sys.__stdout__ is queried
by invoking os.get_terminal_size.
If the terminal size cannot be successfully queried, either because
the system doesn't support querying, or because we are not
connected to a terminal, the value given in fallback parameter
is used. Fallback defaults to (80, 24) which is the default
size used by many terminal emulators.
The value returned is a named tuple of type os.terminal_size.
"""
# Attempt to use the environment first
try:
columns = int(os.environ["COLUMNS"])
except (__HOLE__, ValueError):
columns = 0
try:
lines = int(os.environ["LINES"])
except (KeyError, ValueError):
lines = 0
# Only query if necessary
if columns <= 0 or lines <= 0:
try:
columns, lines = _get_terminal_size(sys.__stdout__.fileno())
except (NameError, OSError):
pass
# Use fallback as last resort
if columns <= 0 and lines <= 0:
columns, lines = fallback
return terminal_size(columns, lines)
|
KeyError
|
dataset/ETHPy150Open chrippa/livestreamer/src/livestreamer_cli/packages/shutil_backport.py/get_terminal_size
|
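A quick usage sketch of the backport above; the return value unpacks like a (columns, lines) pair because terminal_size is a named tuple defined alongside this function.
columns, lines = get_terminal_size(fallback=(100, 40))
print("terminal is %d columns by %d lines" % (columns, lines))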
7,366
|
@classmethod
def validate(cls, job_config):
"""Inherit docs."""
super(ModelDatastoreInputReader, cls).validate(job_config)
params = job_config.input_reader_params
entity_kind = params[cls.ENTITY_KIND_PARAM]
# Fail fast if Model cannot be located.
try:
model_class = util.for_name(entity_kind)
except __HOLE__, e:
raise errors.BadReaderParamsError("Bad entity kind: %s" % e)
if cls.FILTERS_PARAM in params:
filters = params[cls.FILTERS_PARAM]
if issubclass(model_class, db.Model):
cls._validate_filters(filters, model_class)
else:
cls._validate_filters_ndb(filters, model_class)
property_range.PropertyRange(filters, entity_kind)
|
ImportError
|
dataset/ETHPy150Open GoogleCloudPlatform/appengine-mapreduce/python/src/mapreduce/api/map_job/model_datastore_input_reader.py/ModelDatastoreInputReader.validate
|
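util.for_name resolves a dotted 'package.module.Class' string to the object it names. A rough standalone sketch of that behavior (the real mapreduce helper does considerably more error handling):
import importlib

def for_name(fq_name):
    # split 'collections.Counter' into module path and attribute name
    module_name, _, attr = fq_name.rpartition('.')
    return getattr(importlib.import_module(module_name), attr)

Counter = for_name('collections.Counter')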
7,367
|
def unzip_to_directory_tree(drop_dir, filepath):
hint_rx = re.compile(r'_((?:un)?hinted)/(.+)')
plain_rx = re.compile(r'[^/]+')
zf = zipfile.ZipFile(filepath, 'r')
print 'extracting files from %s to %s' % (filepath, drop_dir)
count = 0
mapped_names = []
unmapped = []
for name in zf.namelist():
# skip names representing portions of the path
if name.endswith('/'):
continue
# get the blob
try:
data = zf.read(name)
except __HOLE__:
print 'did not find %s in zipfile' % name
continue
result = hint_rx.search(name)
if result:
# we know where it goes
subdir = result.group(1)
filename = result.group(2)
write_data_to_file(data, drop_dir, subdir, filename)
count += 1
continue
result = plain_rx.match(name)
if not result:
print "subdir structure without hint/unhint: '%s'" % name
continue
# we have to figure out where it goes.
# if it's a .ttf file, we look for 'fpgm'
# and 'prep' and if they are present, we put
# it into hinted, else unhinted.
# if it's not a .ttf file, but it starts with
# the name of a .ttf file (sans suffix), we put
# it in the same subdir the .ttf file went into.
# else we put it at drop_dir (no subdir).
if name.endswith('.ttf'):
blobfile = cStringIO.StringIO(data)
font = ttLib.TTFont(blobfile)
subdir = 'hinted' if font.get('fpgm') or font.get('prep') else 'unhinted'
write_data_to_file(data, drop_dir, subdir, name)
count += 1
basename = os.path.splitext(name)[0]
mapped_names.append((basename, subdir))
continue
# get to these later
unmapped.append((name, data))
# write the remainder
if unmapped:
for name, data in unmapped:
subdir = ''
for mapped_name, mapped_subdir in mapped_names:
if name.startswith(mapped_name):
subdir = mapped_subdir
break
write_data_to_file(data, drop_dir, subdir, name)
count += 1
print 'extracted %d files' % count
|
KeyError
|
dataset/ETHPy150Open googlei18n/nototools/nototools/grab_mt_download.py/unzip_to_directory_tree
|
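The hinted/unhinted decision above boils down to whether the font carries TrueType hinting tables. A hypothetical helper isolating that check with fontTools, using the same font.get() access as the code above:
from fontTools import ttLib

def is_hinted(font_path):
    # 'fpgm' and 'prep' are the TrueType hinting program tables
    font = ttLib.TTFont(font_path)
    return bool(font.get('fpgm') or font.get('prep'))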
7,368
|
def verify(inputhashes, **kwargs):
"""
Checks if the group specified is present with the exact matching
configuration provided.
"""
failed = []
for group in inputhashes:
name = group['name']
try:
result = grp.getgrnam(name)
if 'gid' in group:
if int(group['gid']) != result.gr_gid:
raise VerifyError
except (__HOLE__, VerifyError):  # catch both the missing-group and gid-mismatch cases
failed.append(group)
return failed
|
KeyError
|
dataset/ETHPy150Open gosquadron/squadron/squadron/libraries/group/__init__.py/verify
|
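grp.getgrnam raises KeyError for unknown groups, which is what the verify hook above relies on. A minimal standalone check:
import grp

try:
    g = grp.getgrnam('wheel')  # 'wheel' may or may not exist on a given system
    print(g.gr_name, g.gr_gid)
except KeyError:
    print('group not present')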
7,369
|
def apply(inputhashes, log):
"""
Adds the group to the system. If the group is currently present, it fails
as we can't yet modify groups.
"""
failed = []
for group in inputhashes:
name = group['name']
try:
result = grp.getgrnam(name)
# Can't modify groups that already exist...yet
log.error('Group %s already present, can\'t modify', name)
failed.append(group)
except __HOLE__:
# this is the normal path
args = ['groupadd']
if 'system' in group:
if bool(group['system']):
args.append('--system')
if 'gid' in group:
args.append('--gid')
args.append(group['gid'])
args.append(name)
result = subprocess.call(args)
log.debug('groupadd for %s result %s', args, result)
if result != 0:
failed.append(group)
return failed
|
KeyError
|
dataset/ETHPy150Open gosquadron/squadron/squadron/libraries/group/__init__.py/apply
|
7,370
|
def confirm_input(user_input):
"""Check user input for yes, no, or an exit signal"""
if isinstance(user_input, list):
user_input = ''.join(user_input)
try:
u_inp = user_input.lower().strip()
except __HOLE__:
u_inp = user_input
# Check for exit signal
if u_inp in ('q', 'quit', 'exit'):
sys.exit()
if u_inp in ('y', 'yes'):
return True
return False
|
AttributeError
|
dataset/ETHPy150Open huntrar/scrape/scrape/utils.py/confirm_input
|
7,371
|
def remove_file(filename):
"""Remove a file from disk"""
try:
os.remove(filename)
return True
except (__HOLE__, IOError):
return False
|
OSError
|
dataset/ETHPy150Open huntrar/scrape/scrape/utils.py/remove_file
|
7,372
|
def overwrite_file_check(args, filename):
"""If filename exists, overwrite or modify it to be unique"""
if not args['overwrite'] and os.path.exists(filename):
# Confirm overwriting of the file, or modify filename
if args['no_overwrite']:
overwrite = False
else:
try:
overwrite = confirm_input(input('Overwrite {0}? (yes/no): '
.format(filename)))
except (__HOLE__, EOFError):
sys.exit()
if not overwrite:
new_filename = modify_filename_id(filename)
while os.path.exists(new_filename):
new_filename = modify_filename_id(new_filename)
return new_filename
return filename
|
KeyboardInterrupt
|
dataset/ETHPy150Open huntrar/scrape/scrape/utils.py/overwrite_file_check
|
7,373
|
def write_pdf_files(args, infilenames, outfilename):
"""Write PDF file(s) to disk using pdfkit
Keyword arguments:
args -- program arguments (dict)
infilenames -- names of user-inputted and/or downloaded files (list)
outfilename -- name of output PDF file (str)
"""
# Modifies filename if user does not wish to overwrite
outfilename = overwrite_file_check(args, outfilename)
options = {}
try:
if args['multiple']:
# Multiple files are written one at a time, so infilenames will
# never contain more than one file here
infilename = infilenames[0]
if not args['quiet']:
print('Attempting to write to {0}.'.format(outfilename))
else:
options['quiet'] = None
if args['xpath']:
# Process HTML with XPath before writing
html = parse_html(read_files(infilename), args['xpath'])
if isinstance(html, list):
if isinstance(html[0], str):
pk.from_string('\n'.join(html), outfilename,
options=options)
else:
pk.from_string('\n'.join(lh.tostring(x) for x in html),
outfilename, options=options)
elif isinstance(html, str):
pk.from_string(html, outfilename, options=options)
else:
pk.from_string(lh.tostring(html), outfilename,
options=options)
else:
pk.from_file(infilename, outfilename, options=options)
elif args['single']:
if not args['quiet']:
print('Attempting to write {0} page(s) to {1}.'
.format(len(infilenames), outfilename))
else:
options['quiet'] = None
if args['xpath']:
# Process HTML with XPath before writing
html = parse_html(read_files(infilenames), args['xpath'])
if isinstance(html, list):
if isinstance(html[0], str):
pk.from_string('\n'.join(html), outfilename,
options=options)
else:
pk.from_string('\n'.join(lh.tostring(x) for x in html),
outfilename, options=options)
elif isinstance(html, str):
pk.from_string(html, outfilename, options=options)
else:
pk.from_string(lh.tostring(html), outfilename,
options=options)
else:
pk.from_file(infilenames, outfilename, options=options)
return True
except (__HOLE__, IOError) as err:
sys.stderr.write('An error occurred while writing {0}:\n{1}'
.format(outfilename, str(err)))
return False
|
OSError
|
dataset/ETHPy150Open huntrar/scrape/scrape/utils.py/write_pdf_files
|
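A minimal sketch of the pdfkit calls used above (pk is the pdfkit module); the filenames are placeholders and a working wkhtmltopdf binary is assumed to be installed.
import pdfkit as pk

# options={'quiet': None} suppresses wkhtmltopdf progress output, as above
pk.from_string('<h1>hello</h1>', 'out.pdf', options={'quiet': None})
pk.from_file('page.html', 'out2.pdf')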
7,374
|
def write_file(data, outfilename):
"""Write a single file to disk"""
if not data:
return False
try:
with open(outfilename, 'w') as outfile:
for line in data:
if line:
outfile.write(line)
return True
except (OSError, __HOLE__) as err:
sys.stderr.write('An error occurred while writing {0}:\n{1}'
.format(outfilename, str(err)))
return False
|
IOError
|
dataset/ETHPy150Open huntrar/scrape/scrape/utils.py/write_file
|
7,375
|
def write_part_images(url, raw_html, html, filename):
"""Write image file(s) associated with HTML to disk, substituting filenames
Keyword arguments:
url -- the URL from which the HTML has been extracted from (str)
raw_html -- unparsed HTML file content (list)
html -- parsed HTML file content (lxml.html.HtmlElement) (default: None)
filename -- the PART.html filename (str)
Return raw HTML with image names replaced with local image filenames.
"""
save_dirname = '{0}_files'.format(os.path.splitext(filename)[0])
if not os.path.exists(save_dirname):
os.makedirs(save_dirname)
images = html.xpath('//img/@src')
internal_image_urls = [x for x in images if x.startswith('/')]
headers = {'User-Agent': random.choice(USER_AGENTS)}
for img_url in images:
img_name = img_url.split('/')[-1]
if "?" in img_name:
img_name = img_name.split('?')[0]
if not os.path.splitext(img_name)[1]:
img_name = '{0}.jpeg'.format(img_name)
try:
full_img_name = os.path.join(save_dirname, img_name)
with open(full_img_name, 'wb') as img:
if img_url in internal_image_urls:
# Internal images need base url added
full_img_url = '{0}{1}'.format(url.rstrip('/'), img_url)
else:
# External image
full_img_url = img_url
img_content = requests.get(full_img_url, headers=headers,
proxies=get_proxies()).content
img.write(img_content)
raw_html = raw_html.replace(escape(img_url), full_img_name)
except (OSError, __HOLE__):
pass
time.sleep(random.uniform(0, 0.5)) # Slight delay between downloads
return raw_html
|
IOError
|
dataset/ETHPy150Open huntrar/scrape/scrape/utils.py/write_part_images
|
7,376
|
def stop(self, timeout=None):
"""
Stop the producer (async mode). Blocks until async thread completes.
"""
if timeout is not None:
log.warning('timeout argument to stop() is deprecated - '
'it will be removed in future release')
if not self.async:
log.warning('producer.stop() called, but producer is not async')
return
if self.stopped:
log.warning('producer.stop() called, but producer is already stopped')
return
if self.async:
self.queue.put((STOP_ASYNC_PRODUCER, None, None))
self.thread_stop_event.set()
self.thread.join()
if hasattr(self, '_cleanup_func'):
# Remove cleanup handler now that we've stopped
# py3 supports unregistering
if hasattr(atexit, 'unregister'):
atexit.unregister(self._cleanup_func) # pylint: disable=no-member
# py2 requires removing from private attribute...
else:
# ValueError on list.remove() if the exithandler no longer exists
# but that is fine here
try:
atexit._exithandlers.remove( # pylint: disable=no-member
(self._cleanup_func, (self,), {}))
except __HOLE__:
pass
del self._cleanup_func
self.stopped = True
|
ValueError
|
dataset/ETHPy150Open dpkp/kafka-python/kafka/producer/base.py/Producer.stop
|
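The cleanup bookkeeping above exists because atexit.unregister is Python 3 only. A small self-contained sketch of the portable pattern, assuming nothing about kafka itself:
import atexit

class Worker(object):
    def stop(self):
        print('stopped')

w = Worker()
atexit.register(w.stop)
if hasattr(atexit, 'unregister'):
    atexit.unregister(w.stop)  # clean removal on Python 3; py2 needs the _exithandlers hack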
7,377
|
@Override(Experiment)
def do_should_finish(self):
message = '@@@'.join(('status', self.shared_secret))
json_response = self._send_message(message)
if not json_response:
return 10
try:
response = int(json_response)
except __HOLE__:
return 10
else:
return response
|
ValueError
|
dataset/ETHPy150Open weblabdeusto/weblabdeusto/server/src/experiments/labview_remote_panels.py/LabviewRemotePanels.do_should_finish
|
7,378
|
def test_search_invalid_query_as_json(self):
args = {
'output_mode': 'json',
'exec_mode': 'normal'
}
try:
self.service.jobs.create('invalid query', **args)
except SyntaxError as pe:
self.fail("Something went wrong with parsing the REST API response. %s" % pe.message)
except __HOLE__ as he:
self.assertEqual(he.status, 400)
except Exception as e:
self.fail("Got some unexpected error. %s" % e.message)
|
HTTPError
|
dataset/ETHPy150Open splunk/splunk-sdk-python/tests/test_job.py/TestJob.test_search_invalid_query_as_json
|
7,379
|
def ConvertToMSBuildSettings(msvs_settings, stderr=sys.stderr):
"""Converts MSVS settings (VS2008 and earlier) to MSBuild settings (VS2010+).
Args:
msvs_settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
Returns:
A dictionary of MSBuild settings. The key is either the MSBuild tool name
or the empty string (for the global settings). The values are themselves
dictionaries of settings and their values.
"""
msbuild_settings = {}
for msvs_tool_name, msvs_tool_settings in msvs_settings.iteritems():
if msvs_tool_name in _msvs_to_msbuild_converters:
msvs_tool = _msvs_to_msbuild_converters[msvs_tool_name]
for msvs_setting, msvs_value in msvs_tool_settings.iteritems():
if msvs_setting in msvs_tool:
# Invoke the translation function.
try:
msvs_tool[msvs_setting](msvs_value, msbuild_settings)
except __HOLE__, e:
print >> stderr, ('Warning: while converting %s/%s to MSBuild, '
'%s' % (msvs_tool_name, msvs_setting, e))
else:
# We don't know this setting. Give a warning.
print >> stderr, ('Warning: unrecognized setting %s/%s '
'while converting to MSBuild.' %
(msvs_tool_name, msvs_setting))
else:
print >> stderr, ('Warning: unrecognized tool %s while converting to '
'MSBuild.' % msvs_tool_name)
return msbuild_settings
|
ValueError
|
dataset/ETHPy150Open adobe/brackets-shell/gyp/pylib/gyp/MSVSSettings.py/ConvertToMSBuildSettings
|
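Illustrative input/output shape only: the key below is a real VS2008-era tool name, but which settings actually translate depends on the _msvs_to_msbuild_converters table defined elsewhere in this module.
msvs = {'VCCLCompilerTool': {'WarningLevel': '3'}}
msbuild = ConvertToMSBuildSettings(msvs)
# msbuild is again a dict of dicts, keyed by MSBuild tool name (or '' for globals)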
7,380
|
def _ValidateSettings(validators, settings, stderr):
"""Validates that the settings are valid for MSBuild or MSVS.
We currently only validate the names of the settings, not their values.
Args:
validators: A dictionary of tools and their validators.
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
for tool_name in settings:
if tool_name in validators:
tool_validators = validators[tool_name]
for setting, value in settings[tool_name].iteritems():
if setting in tool_validators:
try:
tool_validators[setting](value)
except __HOLE__, e:
print >> stderr, ('Warning: for %s/%s, %s' %
(tool_name, setting, e))
else:
print >> stderr, ('Warning: unrecognized setting %s/%s' %
(tool_name, setting))
else:
print >> stderr, ('Warning: unrecognized tool %s' % tool_name)
# MSVS and MSBuild names of the tools.
|
ValueError
|
dataset/ETHPy150Open adobe/brackets-shell/gyp/pylib/gyp/MSVSSettings.py/_ValidateSettings
|
7,381
|
def get_ingress_ip(k8s, service_name):
"""Gets the public IP address of the service that maps to the remote
builder."""
service = k8s.get_service(service_name)
try:
return service['status']['loadBalancer']['ingress'][0]['ip']
except __HOLE__:
raise KubernetesError(
'Service {} does not have an external load balancer.'.format(
service_name))
|
KeyError
|
dataset/ETHPy150Open jonparrott/noel/noel/noel/builder/remote.py/get_ingress_ip
|
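The bare chained lookup above fails at the first missing level; a defensive equivalent using .get(), shown for comparison as a standalone snippet:
service = {'status': {'loadBalancer': {}}}  # a service with no external IP yet
ingress = service.get('status', {}).get('loadBalancer', {}).get('ingress') or []
ip = ingress[0]['ip'] if ingress else None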
7,382
|
def is_valid_url(s):
"""Returns `True` if the given string is a valid URL. This calls
Django's `URLValidator()`, but does not raise an exception.
"""
try:
validate_url(s)
return True
except __HOLE__:
return False
|
ValidationError
|
dataset/ETHPy150Open lsaffre/lino/lino/core/utils.py/is_valid_url
|
7,383
|
def is_valid_email(s):
"""Returns `True` if the given string is a valid email. This calls
Django's `validate_email()`, but does not raise an exception.
"""
try:
validate_email(s)
return True
except __HOLE__:
return False
|
ValidationError
|
dataset/ETHPy150Open lsaffre/lino/lino/core/utils.py/is_valid_email
|
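Quick usage sketch of the two helpers; both require a configured Django settings module to be importable.
print(is_valid_email('user@example.com'))   # True
print(is_valid_url('not a url'))            # False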
7,384
|
def resolve_app(app_label, strict=False):
"""Return the `modules` module of the given `app_label` if it is
installed. Otherwise return either the :term:`dummy module` for
`app_label` if it exists, or `None`.
If the optional second argument `strict` is `True`, raise
ImportError if the app is not installed.
This function is designed for use in models modules and available
through the shortcut ``dd.resolve_app``.
For example, instead of writing::
from lino.modlib.sales import models as sales
it is recommended to write::
sales = dd.resolve_app('sales')
because it makes your code usable (1) in applications that don't
have the 'sales' module installed and (2) in applications who have
another implementation of the `sales` module
(e.g. :mod:`lino.modlib.auto.sales`)
"""
#~ app_label = app_label
for app_name in settings.INSTALLED_APPS:
if app_name == app_label or app_name.endswith('.' + app_label):
return import_module('.models', app_name)
try:
return import_module('lino.modlib.%s.dummy' % app_label)
except __HOLE__:
if strict:
#~ raise
raise ImportError("No app_label %r in %s" %
(app_label, settings.INSTALLED_APPS))
|
ImportError
|
dataset/ETHPy150Open lsaffre/lino/lino/core/utils.py/resolve_app
|
7,385
|
def navinfo(qs, elem):
"""Return a dict with navigation information for the given model
instance `elem` within the given queryset. The dictionary
contains the following keys:
:recno: row number (index +1) of elem in qs
:first: pk of the first element in qs (None if qs is empty)
:prev: pk of the previous element in qs (None if qs is empty)
:next: pk of the next element in qs (None if qs is empty)
:last: pk of the last element in qs (None if qs is empty)
:message: text "Row x of y" or "No navigation"
"""
first = None
prev = None
next = None
last = None
recno = 0
message = None
#~ LEN = ar.get_total_count()
if isinstance(qs, (list, tuple)):
LEN = len(qs)
id_list = [obj.pk for obj in qs]
#~ logger.info('20130714')
else:
LEN = qs.count()
# this algorithm is clearly quicker on queries with a few thousand rows
id_list = list(qs.values_list('pk', flat=True))
if LEN > 0:
"""
Commented out the following assert because it failed in certain circumstances
(see `/blog/2011/1220`)
"""
#~ assert len(id_list) == ar.total_count, \
#~ "len(id_list) is %d while ar.total_count is %d" % (len(id_list),ar.total_count)
#~ print 20111220, id_list
try:
i = id_list.index(elem.pk)
except __HOLE__:
pass
else:
recno = i + 1
first = id_list[0]
last = id_list[-1]
if i > 0:
prev = id_list[i - 1]
if i < len(id_list) - 1:
next = id_list[i + 1]
message = _("Row %(rowid)d of %(rowcount)d") % dict(
rowid=recno, rowcount=LEN)
if message is None:
message = _("No navigation")
return dict(
first=first, prev=prev, next=next, last=last, recno=recno,
message=message)
# class Handle(object):
# """Base class for :class:`lino.core.tables.TableHandle`,
# :class:`lino.core.frames.FrameHandle` etc.
# The "handle" of an actor is responsible for expanding layouts into
# sets of (renderer-specific) widgets (called "elements"). This
# operation is done once per actor per renderer.
# """
# # def __init__(self):
# # self.ui = settings.SITE.kernel.default_ui
# def setup(self, ar):
# settings.SITE.kernel.setup_handle(self, ar)
# # self.ui.setup_handle(self, ar)
|
ValueError
|
dataset/ETHPy150Open lsaffre/lino/lino/core/utils.py/navinfo
|
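A usage sketch of navinfo; Person is a hypothetical Django model, and the queryset's ordering determines what counts as the previous and next row.
qs = Person.objects.order_by('last_name')
obj = qs[3]
info = navinfo(qs, obj)
print(info['message'])              # e.g. "Row 4 of 17"
print(info['prev'], info['next'])   # neighbouring primary keys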
7,386
|
def close(self):
"""Close the contacless reader device."""
with self.lock:
if self.dev:
try: self.dev.close()
except __HOLE__: pass
self.dev = None
|
IOError
|
dataset/ETHPy150Open javgh/greenaddress-pos-tools/nfc/clf.py/ContactlessFrontend.close
|
7,387
|
def connect(self, **options):
"""Connect with a contactless target or become connected as a
contactless target. The calling thread is blocked until a
single activation and deactivation has completed or a callback
function supplied as the keyword argument ``terminate``
returned True. The result of the terminate function also
applies to the loop run after activation, so the example below
will make :meth:`~connect()` return after 10 seconds from
either waiting for a peer device or when connected. ::
>>> import nfc, time
>>> clf = nfc.ContactlessFrontend('usb')
>>> after5s = lambda: time.time() - started > 5
>>> started = time.time(); clf.connect(llcp={}, terminate=after5s)
Connect options are given as keyword arguments with dictionary
values. Possible options are:
* ``rdwr={key: value, ...}`` - options for reader/writer operation
* ``llcp={key: value, ...}`` - options for peer to peer mode operation
* ``card={key: value, ...}`` - options for card emulation operation
**Reader/Writer Options**
'targets': sequence
A list of target specifications with each target of either
type :class:`~nfc.clf.TTA`, :class:`~nfc.clf.TTB`, or
:class:`~nfc.clf.TTF`. A default set is chosen if 'targets'
is not provided.
'on-startup': function
A function that will be called with the list of targets
(from 'targets') to search for. Must return a list of
targets or :const:`None`. Only the targets returned are
finally considered.
'on-connect': function
A function object that will be called with an activated
:class:`~nfc.tag.Tag` object.
>>> import nfc
>>> def connected(tag):
... print tag
... return True
...
>>> clf = nfc.ContactlessFrontend()
>>> clf.connect(rdwr={'on-connect': connected})
Type3Tag IDm=01010501b00ac30b PMm=03014b024f4993ff SYS=12fc
True
**Peer To Peer Options**
'on-startup': function
A function that is called before an attempt is made to
establish peer to peer communication. The function receives
the initialized :class:`~nfc.llcp.llc.LogicalLinkController`
instance as parameter, which may then be used to allocate
and bind communication sockets for service applications. The
return value must be either the
:class:`~nfc.llcp.llc.LogicalLinkController` instance or
:const:`None` to effectively remove llcp from the options
considered.
'on-connect': function
A function that is called when peer to peer communication
was established. The function receives the connected
:class:`~nfc.llcp.llc.LogicalLinkController` instance as
parameter, which may then be used to allocate communication
sockets with
:meth:`~nfc.llcp.llc.LogicalLinkController.socket` and spawn
working threads to perform communication. The callback must
return more or less immediately with :const:`True` unless
the logical link controller run loop is handled within the
callback.
'role': string
Defines which role the local LLC shall take for the data
exchange protocol activation. Possible values are
'initiator' and 'target'. The default is to alternate
between both roles until communication is established.
'miu': integer
Defines the maximum information unit size that will be
supported and announced to the remote LLC. The default value
is 128.
'lto': integer
Defines the link timeout value (in milliseconds) that will
be announced to the remote LLC. The default value is 100
milliseconds.
'agf': boolean
Defines if the local LLC performs PDU aggregation and may
thus send Aggregated Frame (AGF) PDUs to the remote LLC. The
default is to use aggregation.
>>> import nfc
>>> import threading
>>> def worker(socket):
... socket.sendto("Hi there!", address=16)
... socket.close()
...
>>> def connected(llc):
... socket = llc.socket(nfc.llcp.LOGICAL_DATA_LINK)
... threading.Thread(target=worker, args=(socket,)).start()
... return True
...
>>> clf = nfc.ContactlessFrontend()
>>> clf.connect(llcp={'on-connect': connected})
**Card Emulation Options**
'targets': sequence
A list of target specifications with each target of either
type :class:`~nfc.clf.TTA`, :class:`~nfc.clf.TTB`, or
:class:`~nfc.clf.TTF`. The list of targets is processed
sequentially. Defaults to an empty list.
'on-startup': function
A function that will be called with the list of targets
(from 'targets') to emulate. Must return a list of one
target chosen or :const:`None`.
'on-connect': function
A function that will be called with an activated
:class:`~nfc.tag.TagEmulation` instance as first parameter and
the first command received as the second parameter.
'on-release': function
A function that will be called when the activated tag has been
released by its Initiator, i.e. when the tag has been removed
from the Initiator's RF field.
'timeout': integer
The timeout in seconds to wait for each target to become
initialized. The default value is 1 second.
>>> import nfc
>>>
>>> def connected(tag, command):
... print tag
... print str(command).encode("hex")
...
>>> clf = nfc.ContactlessFrontend()
>>> idm = bytearray.fromhex("01010501b00ac30b")
>>> pmm = bytearray.fromhex("03014b024f4993ff")
>>> sys = bytearray.fromhex("12fc")
>>> target = nfc.clf.TTF(212, idm, pmm, sys)
>>> clf.connect(card={'targets': [target], 'on-connect': connected})
Type3TagEmulation IDm=01010501b00ac30b PMm=03014b024f4993ff SYS=12fc
100601010501b00ac30b010b00018000
True
Connect returns :const:`None` if no options were to execute,
:const:`False` if interrupted by a :exc:`KeyboardInterrupt`,
or :const:`True` if terminated normally and the 'on-connect'
callback function had returned :const:`True`. If the
'on-connect' callback had returned :const:`False` the return
value of connect() is the same parameters as were provided to
the callback function.
Connect raises :exc:`IOError(errno.ENODEV)` if called before a
contactless reader was opened.
"""
if self.dev is None:
raise IOError(errno.ENODEV, os.strerror(errno.ENODEV))
log.debug("connect({0})".format(options))
terminate = options.get('terminate', lambda: False)
rdwr_options = options.get('rdwr')
llcp_options = options.get('llcp')
card_options = options.get('card')
if isinstance(rdwr_options, dict):
rdwr_options.setdefault('targets', [
TTA(br=106, cfg=None, uid=None), TTB(br=106),
TTF(br=424, idm=None, pmm=None, sys=None),
TTF(br=212, idm=None, pmm=None, sys=None)])
if 'on-startup' in rdwr_options:
targets = rdwr_options.get('targets')
targets = rdwr_options['on-startup'](self, targets)
if targets is None: rdwr_options = None
else: rdwr_options['targets'] = targets
if rdwr_options is not None:
if not 'on-connect' in rdwr_options:
rdwr_options['on-connect'] = lambda tag: True
elif rdwr_options is not None:
raise TypeError("argument *rdrw* must be a dictionary")
if isinstance(llcp_options, dict):
llc = nfc.llcp.llc.LogicalLinkController(
recv_miu=llcp_options.get('miu', 128),
send_lto=llcp_options.get('lto', 100),
send_agf=llcp_options.get('agf', True),
symm_log=llcp_options.get('symm-log', True))
if 'on-startup' in llcp_options:
llc = llcp_options['on-startup'](self, llc)
if llc is None: llcp_options = None
if llcp_options is not None:
if not 'on-connect' in llcp_options:
llcp_options['on-connect'] = lambda llc: True
elif llcp_options is not None:
raise TypeError("argument *llcp* must be a dictionary")
if isinstance(card_options, dict):
if 'on-startup' in card_options:
targets = card_options.get('targets', [])
targets = card_options['on-startup'](self, targets)
if targets is None: card_options = None
else: card_options['targets'] = targets
if card_options is not None:
if not card_options.get('targets'):
log.error("a target must be specified to connect as tag")
return None
if not 'on-connect' in card_options:
card_options['on-connect'] = lambda tag, command: True
elif card_options is not None:
raise TypeError("argument *card* must be a dictionary")
some_options = rdwr_options or llcp_options or card_options
if not some_options:
log.warning("no options left to connect")
return None
try:
while not terminate():
if llcp_options:
result = self._llcp_connect(llcp_options, llc, terminate)
if bool(result) is True: return result
if rdwr_options:
result = self._rdwr_connect(rdwr_options, terminate)
if bool(result) is True: return result
if card_options:
result = self._card_connect(card_options, terminate)
if bool(result) is True: return result
except KeyboardInterrupt as error:
log.debug(error)
return False
except __HOLE__ as error:
log.error(error)
return False
|
IOError
|
dataset/ETHPy150Open javgh/greenaddress-pos-tools/nfc/clf.py/ContactlessFrontend.connect
|
7,388
|
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except __HOLE__:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
|
TypeError
|
dataset/ETHPy150Open scikit-learn/scikit-learn/sklearn/cluster/bicluster.py/SpectralBiclustering._check_parameters
|
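The two n_clusters forms the validation above accepts, using the real scikit-learn estimator:
from sklearn.cluster import SpectralBiclustering

SpectralBiclustering(n_clusters=3)        # same count for rows and columns
SpectralBiclustering(n_clusters=(4, 3))   # (n_row_clusters, n_column_clusters)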
7,389
|
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except __HOLE__:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
self.rows_ = np.vstack(self.row_labels_ == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
self.columns_ = np.vstack(self.column_labels_ == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
|
TypeError
|
dataset/ETHPy150Open scikit-learn/scikit-learn/sklearn/cluster/bicluster.py/SpectralBiclustering._fit
|
7,390
|
def entries(self, fileids, **kwargs):
if 'key' in kwargs:
key = kwargs['key']
del kwargs['key']
else:
key = 'lx' # the default key in MDF
entries = []
for marker, contents in self.fields(fileids, **kwargs):
if marker == key:
entries.append((contents, []))
else:
try:
entries[-1][-1].append((marker, contents))
except __HOLE__:
pass
return entries
|
IndexError
|
dataset/ETHPy150Open nltk/nltk/nltk/corpus/reader/toolbox.py/ToolboxCorpusReader.entries
|
7,391
|
def readheaders(self):
"""Read header lines.
Read header lines up to the entirely blank line that terminates them.
The (normally blank) line that ends the headers is skipped, but not
included in the returned list. If a non-header line ends the headers,
(which is an error), an attempt is made to backspace over it; it is
never included in the returned list.
The variable self.status is set to the empty string if all went well,
otherwise it is an error message. The variable self.headers is a
completely uninterpreted list of lines contained in the header (so
printing them will reproduce the header exactly as it appears in the
file).
If multiple header fields with the same name occur, they are combined
according to the rules in RFC 2616 sec 4.2:
Appending each subsequent field-value to the first, each separated
by a comma. The order in which header fields with the same field-name
are received is significant to the interpretation of the combined
field value.
"""
# XXX The implementation overrides the readheaders() method of
# rfc822.Message. The base class design isn't amenable to
# customized behavior here so the method here is a copy of the
# base class code with a few small changes.
self.dict = {}
self.unixfrom = ''
self.headers = hlist = []
self.status = ''
headerseen = ""
firstline = 1
startofline = unread = tell = None
if hasattr(self.fp, 'unread'):
unread = self.fp.unread
elif self.seekable:
tell = self.fp.tell
while True:
if tell:
try:
startofline = tell()
except __HOLE__:
startofline = tell = None
self.seekable = 0
line = self.fp.readline()
if not line:
self.status = 'EOF in headers'
break
# Skip unix From name time lines
if firstline and line.startswith('From '):
self.unixfrom = self.unixfrom + line
continue
firstline = 0
if headerseen and line[0] in ' \t':
# XXX Not sure if continuation lines are handled properly
# for http and/or for repeating headers
# It's a continuation line.
hlist.append(line)
self.addcontinue(headerseen, line.strip())
continue
elif self.iscomment(line):
# It's a comment. Ignore it.
continue
elif self.islast(line):
# Note! No pushback here! The delimiter line gets eaten.
break
headerseen = self.isheader(line)
if headerseen:
# It's a legal header line, save it.
hlist.append(line)
self.addheader(headerseen, line[len(headerseen)+1:].strip())
continue
else:
# It's not a header line; throw it back and stop here.
if not self.dict:
self.status = 'No headers'
else:
self.status = 'Non-header line where header expected'
# Try to undo the read.
if unread:
unread(line)
elif tell:
self.fp.seek(startofline)
else:
self.status = self.status + '; bad seek'
break
|
IOError
|
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/httplib.py/HTTPMessage.readheaders
|
7,392
|
def _read_status(self):
# Initialize with Simple-Response defaults
line = self.fp.readline()
if self.debuglevel > 0:
print "reply:", repr(line)
if not line:
# Presumably, the server closed the connection before
# sending a valid response.
raise BadStatusLine(line)
try:
[version, status, reason] = line.split(None, 2)
except ValueError:
try:
[version, status] = line.split(None, 1)
reason = ""
except ValueError:
# empty version will cause next test to fail and status
# will be treated as 0.9 response.
version = ""
if not version.startswith('HTTP/'):
if self.strict:
self.close()
raise BadStatusLine(line)
else:
# assume it's a Simple-Response from an 0.9 server
self.fp = LineAndFileWrapper(line, self.fp)
return "HTTP/0.9", 200, ""
# The status code is a three-digit number
try:
status = int(status)
if status < 100 or status > 999:
raise BadStatusLine(line)
except __HOLE__:
raise BadStatusLine(line)
return version, status, reason
|
ValueError
|
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/httplib.py/HTTPResponse._read_status
|
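A standalone sketch of the status-line parse performed above; the sample line is made up but follows RFC 2616.
line = 'HTTP/1.1 200 OK\r\n'
version, status, reason = line.split(None, 2)
status = int(status)  # must be a three-digit number
assert version.startswith('HTTP/') and 100 <= status <= 999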
7,393
|
def begin(self):
if self.msg is not None:
# we've already started reading the response
return
# read until we get a non-100 response
while True:
version, status, reason = self._read_status()
if status != CONTINUE:
break
# skip the header from the 100 response
while True:
skip = self.fp.readline().strip()
if not skip:
break
if self.debuglevel > 0:
print "header:", skip
self.status = status
self.reason = reason.strip()
if version == 'HTTP/1.0':
self.version = 10
elif version.startswith('HTTP/1.'):
self.version = 11 # use HTTP/1.1 code for HTTP/1.x where x>=1
elif version == 'HTTP/0.9':
self.version = 9
else:
raise UnknownProtocol(version)
if self.version == 9:
self.length = None
self.chunked = 0
self.will_close = 1
self.msg = HTTPMessage(StringIO())
return
self.msg = HTTPMessage(self.fp, 0)
if self.debuglevel > 0:
for hdr in self.msg.headers:
print "header:", hdr,
# don't let the msg keep an fp
self.msg.fp = None
# are we using the chunked-style of transfer encoding?
tr_enc = self.msg.getheader('transfer-encoding')
if tr_enc and tr_enc.lower() == "chunked":
self.chunked = 1
self.chunk_left = None
else:
self.chunked = 0
# will the connection close at the end of the response?
self.will_close = self._check_close()
# do we have a Content-Length?
# NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
length = self.msg.getheader('content-length')
if length and not self.chunked:
try:
self.length = int(length)
except __HOLE__:
self.length = None
else:
if self.length < 0: # ignore nonsensical negative lengths
self.length = None
else:
self.length = None
# does the body have a fixed length? (of zero)
if (status == NO_CONTENT or status == NOT_MODIFIED or
100 <= status < 200 or # 1xx codes
self._method == 'HEAD'):
self.length = 0
# if the connection remains open, and we aren't using chunked, and
# a content-length was not provided, then assume that the connection
# WILL close.
if not self.will_close and \
not self.chunked and \
self.length is None:
self.will_close = 1
|
ValueError
|
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/httplib.py/HTTPResponse.begin
|
7,394
|
def _read_chunked(self, amt):
assert self.chunked != _UNKNOWN
chunk_left = self.chunk_left
value = ''
# XXX This accumulates chunks by repeated string concatenation,
# which is not efficient as the number or size of chunks gets big.
while True:
if chunk_left is None:
line = self.fp.readline()
i = line.find(';')
if i >= 0:
line = line[:i] # strip chunk-extensions
try:
chunk_left = int(line, 16)
except __HOLE__:
# close the connection as protocol synchronisation is
# probably lost
self.close()
raise IncompleteRead(value)
if chunk_left == 0:
break
if amt is None:
value += self._safe_read(chunk_left)
elif amt < chunk_left:
value += self._safe_read(amt)
self.chunk_left = chunk_left - amt
return value
elif amt == chunk_left:
value += self._safe_read(amt)
self._safe_read(2) # toss the CRLF at the end of the chunk
self.chunk_left = None
return value
else:
value += self._safe_read(chunk_left)
amt -= chunk_left
# we read the whole chunk, get another
self._safe_read(2) # toss the CRLF at the end of the chunk
chunk_left = None
# read and discard trailer up to the CRLF terminator
### note: we shouldn't have any trailers!
while True:
line = self.fp.readline()
if not line:
# a vanishingly small number of sites EOF without
# sending the trailer
break
if line == '\r\n':
break
# we read everything; close the "file"
self.close()
return value
|
ValueError
|
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/httplib.py/HTTPResponse._read_chunked
|
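A compact in-memory sketch of the chunked-transfer decoding loop above, ignoring chunk extensions and trailers; the byte string is the classic "Wikipedia" example body.
import io

fp = io.BytesIO(b'4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n')
body = b''
while True:
    size = int(fp.readline().split(b';')[0], 16)  # hex chunk size, extensions stripped
    if size == 0:
        break
    body += fp.read(size)
    fp.read(2)  # discard the CRLF that terminates each chunk
print(body)  # b'Wikipedia'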
7,395
|
def _set_hostport(self, host, port):
if port is None:
i = host.rfind(':')
j = host.rfind(']') # ipv6 addresses have [...]
if i > j:
try:
port = int(host[i+1:])
except __HOLE__:
raise InvalidURL("nonnumeric port: '%s'" % host[i+1:])
host = host[:i]
else:
port = self.default_port
if host and host[0] == '[' and host[-1] == ']':
host = host[1:-1]
self.host = host
self.port = port
|
ValueError
|
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/httplib.py/HTTPConnection._set_hostport
|
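The rfind comparison above is what lets bracketed IPv6 literals through: the last ':' only marks a port if it comes after the closing ']'. A quick standalone check of the same logic:
host = '[2001:db8::1]:8080'
i, j = host.rfind(':'), host.rfind(']')
port = int(host[i + 1:]) if i > j else 80  # 80 stands in for default_port here
host = host[:i] if i > j else host
print(host.strip('[]'), port)   # 2001:db8::1 8080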
7,396
|
def _send_request(self, method, url, body, headers):
# honour explicitly requested Host: and Accept-Encoding headers
header_names = dict.fromkeys([k.lower() for k in headers])
skips = {}
if 'host' in header_names:
skips['skip_host'] = 1
if 'accept-encoding' in header_names:
skips['skip_accept_encoding'] = 1
self.putrequest(method, url, **skips)
if body and ('content-length' not in header_names):
thelen=None
try:
thelen=str(len(body))
except __HOLE__, te:
# If this is a file-like object, try to
# fstat its file descriptor
import os
try:
thelen = str(os.fstat(body.fileno()).st_size)
except (AttributeError, OSError):
# Don't send a length if this failed
if self.debuglevel > 0: print "Cannot stat!!"
if thelen is not None:
self.putheader('Content-Length',thelen)
for hdr, value in headers.iteritems():
self.putheader(hdr, value)
self.endheaders()
if body:
self.send(body)
|
TypeError
|
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/httplib.py/HTTPConnection._send_request
|
7,397
|
def test():
"""Test this module.
A hodge podge of tests collected here, because they have too many
external dependencies for the regular test suite.
"""
import sys
import getopt
opts, args = getopt.getopt(sys.argv[1:], 'd')
dl = 0
for o, a in opts:
if o == '-d': dl = dl + 1
host = 'www.python.org'
selector = '/'
if args[0:]: host = args[0]
if args[1:]: selector = args[1]
h = HTTP()
h.set_debuglevel(dl)
h.connect(host)
h.putrequest('GET', selector)
h.endheaders()
status, reason, headers = h.getreply()
print 'status =', status
print 'reason =', reason
print "read", len(h.getfile().read())
print
if headers:
for header in headers.headers: print header.strip()
print
# minimal test that code to extract host from url works
class HTTP11(HTTP):
_http_vsn = 11
_http_vsn_str = 'HTTP/1.1'
h = HTTP11('www.python.org')
h.putrequest('GET', 'http://www.python.org/~jeremy/')
h.endheaders()
h.getreply()
h.close()
try:
import ssl
except __HOLE__:
pass
else:
for host, selector in (('sourceforge.net', '/projects/python'),
):
print "https://%s%s" % (host, selector)
hs = HTTPS()
hs.set_debuglevel(dl)
hs.connect(host)
hs.putrequest('GET', selector)
hs.endheaders()
status, reason, headers = hs.getreply()
print 'status =', status
print 'reason =', reason
print "read", len(hs.getfile().read())
print
if headers:
for header in headers.headers: print header.strip()
print
|
ImportError
|
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/httplib.py/test
|
7,398
|
def plotXY(data, size=(280, 640), margin=25, name="data", labels=[], skip=[],
showmax=[], bg=None, label_ndigits=[], showmax_digits=[]):
for x,y in data:
if len(x) < 2 or len(y) < 2:
return
n_plots = len(data)
w = float(size[1])
h = size[0]/float(n_plots)
z = np.zeros((size[0],size[1],3))
if isinstance(bg,np.ndarray):
wd = int(bg.shape[1]/bg.shape[0]*h )
bg = cv2.resize(bg,(wd,int(h)))
if len(bg.shape) == 3:
r = combine(bg[:,:,0],z[:,:,0])
g = combine(bg[:,:,1],z[:,:,1])
b = combine(bg[:,:,2],z[:,:,2])
else:
r = combine(bg,z[:,:,0])
g = combine(bg,z[:,:,1])
b = combine(bg,z[:,:,2])
z = cv2.merge([r,g,b])[:,:-wd,]
i = 0
P = []
for x,y in data:
x = np.array(x)
y = -np.array(y)
xx = (w-2*margin)*(x - x.min()) / (x.max() - x.min())+margin
yy = (h-2*margin)*(y - y.min()) / (y.max() - y.min())+margin + i*h
mx = max(yy)
if labels:
if labels[i]:
for ii in xrange(len(x)):
if ii%skip[i] == 0:
col = (255,255,255)
ss = '{0:.%sf}' % label_ndigits[i]
ss = ss.format(x[ii])
cv2.putText(z,ss,(int(xx[ii]),int((i+1)*h)),
cv2.FONT_HERSHEY_PLAIN,1,col)
if showmax:
if showmax[i]:
col = (0,255,0)
ii = np.argmax(-y)
ss = '{0:.%sf} %s' % (showmax_digits[i], showmax[i])
ss = ss.format(x[ii])
#"%0.0f %s" % (x[ii], showmax[i])
cv2.putText(z,ss,(int(xx[ii]),int((yy[ii]))),
cv2.FONT_HERSHEY_PLAIN,2,col)
try:
pts = np.array([[x_, y_] for x_, y_ in zip(xx,yy)],np.int32)
i+=1
P.append(pts)
except __HOLE__:
pass #temporary
"""
#Polylines seems to have some trouble rendering multiple polys for some people
for p in P:
cv2.polylines(z, [p], False, (255,255,255),1)
"""
#hack-y alternative:
for p in P:
for i in xrange(len(p)-1):
cv2.line(z,tuple(p[i]),tuple(p[i+1]), (255,255,255),1)
cv2.imshow(name,z)
|
ValueError
|
dataset/ETHPy150Open thearn/webcam-pulse-detector/lib/interface.py/plotXY
|
7,399
|
def _get_setitem_indexer(self, key):
if self.axis is not None:
return self._convert_tuple(key, is_setter=True)
axis = self.obj._get_axis(0)
if isinstance(axis, MultiIndex):
try:
return axis.get_loc(key)
except Exception:
pass
if isinstance(key, tuple) and not self.ndim < len(key):
return self._convert_tuple(key, is_setter=True)
if isinstance(key, range):
return self._convert_range(key, is_setter=True)
try:
return self._convert_to_indexer(key, is_setter=True)
except __HOLE__ as e:
# invalid indexer type vs 'other' indexing errors
if 'cannot do' in str(e):
raise
raise IndexingError(key)
|
TypeError
|
dataset/ETHPy150Open pydata/pandas/pandas/core/indexing.py/_NDFrameIndexer._get_setitem_indexer
|