| Unnamed: 0 (int64, 0–10k) | function (string, lengths 79–138k) | label (string, 20 classes) | info (string, lengths 42–261) |
|---|---|---|---|
1,700
|
@expose("peek-byte", [default(values.W_InputPort, None),
default(values.W_Fixnum, values.W_Fixnum.ZERO)],
simple=False)
def peek_byte(w_port, w_skip, env, cont):
try:
return do_peek(w_port, True, w_skip.value, env, cont)
except __HOLE__:
raise SchemeException("peek-byte: string is not a well-formed UTF-8 encoding")
|
UnicodeDecodeError
|
dataset/ETHPy150Open samth/pycket/pycket/prims/input_output.py/peek_byte
|
1,701
|
@expose("file-size", [values.W_Object])
def file_size(obj):
if not is_path_string(obj):
raise SchemeException("file-size: expected path string")
path = extract_path(obj)
try:
size = os.path.getsize(path)
except __HOLE__:
raise SchemeException("file-size: file %s does not exists" % path)
intsize = intmask(size)
if intsize == size:
return values.W_Fixnum(intsize)
return values.W_Bignum(rbigint.fromrarith_int(size))
|
OSError
|
dataset/ETHPy150Open samth/pycket/pycket/prims/input_output.py/file_size
|
1,702
|
def WaitFor(condition, timeout):
"""Waits for up to |timeout| secs for the function |condition| to return True.
Polling frequency is (elapsed_time / 10), with a min of .1s and max of 5s.
Returns:
Result of |condition| function (if present).
"""
min_poll_interval = 0.1
max_poll_interval = 5
output_interval = 300
def GetConditionString():
if condition.__name__ == '<lambda>':
try:
return inspect.getsource(condition).strip()
except __HOLE__:
pass
return condition.__name__
start_time = time.time()
last_output_time = start_time
while True:
res = condition()
if res:
return res
now = time.time()
elapsed_time = now - start_time
last_output_elapsed_time = now - last_output_time
if elapsed_time > timeout:
raise TimeoutException('Timed out while waiting %ds for %s.' %
(timeout, GetConditionString()))
if last_output_elapsed_time > output_interval:
logging.info('Continuing to wait %ds for %s. Elapsed: %ds.',
timeout, GetConditionString(), elapsed_time)
last_output_time = time.time()
poll_interval = min(max(elapsed_time / 10., min_poll_interval),
max_poll_interval)
time.sleep(poll_interval)
|
IOError
|
dataset/ETHPy150Open chromium/web-page-replay/util.py/WaitFor
|
1,703
|
def _resolve_refs(self, rule_map, expr, done):
"""Return an expression with all its lazy references recursively
resolved.
Resolve any lazy references in the expression ``expr``, recursing into
all subexpressions.
:arg done: The set of Expressions that have already been or are
currently being resolved, to ward off redundant work and prevent
infinite recursion for circular refs
"""
if isinstance(expr, LazyReference):
label = unicode(expr)
try:
reffed_expr = rule_map[label]
except __HOLE__:
raise UndefinedLabel(expr)
return self._resolve_refs(rule_map, reffed_expr, done)
else:
if getattr(expr, 'members', ()) and expr not in done:
# Prevents infinite recursion for circular refs. At worst, one
# of `expr.members` can refer back to `expr`, but it can't go
# any farther.
done.add(expr)
expr.members = [self._resolve_refs(rule_map, member, done)
for member in expr.members]
return expr
|
KeyError
|
dataset/ETHPy150Open erikrose/parsimonious/parsimonious/grammar.py/RuleVisitor._resolve_refs
|
1,704
|
def _RemoveV4Ending(addr_string):
"""Replace v4 endings with v6 equivalents."""
match = V4_ENDING.match(addr_string)
if match:
ipv4_addr = ".".join(match.groups()[1:])
try:
socket.inet_aton(ipv4_addr)
except (socket.error, __HOLE__):
raise socket.error("Illegal IPv4 extension: %s" % addr_string)
if int(match.group(2)) == 0:
raise socket.error("IPv4 can't start with 0")
return "%s:%04x:%04x" % (match.group("v6"),
int(match.group(2)) * 256 + int(match.group(3)),
int(match.group(4)) * 256 + int(match.group(5)))
return addr_string
|
ValueError
|
dataset/ETHPy150Open google/grr/grr/lib/ipv6_utils.py/_RemoveV4Ending
|
1,705
|
def InetAtoN(addr_string):
"""Convert ipv6 string to packed bytes.
Args:
addr_string: IPv6 address string
Returns:
bytestring representing address
Raises:
socket.error: on bad IPv6 address format
"""
if not addr_string:
raise socket.error("Empty address string")
if BAD_SINGLE_COLON.match(addr_string):
raise socket.error("Start or ends with single colon")
if addr_string == "::":
return ("0" * 32).decode("hex_codec")
addr_string = _RemoveV4Ending(addr_string)
addr_string = _StripLeadingOrTrailingDoubleColons(addr_string)
addr_string = _ZeroPad(addr_string)
try:
return addr_string.decode("hex_codec")
except __HOLE__:
raise socket.error("Error decoding: %s" % addr_string)
|
TypeError
|
dataset/ETHPy150Open google/grr/grr/lib/ipv6_utils.py/InetAtoN
|
1,706
|
def test_fminbound_scalar(self):
try:
optimize.fminbound(self.fun, np.zeros((1, 2)), 1)
self.fail("exception not raised")
except __HOLE__ as e:
assert_('must be scalar' in str(e))
x = optimize.fminbound(self.fun, 1, np.array(5))
assert_allclose(x, self.solution, atol=1e-6)
|
ValueError
|
dataset/ETHPy150Open scipy/scipy/scipy/optimize/tests/test_optimize.py/TestOptimizeScalar.test_fminbound_scalar
|
1,707
|
def get_collection(self, request):
"""
Encapsulates collection name.
"""
try:
# If no owner is specified in the request, we use the default from settings for now
# moving forward, we'll want to remove this fallback and require that the owner is specified
# from the owner uuid, we're looking up the internal identifier from the corresponding profile
#pdb.set_trace()
database = None
if (request and "datastore_owner__uuid" in request.GET):
profile, created = Profile.objects.get_or_create(uuid = request.GET["datastore_owner__uuid"])
database = profile.getDBName()
return db[database][self._meta.collection] if database is not None else None
except __HOLE__:
raise ImproperlyConfigured("Define a collection in your resource.")
|
AttributeError
|
dataset/ETHPy150Open HumanDynamics/openPDS/openpds/tastypie_mongodb/resources.py/MongoDBResource.get_collection
|
1,708
|
def undo(self):
"""
Undo the previous command
:raises: IndexError, if there are no objects to undo
"""
try:
c = self._command_stack.pop()
logging.getLogger(__name__).debug("Undo %s", c)
except __HOLE__:
raise IndexError("No commands to undo")
self._undo_stack.append(c)
c.undo(self._session)
self.notify('undo')
|
IndexError
|
dataset/ETHPy150Open glue-viz/glue/glue/core/command.py/CommandStack.undo
|
1,709
|
def redo(self):
"""
Redo the previously-undone command
:raises: IndexError, if there are no undone actions
"""
try:
c = self._undo_stack.pop()
logging.getLogger(__name__).debug("Undo %s", c)
except __HOLE__:
raise IndexError("No commands to redo")
result = c.do(self._session)
self._command_stack.append(c)
self.notify('redo')
return result
|
IndexError
|
dataset/ETHPy150Open glue-viz/glue/glue/core/command.py/CommandStack.redo
|
1,710
|
def get_active_window_id(self):
out = getoutput("xprop -root _NET_ACTIVE_WINDOW")
try:
win_id = out.split('#')[-1].split(',')[0].strip()
except __HOLE__:
return None
return win_id
|
ValueError
|
dataset/ETHPy150Open generalov/look-at/look_at/wmctrl.py/WmCtrl.get_active_window_id
|
1,711
|
@property
def wm_window_role(self):
if not self.window_role:
out = getoutput('xprop -id %s WM_WINDOW_ROLE' % self.id)
try:
_, value = out.split(' = ')
except __HOLE__:
# probably xprop returned an error
return ''
else:
self.window_role = value.strip('"')
return self.window_role
|
ValueError
|
dataset/ETHPy150Open generalov/look-at/look_at/wmctrl.py/Window.wm_window_role
|
1,712
|
def run(self):
po_files = []
if not self.output_file:
if self.locale:
po_files.append((self.locale,
os.path.join(self.output_dir, self.locale,
'LC_MESSAGES',
self.domain + '.po')))
else:
for locale in os.listdir(self.output_dir):
po_file = os.path.join(self.output_dir, locale,
'LC_MESSAGES',
self.domain + '.po')
if os.path.exists(po_file):
po_files.append((locale, po_file))
else:
po_files.append((self.locale, self.output_file))
domain = self.domain
if not domain:
domain = os.path.splitext(os.path.basename(self.input_file))[0]
infile = open(self.input_file, 'rb')
try:
template = read_po(infile)
finally:
infile.close()
if not po_files:
raise DistutilsOptionError('no message catalogs found')
for locale, filename in po_files:
self.log.info('updating catalog %s based on %s', filename, self.input_file)
infile = open(filename, 'rb')
try:
catalog = read_po(infile, locale=locale, domain=domain)
finally:
infile.close()
catalog.update(
template, self.no_fuzzy_matching,
update_header_comment=self.update_header_comment
)
tmpname = os.path.join(os.path.dirname(filename),
tempfile.gettempprefix() +
os.path.basename(filename))
tmpfile = open(tmpname, 'wb')
try:
try:
write_po(tmpfile, catalog,
ignore_obsolete=self.ignore_obsolete,
include_previous=self.previous, width=self.width)
finally:
tmpfile.close()
except:
os.remove(tmpname)
raise
try:
os.rename(tmpname, filename)
except __HOLE__:
# We're probably on Windows, which doesn't support atomic
# renames, at least not through Python
# If the error is in fact due to a permissions problem, that
# same error is going to be raised from one of the following
# operations
os.remove(filename)
shutil.copy(tmpname, filename)
os.remove(tmpname)
|
OSError
|
dataset/ETHPy150Open python-babel/babel/babel/messages/frontend.py/update_catalog.run
|
1,713
|
def is_middleware_class(middleware_class, middleware_path):
try:
middleware_cls = import_string(middleware_path)
except __HOLE__:
return
return issubclass(middleware_cls, middleware_class)
|
ImportError
|
dataset/ETHPy150Open django-debug-toolbar/django-debug-toolbar/debug_toolbar/settings.py/is_middleware_class
|
1,714
|
@classmethod
def from_node(cls, member, data):
blob = json.loads(data)
additional_endpoints = blob.get('additionalEndpoints')
if additional_endpoints is None:
raise ValueError("Expected additionalEndpoints in member data")
service_endpoint = blob.get('serviceEndpoint')
if service_endpoint is None:
raise ValueError("Expected serviceEndpoint in member data")
status = blob.get('status')
if status is None:
raise ValueError("Expected status in member data")
shard = blob.get('shard')
if shard is not None:
try:
shard = int(shard)
except __HOLE__:
ROOT_LOG.warn('Unable to parse shard from %r' % shard)
shard = None
return cls(
member=member,
service_endpoint=Endpoint(service_endpoint['host'], service_endpoint['port']),
additional_endpoints=dict((name, Endpoint(value['host'], value['port']))
for name, value in additional_endpoints.items()),
shard=shard,
status=status
)
|
ValueError
|
dataset/ETHPy150Open steveniemitz/scales/scales/loadbalancer/zookeeper.py/Member.from_node
|
1,715
|
def run(self, mapping):
"""
generated_filename will return a filename similar to this:
`20101107__wa__general__precinct.csv`
election will return a filename similar to this:
`20101102__wa__general__precinct`
"""
generated_filename = mapping['generated_filename']
election = mapping['election']
"""
bad_filenames[] holds the list of files who have content that's
hard to use (e.g. an .xls file with 10 sheets).
The edge cases will be taken care of later. The cases where there is
zero actual usable data will have to be rectified outside of the
loader module.
"""
bad_filenames = [
# The below are Excel (.xls) files that have results spread across
# multiple worksheets and in different structures from each other
'20070821__wa__primary.xls',
'20070821__wa__primary__county.xls',
'20080219__wa__primary__adams__precinct.xls',
'20080219__wa__primary__benton__precinct.xls',
'20080219__wa__primary__congressional_district_state_legislative.xls',
'20080219__wa__primary__douglas__precinct.xls',
'20080219__wa__primary__kitsap__precinct.xls',
'20080819__wa__primary__kitsap__precinct.xls',
'20080819__wa__primary__pierce__precinct.xls',
'20081104__wa__general__congressional_district.xls',
'20081104__wa__general__adams__precinct.xls',
'20091103__wa__general__clark__precinct.xls',
'20081104__wa__general__franklin__precinct.xls',
'20081104__wa__general__kittitas__precinct.xls',
'20081104__wa__general__kitsap__precinct.xls',
'20081104__wa__general__pierce__precinct.xls',
'20081104__wa__general__precinct.xls',
'20081104__wa__general__state_legislative.xls',
'20091103__wa__general__kitsap__precinct.xls',
'20091103__wa__general__pierce__precinct.xls',
'20101102__wa__general__kittitas___precinct.xls',
'20101102__wa__general__san_juan___precinct.xls',
'20100817__wa__primary__state_legislative.xls',
'20100817__wa__primary__congressional_district.xls',
'20111108__wa__general__clark___precinct.xlsx',
'20111108__wa__general__spokane___precinct.xlsx',
'20120807__wa__primary__congressional_district.xls',
'20120807__wa__primary__state_legislative.xls',
'20121106__wa__general__congressional_district.xls',
'20121106__wa__general__state_legislative.xls',
]
"""
Could try using `generated_filename.split(.)[-1]` instead of
os.path.splitext(election)[-1], since all filenames are
standardized. This would, of course, break if the file path includes
a full stop (period).
"""
# If files are 'bad', skip them
if any(x in generated_filename for x in bad_filenames):
loader = SkipLoader()
# If files are .xls(x), use the correct loader
elif os.path.splitext(
generated_filename)[-1].lower() in ('.xls', '.xlsx'):
loader = WALoaderExcel()
elif os.path.splitext(generated_filename)[-1].lower() == '.txt':
"""
We run into issues where King County provides > 1 million line
.txt files that break my machine's memory. We definitely need to
refactor, but for the moment we'll pass over said files.
"""
logger.info(
'Cannot do anything with {0}'.format(generated_filename))
loader = SkipLoader()
elif 'precinct' in generated_filename:
loader = WALoaderPrecincts()
elif any(s in election for s in [
'2000',
'2001',
'2002',
'2003',
'2004',
'2005',
'2006']):
loader = WALoaderPre2007()
elif os.path.splitext(
generated_filename)[-1].lower() in ('.csv', '.txt'):
loader = WALoaderPost2007()
else:
loader = SkipLoader()
"""
* UnboundLocalError: File passes through the elif statements, but is
not a file we have a loader class set up to handle at this point, so
loader.run(mapping) is called before it's mentioned
* IOError: File in quesiton does not exist. Seen when the mapping
a file path that recieved a 404 error
* unicodecsv.Error: Similar to UnboundLocalError, this error means
that the loader tried running but the csv parser could not parse
the file because of a null byte. See:
https://github.com/jdunck/python-unicodecsv/blob/master/unicodecsv/test.py#L222
* errors.InvalidOperation: When a file has no useful data, RawResult
is empty and mongodb refuses to load it.
Because of the if/else flow, sometimes we'll end up with multiple
UnboundLocalErrors. This should be changed so we only get the error
once.
"""
try:
loader.run(mapping)
except UnboundLocalError:
logger.error(
'\tUnsupported file type ({0})'
.format('UnboundLocalError'))
except __HOLE__:
logger.error(
'\tFile "{0}" does not exist'
.format(generated_filename))
except unicodecsv.Error:
logger.error(
'\tUnsupported file type "({0})"'
.format('unicodecsv.Error'))
except errors.InvalidOperation:
logger.error('\tNo raw results loaded')
|
IOError
|
dataset/ETHPy150Open openelections/openelections-core/openelex/us/wa/load.py/LoadResults.run
|
1,716
|
def normalize_district(header, office, row):
"""
Example of what we had before:
'district': '{0} {1}'.format(
self.district_offices[normalize_races(sh_val)],
"".join(map(str, [int(s) for s in sh_val.strip() if s.isdigit()][:2])
))})
normalize_district now provides a more standardized and clean API than
was the case with the mess before.
"""
norm_office = normalize_races(office)
dist_str = "".join(
map(str, [int(s) for s in office.strip() if s.isdigit()][:2]))
bth_regex = re.compile(r'((leg|con).*dis.*)', re.IGNORECASE)
leg_regex = re.compile(r'leg.*dis.*', re.IGNORECASE)
con_regex = re.compile(r'con.*dis.*', re.IGNORECASE)
if not row:
row = {}
try:
row[filter(lambda x: bth_regex.search(x), header)[0]]
if norm_office is 'U.S. Representative':
dist = row[filter(lambda x: leg_regex.search(x), header)[0]]
return dist
if norm_office in ('State Representative', 'State Senate'):
dist = row[filter(lambda x: con_regex.search(x), header)[0]]
return dist
except __HOLE__:
if dist_str is "":
return None
if int(dist_str) > 49:
dist_str = dist_str[:1]
return dist_str
|
IndexError
|
dataset/ETHPy150Open openelections/openelections-core/openelex/us/wa/load.py/normalize_district
|
1,717
|
def load(self):
self._common_kwargs = self._build_common_election_kwargs()
self._common_kwargs['reporting_level'] = 'precinct'
results = []
with self._file_handle as csvfile:
party_flag = 0
district_flag = 0
reader = unicodecsv.DictReader(
csvfile, encoding='latin-1', delimiter=',')
# Declare column indices before the loop so we aren't making
# a method call for each line in the file
self.header = [x.replace('"', '') for x in reader.fieldnames]
self.votes_index = normalize_votes(self.header)
self.contest_index = normalize_contest(self.header)
self.candidate_index = normalize_candidate(
self.header)
self.precinct_index = normalize_precinct(
self.header)
try:
self.party_index = normalize_party(self.header)
except __HOLE__:
pass
for row in reader:
if self._skip_row(row):
continue
else:
self.jurisdiction = row[self.precinct_index].strip()
votes = int(row[self.votes_index].strip())
rr_kwargs = self._common_kwargs.copy()
rr_kwargs.update(self._build_contest_kwargs(row))
rr_kwargs.update(self._build_candidate_kwargs(row))
rr_kwargs.update({
'reporting_level': 'precinct',
'votes': votes,
'ocd_id': "{}".format(self._get_ocd_id(
self.jurisdiction,
precinct=row[self.precinct_index]))
})
try:
rr_kwargs.update({
'party': row[self.party_index].strip()
})
except (IndexError, KeyError):
party_flag = 1
try:
rr_kwargs.update(
{'district': normalize_district(self.header, row[self.contest_index], row)})
except KeyError:
district_flag = 1
results.append(RawResult(**rr_kwargs))
if 0 is not party_flag:
logger.info('Some rows did not contain party info.')
if 0 is not district_flag:
logger.info('Some rows did not contain district info.')
"""
Many county files *only* have local races, such as schoolboard or
fire chief races. Since openstates does not want these results,
the entire files end up being skipped. To clarify the error message,
we print our own if RawResult tries to insert nothing into mongodb
"""
try:
RawResult.objects.insert(results)
except errors.InvalidOperation:
logger.error('\tNo raw results loaded')
|
IndexError
|
dataset/ETHPy150Open openelections/openelections-core/openelex/us/wa/load.py/WALoaderPrecincts.load
|
1,718
|
def load(self):
with self._file_handle as csvfile:
results = []
reader = unicodecsv.DictReader(csvfile, encoding='latin-1',
delimiter=',')
self.header = [x.replace('"', '') for x in reader.fieldnames]
try:
self.contest_index = normalize_contest(self.header)
except __HOLE__:
pass
for row in reader:
if self._skip_row(row):
continue
else:
results.append(self._prep_county_results(row))
try:
RawResult.objects.insert(results)
except errors.InvalidOperation:
logger.error('\tNo raw results loaded')
|
IndexError
|
dataset/ETHPy150Open openelections/openelections-core/openelex/us/wa/load.py/WALoaderPre2007.load
|
1,719
|
def _prep_county_results(self, row):
"""
In Washington our general results are reported by county instead
of precinct, although precinct-level vote tallies are available.
"""
kwargs = self._base_kwargs(row)
county = str(row['jurisdiction'])
kwargs.update({
'reporting_level': row['reporting_level'],
'jurisdiction': county,
'party': row['partycode'].strip(),
'votes': int(row['votes'].strip()),
'ocd_id': "{}".format(self._get_ocd_id(county))
})
try:
kwargs.update({
'district': normalize_district(self.header, row[self.contest_index], row)
})
except __HOLE__:
pass
return RawResult(**kwargs)
|
KeyError
|
dataset/ETHPy150Open openelections/openelections-core/openelex/us/wa/load.py/WALoaderPre2007._prep_county_results
|
1,720
|
def load(self):
self._common_kwargs = self._build_common_election_kwargs()
self._common_kwargs['reporting_level'] = 'county'
results = []
with self._file_handle as csvfile:
district_flag = 0
reader = unicodecsv.DictReader(
csvfile, encoding='latin-1', delimiter=',')
self.header = [x.replace('"', '') for x in reader.fieldnames]
self.contest_index = normalize_contest(self.header)
for row in reader:
if self._skip_row(row):
continue
else:
rr_kwargs = self._common_kwargs.copy()
rr_kwargs['primary_party'] = row['Party'].strip()
rr_kwargs.update(self._build_contest_kwargs(row))
rr_kwargs.update(self._build_candidate_kwargs(row))
rr_kwargs.update({
'party': row['Party'].strip(),
'votes': int(row['Votes'].strip()),
'ocd_id': "{}".format(self._get_ocd_id(rr_kwargs['jurisdiction'])),
})
try:
rr_kwargs.update(
{'district': normalize_district(self.header, row[self.contest_index], row)})
except __HOLE__:
district_flag = 1
results.append(RawResult(**rr_kwargs))
if 0 is not district_flag:
logger.info('Some rows did not contain district info.')
"""
Many county files *only* have local races, such as schoolboard or
fire chief races. Since openstates does not want these results,
the entire files end up being skipped. To clarify the error message,
we print our own if RawResult tries to insert nothing into mongodb
"""
try:
RawResult.objects.insert(results)
except errors.InvalidOperation:
logger.error('\tNo raw results loaded')
|
KeyError
|
dataset/ETHPy150Open openelections/openelections-core/openelex/us/wa/load.py/WALoaderPost2007.load
|
1,721
|
def _build_contest_kwargs(self, row):
"""
if 'County' in self.reader.fieldnames:
jurisdiction = row['County']
else:
jurisdiction = row['JurisdictionName']
The above is the same as the code below, except a try/catch is quicker
than an if/else statement. Plus, Python is EAFP, not LBYL.
"""
try:
jurisdiction = row['County'].strip()
except __HOLE__:
name_list = self.source.split('__')[-2:]
jurisdiction = '{0} {1}'.format(
name_list[0],
name_list[1].split('.')[0])
return {
'office': row['Race'].strip(),
'jurisdiction': jurisdiction
}
|
KeyError
|
dataset/ETHPy150Open openelections/openelections-core/openelex/us/wa/load.py/WALoaderPost2007._build_contest_kwargs
|
1,722
|
def load(self):
xlsfile = xlrd.open_workbook(self._xls_file_path)
self._common_kwargs = self._build_common_election_kwargs()
# Set the correct reporting level based on file name
if 'precinct' in self.mapping['generated_filename']:
reporting_level = 'precinct'
else:
reporting_level = 'county'
self._common_kwargs['reporting_level'] = reporting_level
results = []
sheet = xlsfile.sheet_by_index(0)
"""
I ran into an issue where RawResult wasn't loading any results for my
.xls files. I hypothesized that the _skip_row method was, for whatever
reason, skipping all the results. I was correct, and found out that
the indices of an Excel sheet (through the xlrd module) need to be
integers, not string. My normalzing class returns strings, thus
causing _skip_row to always return false as xlrd couldn't do
anything with a string.
self.header is a list, and so I run the list through my normalzing
class which returns a list with one value (the column we want). I
turn that list value into a string and find the index of that string
within the header list.
That returns the correct integer value for the column which holds the
contest name.
"""
self.header = sheet.row_values(0)
self.votes_index = normalize_index(
self.header,
normalize_votes)
self.contest_index = normalize_index(
self.header,
normalize_contest)
self.candidate_index = normalize_index(
self.header,
normalize_candidate)
self.precinct_index = normalize_index(
self.header,
normalize_precinct)
self.jurisdiction_index = normalize_index(
self.header,
normalize_precinct)
try:
self.party_index = normalize_index(
self.header,
normalize_precinct)
except __HOLE__:
pass
for row in xrange(sheet.nrows):
if self._skip_row(row, sheet):
continue
else:
votes = int(sheet.cell(rowx=row, colx=self.votes_index).value)
rr_kwargs = self._common_kwargs.copy()
rr_kwargs.update(self._build_candidate_kwargs(row, sheet))
rr_kwargs.update(self._build_contest_kwargs(row, sheet))
rr_kwargs.update({
'votes': votes,
'ocd_id': "{}".format(self._get_ocd_id(rr_kwargs['jurisdiction']))
})
# Get party
try:
party = str(sheet.cell(
rowx=row,
colx=self.party_index).value).strip()
rr_kwargs.update({
'party': party
})
except TypeError:
"""
Should this be implemented?
Would need to extract the error message from the loop
to avoid potentially printing the message over 1,000 times
"""
# logger.info('No party')
pass
try:
sh_val = sheet.cell(
rowx=row,
colx=self.contest_index).value
rr_kwargs.update(
{'district': '{}'.format(normalize_district(self.header, sh_val, row=False))})
except KeyError:
pass
RawResult.objects.insert(results)
|
IndexError
|
dataset/ETHPy150Open openelections/openelections-core/openelex/us/wa/load.py/WALoaderExcel.load
|
1,723
|
def distance(self, v1, v2):
""" Returns the cached distance between two vectors.
"""
try:
# Two Vector objects for which the distance was already calculated.
d = self._cache[(v1.id, v2.id)]
except KeyError:
# Two Vector objects for which the distance has not been calculated.
d = self._cache[(v1.id, v2.id)] = distance(v1, v2, method=self.method)
except __HOLE__:
# No "id" property, so not a Vector but a plain dict.
d = distance(v1, v2, method=self.method)
return d
|
AttributeError
|
dataset/ETHPy150Open clips/pattern/pattern/vector/__init__.py/DistanceMap.distance
|
1,724
|
def hierarchical(vectors, k=1, iterations=1000, distance=COSINE, **kwargs):
""" Returns a Cluster containing k items (vectors or clusters with nested items).
With k=1, the top-level cluster contains a single cluster.
"""
id = sequence()
features = kwargs.get("features", _features(vectors))
clusters = Cluster((v for v in shuffled(vectors)))
centroids = [(next(id), v) for v in clusters]
map = {}
for _ in range(iterations):
if len(clusters) <= max(k, 1):
break
nearest, d0 = None, None
for i, (id1, v1) in enumerate(centroids):
for j, (id2, v2) in enumerate(centroids[i+1:]):
# Cache the distance calculations between vectors.
# This is identical to DistanceMap.distance(),
# but it is faster in the inner loop to use it directly.
try:
d = map[(id1, id2)]
except __HOLE__:
d = map[(id1, id2)] = _distance(v1, v2, method=distance)
if d0 is None or d < d0:
nearest, d0 = (i, j+i+1), d
# Pairs of nearest clusters are merged as we move up the hierarchy:
i, j = nearest
merged = Cluster((clusters[i], clusters[j]))
clusters.pop(j)
clusters.pop(i)
clusters.append(merged)
# Cache the center of the new cluster.
v = centroid(merged.flatten(), features)
centroids.pop(j)
centroids.pop(i)
centroids.append((next(id), v))
return clusters
#from pattern.vector import Vector
#
#v1 = Vector(wings=0, beak=0, claws=1, paws=1, fur=1) # cat
#v2 = Vector(wings=0, beak=0, claws=0, paws=1, fur=1) # dog
#v3 = Vector(wings=1, beak=1, claws=1, paws=0, fur=0) # bird
#
#print(hierarchical([v1, v2, v3]))
#### CLASSIFIER ####################################################################################
# Classification can be used to predict the label of an unlabeled document.
# Classification is a supervised machine learning method that uses labeled documents
# (i.e., Document objects with a type) as training examples to statistically predict
# the label (type, class) of new documents, based on their similarity to the training examples
# using a distance metric (e.g., cosine similarity).
#--- CLASSIFIER BASE CLASS -------------------------------------------------------------------------
# The default baseline (i.e., the default predicted class) is the most frequent class:
|
KeyError
|
dataset/ETHPy150Open clips/pattern/pattern/vector/__init__.py/hierarchical
|
1,725
|
def get(self, option):
try:
stdout, stderr = subprocess.Popen(
[self.cmd, option],
stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
except __HOLE__ as ex:
# e.g., [Errno 2] No such file or directory
raise OSError("Could not find geos-config script")
if stderr and not stdout:
raise ValueError(stderr.strip())
if sys.version_info[0] >= 3:
result = stdout.decode('ascii').strip()
else:
result = stdout.strip()
log.debug('%s %s: %r', self.cmd, option, result)
return result
|
OSError
|
dataset/ETHPy150Open Toblerity/Shapely/setup.py/GEOSConfig.get
|
1,726
|
def test_assertEqual_incomparable(self):
apple = MockEquality('apple')
orange = ['orange']
try:
self.assertEqual(apple, orange)
except self.failureException:
self.fail("Fail raised when ValueError ought to have been raised.")
except __HOLE__:
# good. error not swallowed
pass
else:
self.fail("Comparing %r and %r should have raised an exception"
% (apple, orange))
|
ValueError
|
dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/trial/test/test_assertions.py/TestSynchronousAssertions.test_assertEqual_incomparable
|
1,727
|
def test_failUnlessRaises_unexpected(self):
try:
self.failUnlessRaises(ValueError, self._raiseError, TypeError)
except __HOLE__:
self.fail("failUnlessRaises shouldn't re-raise unexpected "
"exceptions")
except self.failureException:
# what we expect
pass
else:
self.fail("Expected exception wasn't raised. Should have failed")
|
TypeError
|
dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/trial/test/test_assertions.py/TestSynchronousAssertions.test_failUnlessRaises_unexpected
|
1,728
|
def test_skew(self):
try:
from scipy.stats import skew
except __HOLE__:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
|
ImportError
|
dataset/ETHPy150Open pydata/pandas/pandas/tests/test_panel4d.py/SafeForLongAndSparse.test_skew
|
1,729
|
def build_file_response(path,
cache_timeout=None,
cached_modify_time=None,
mimetype=None,
default_text_mime=DEFAULT_TEXT_MIME,
default_binary_mime=DEFAULT_BINARY_MIME,
file_wrapper=FileWrapper,
response_type=Response):
resp = response_type('')
if cache_timeout and cached_modify_time:
try:
mtime = get_file_mtime(path)
except (ValueError, IOError, OSError): # TODO: winnow this down
raise Forbidden(is_breaking=False)
resp.cache_control.public = True
if mtime <= cached_modify_time:
resp.status_code = 304
resp.cache_control.max_age = cache_timeout
return resp
if not isfile(path):
raise NotFound(is_breaking=False)
try:
file_obj = open(path, 'rb')
mtime = get_file_mtime(path)
fsize = os.path.getsize(path)
except (ValueError, IOError, __HOLE__):
raise Forbidden(is_breaking=False)
if not mimetype:
mimetype, encoding = mimetypes.guess_type(path)
if not mimetype:
peeked = peek_file(file_obj, 1024)
is_binary = is_binary_string(peeked)
if peeked and is_binary:
mimetype = default_binary_mime
else:
mimetype = default_text_mime
resp.response = file_wrapper(file_obj)
resp.content_type = mimetype
resp.content_length = fsize
resp.last_modified = mtime
resp.cache_control.max_age = cache_timeout
return resp
|
OSError
|
dataset/ETHPy150Open mahmoud/clastic/clastic/static.py/build_file_response
|
1,730
|
def get_file_response(self, path, request):
try:
if not isinstance(path, basestring):
path = '/'.join(path)
full_path = find_file(self.search_paths, path)
if full_path is None:
raise NotFound(is_breaking=False)
except (__HOLE__, IOError, OSError):
raise Forbidden(is_breaking=False)
bfr = build_file_response
resp = bfr(full_path,
cache_timeout=self.cache_timeout,
cached_modify_time=request.if_modified_since,
mimetype=None,
default_text_mime=self.default_text_mime,
default_binary_mime=self.default_binary_mime,
file_wrapper=request.environ.get('wsgi.file_wrapper',
FileWrapper))
return resp
|
ValueError
|
dataset/ETHPy150Open mahmoud/clastic/clastic/static.py/StaticApplication.get_file_response
|
1,731
|
def _get_msg(self, msgid, lc):
"""Get message identified by msgid in a specific locale.
:param: msgid (string) the identifier of a string.
:param: lc (string) the locale.
:return: (string) the message from the .po file.
"""
# obtain the content in the proper language
try:
t = gettext.translation(lc, self.i18ndir, languages=[lc])
_ = t.ugettext
msgstr = _(msgid)
return msgstr
except __HOLE__ as e:
raise ConfigError("%s" % str(e))
|
IOError
|
dataset/ETHPy150Open TheTorProject/gettor/gettor/smtp.py/SMTP._get_msg
|
1,732
|
def _send_email(self, from_addr, to_addr, subject, msg, attach=None):
"""Send an email.
Take a 'from' and 'to' addresses, a subject and the content, creates
the email and send it.
:param: from_addr (string) the address of the sender.
:param: to_addr (string) the address of the recipient.
:param: subject (string) the subject of the email.
:param: msg (string) the content of the email.
:param: attach (string) the path of the mirrors list.
"""
email_obj = self._create_email(from_addr, to_addr, subject, msg)
if(attach):
# for now, the only email with attachment is the one for mirrors
try:
part = MIMEBase('application', "octet-stream")
part.set_payload(open(attach, "rb").read())
Encoders.encode_base64(part)
part.add_header(
'Content-Disposition',
'attachment; filename="mirrors.txt"'
)
email_obj.attach(part)
except __HOLE__ as e:
raise SendEmailError('Error with mirrors: %s' % str(e))
try:
s = smtplib.SMTP("localhost")
s.sendmail(from_addr, to_addr, email_obj.as_string())
s.quit()
except smtplib.SMTPException as e:
raise SendEmailError("Error with SMTP: %s" % str(e))
|
IOError
|
dataset/ETHPy150Open TheTorProject/gettor/gettor/smtp.py/SMTP._send_email
|
1,733
|
def getattrd(obj, name, default=sentinel):
"""
Same as getattr(), but allows dot notation lookup
Source: http://stackoverflow.com/a/14324459
"""
try:
return functools.reduce(getattr, name.split("."), obj)
except __HOLE__ as e:
if default is not sentinel:
return default
raise
|
AttributeError
|
dataset/ETHPy150Open singingwolfboy/flask-dance/flask_dance/utils.py/getattrd
|
1,734
|
def main(options, args):
logger = log.get_logger(name="mosaic", options=options)
img_mosaic = mosaic(logger, args, fov_deg=options.fov)
if options.outfile:
outfile = options.outfile
io_fits.use('astropy')
logger.info("Writing output to '%s'..." % (outfile))
try:
os.remove(outfile)
except __HOLE__:
pass
img_mosaic.save_as_file(outfile)
|
OSError
|
dataset/ETHPy150Open ejeschke/ginga/ginga/util/mosaic.py/main
|
1,735
|
def get_params(config_filename):
params = KeeperParams()
params.config_filename = 'config.json'
if config_filename:
params.config_filename = config_filename
try:
with open(params.config_filename) as config_file:
try:
params.config = json.load(config_file)
if 'user' in params.config:
params.user = params.config['user']
if 'server' in params.config:
params.server = params.config['server']
if 'password' in params.config:
params.password = params.config['password']
if 'challenge' in params.config:
try:
import keepercommander.yubikey.yubikey
challenge = params.config['challenge']
params.password = keepercommander.yubikey.yubikey.get_response(challenge)
except Exception as e:
print(e)
sys.exit(1)
if 'timedelay' in params.config:
params.timedelay = params.config['timedelay']
if 'mfa_token' in params.config:
params.mfa_token = params.config['mfa_token']
if 'mfa_type' in params.config:
params.mfa_type = params.config['mfa_type']
if 'commands' in params.config:
params.commands = params.config['commands']
if 'plugins' in params.config:
params.plugins = params.config['plugins']
if 'debug' in params.config:
params.debug = params.config['debug']
except:
print('Error: Unable to parse JSON file ' + params.config_filename)
raise
except __HOLE__:
if config_filename:
print('Error: Unable to open config file ' + config_filename)
pass
if not params.server:
params.server = 'https://keeperapp.com/v2/'
return params
|
IOError
|
dataset/ETHPy150Open Keeper-Security/Commander/keepercommander/cli.py/get_params
|
1,736
|
def loop(params):
display.welcome()
try:
while not params.user:
params.user = getpass.getpass(prompt='User(Email): ', stream=None)
# only prompt for password when no device token
while not params.password:
params.password = getpass.getpass(prompt='Password: ', stream=None)
# if commands are provided, execute those then exit
if params.commands:
runcommands(params)
goodbye()
if params.debug: print('Params: ' + str(params))
# start with a sync download
if not params.command:
params.command = 'd'
# go into interactive mode
while True:
if not params.command:
try:
params.command = input("Keeper > ")
except KeyboardInterrupt:
print('')
except EOFError:
raise KeyboardInterrupt
try:
if not do_command(params):
raise KeyboardInterrupt
except CommunicationError as e:
print("Communication Error:" + str(e.message))
except AuthenticationError as e:
print("AuthenticationError Error: " + str(e.message))
except KeyboardInterrupt as e:
raise
except:
print('An unexpected error occurred: ' + str(sys.exc_info()[0]))
raise
params.command = ''
except __HOLE__:
goodbye()
|
KeyboardInterrupt
|
dataset/ETHPy150Open Keeper-Security/Commander/keepercommander/cli.py/loop
|
1,737
|
def run(self):
_clean.run(self)
import fnmatch
# kill temporary files
patterns = [
# generic tempfiles
'*~', '*.bak', '*.pyc',
# tempfiles generated by ANTLR runs
't[0-9]*Lexer.py', 't[0-9]*Parser.py',
'*.tokens', '*__.g',
]
for path in ('antlr3', 'unittests', 'tests'):
path = os.path.join(os.path.dirname(__file__), path)
if os.path.isdir(path):
for root, dirs, files in os.walk(path, topdown=True):
graveyard = []
for pat in patterns:
graveyard.extend(fnmatch.filter(files, pat))
for name in graveyard:
filePath = os.path.join(root, name)
try:
log.info("removing '%s'", filePath)
os.unlink(filePath)
except __HOLE__, exc:
log.warn(
"Failed to delete '%s': %s",
filePath, exc
)
|
OSError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/antlr3/setup.py/clean.run
|
1,738
|
def __call__(self, func):
try:
http_methods = getattr(func, "__http_methods__")
except __HOLE__:
http_methods = []
setattr(func, "__http_methods__", http_methods)
http_methods.append((self.method, self.path,))
return func
|
AttributeError
|
dataset/ETHPy150Open google/grr/grr/gui/api_call_router.py/Http.__call__
|
1,739
|
def prompt(text='choice> '):
try:
# python 2
got = raw_input(text)
except __HOLE__:
# python 3
got = input(text)
return got
|
NameError
|
dataset/ETHPy150Open captin411/ofxclient/ofxclient/cli.py/prompt
|
1,740
|
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
p : array of shape = [n_samples]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
score = self.decision_function(X)
try:
return self.loss_._score_to_proba(score)
except NotFittedError:
raise
except __HOLE__:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
|
AttributeError
|
dataset/ETHPy150Open scikit-learn/scikit-learn/sklearn/ensemble/gradient_boosting.py/GradientBoostingClassifier.predict_proba
|
1,741
|
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
try:
for score in self._staged_decision_function(X):
yield self.loss_._score_to_proba(score)
except NotFittedError:
raise
except __HOLE__:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
|
AttributeError
|
dataset/ETHPy150Open scikit-learn/scikit-learn/sklearn/ensemble/gradient_boosting.py/GradientBoostingClassifier.staged_predict_proba
|
1,742
|
def get_theme(themeId):
try:
theme = Theme.objects.get(id=themeId)
except __HOLE__:
return None
return {
'id': theme.id,
'name': theme.name,
'downloads': theme.downloads,
'author': theme.author,
'website': theme.website,
'comment': theme.comment,
'elements': dict([(e[0], gen_style(e[1])) for e in deserialize_from_lines(theme.elements.split('\n'))])
}
|
ObjectDoesNotExist
|
dataset/ETHPy150Open y-a-r-g/idea-color-themes/backend/logic/themes/themes.py/get_theme
|
1,743
|
def get_theme_archive(themeId):
try:
theme = Theme.objects.get(id=themeId)
except __HOLE__:
return None, None
theme.downloads = int(theme.downloads) + 1
theme.save()
if theme.archive:
data = theme.archive.read()
else:
data = serialize_to_idea_lines(deserialize_from_lines(theme.elements.split(u'\n')), theme.name)
data = u'\n'.join(data).encode('utf_8')
options = u"""<?xml version="1.0" encoding="UTF-8"?>
<application>
<component name="EditorColorsManagerImpl">
<option name="USE_ONLY_MONOSPACED_FONTS" value="true" />
<global_color_scheme name="%s" />
</component>
</application>
""" % theme.name
resultStream = StringIO()
archive = ZipFile(resultStream, 'w')
archive.writestr('IntelliJ IDEA Global Settings', '')
archive.writestr('options/colors.scheme.xml', options.encode('utf_8'))
archive.writestr('colors/%s.xml' % theme.name, data)
archive.close()
return theme.name.encode('utf_8'), resultStream.getvalue()
|
ObjectDoesNotExist
|
dataset/ETHPy150Open y-a-r-g/idea-color-themes/backend/logic/themes/themes.py/get_theme_archive
|
1,744
|
def allow_download_all(token_value):
dateLimit = (datetime.now() + timedelta(days=1)).date()
if token_value:
try:
ShoppingToken.objects.get(value=token_value, date__lte=dateLimit, payed=True)
return True
except __HOLE__:
pass
return False
|
ObjectDoesNotExist
|
dataset/ETHPy150Open y-a-r-g/idea-color-themes/backend/logic/themes/themes.py/allow_download_all
|
1,745
|
def import_theme(themeId):
try:
url = 'http://eclipsecolorthemes.org/?view=empty&action=download&theme=%s&type=xml'
response = urllib2.urlopen(url % themeId)
xml = response.read()
descr, elements = deserialize_from_eclipse_color_theme_xml(
xml.replace('\r', '').split('\n'))
name, author, website = descr
elements = '\n'.join(serialize_to_lines(elements))
if len(name) == 0:
return -1
try:
theme = Theme.objects.get(name=name)
theme.elements = elements
except __HOLE__:
theme = Theme(name=name,
author=author or 'Idea Color Themes',
website=website or 'http://www.ideacolorthemes.org',
elements=elements,
ect=themeId,
promote=False,
moderating=False,
comment="")
theme.save()
return theme.id
except:
return -1
|
ObjectDoesNotExist
|
dataset/ETHPy150Open y-a-r-g/idea-color-themes/backend/logic/themes/themes.py/import_theme
|
1,746
|
def json_publish(self, queue_name, body):
try:
return self.publish(queue_name, ujson.dumps(body))
except (__HOLE__, pika.exceptions.AMQPConnectionError):
self.log.warning("Failed to send to rabbitmq, trying to reconnect and send again")
self._reconnect()
return self.publish(queue_name, ujson.dumps(body))
|
AttributeError
|
dataset/ETHPy150Open zulip/zulip/zerver/lib/queue.py/SimpleQueueClient.json_publish
|
1,747
|
def _get_fullPrice(self):
""" Get price based on parent ConfigurableProduct """
# allow explicit setting of prices.
#qty_discounts = self.price_set.exclude(expires__isnull=False, expires__lt=datetime.date.today()).filter(quantity__lte=1)
try:
qty_discounts = Price.objects.filter(product__id=self.product.id).exclude(expires__isnull=False, expires__lt=datetime.date.today())
if qty_discounts.count() > 0:
# Get the price with the quantity closest to the one specified without going over
return qty_discounts.order_by('-quantity')[0].dynamic_price
if self.parent.product.unit_price is None:
log.warn("%s: Unexpectedly no parent.product.unit_price", self)
return None
except __HOLE__:
pass
# calculate from options
return self.parent.product.unit_price + self.price_delta()
|
AttributeError
|
dataset/ETHPy150Open dokterbob/satchmo/satchmo/apps/product/modules/configurable/models.py/ProductVariation._get_fullPrice
|
1,748
|
def _repr(self, value, pos):
__traceback_hide__ = True
try:
if value is None:
return ''
if self._unicode:
try:
value = six.text_type(value)
except __HOLE__:
value = str(value)
else:
value = str(value)
except:
exc_info = sys.exc_info()
e = exc_info[1]
e.args = (self._add_line_info(e.args[0], pos),)
six.reraise(exc_info[0], e, exc_info[2])
else:
if self._unicode and isinstance(value, six.binary_type):
if not self.decode_encoding:
raise UnicodeDecodeError(
'Cannot decode str value %r into unicode '
'(no default_encoding provided)' % value)
value = value.decode(self.default_encoding)
elif not self._unicode and isinstance(value, six.text_type):
if not self.decode_encoding:
raise UnicodeEncodeError(
'Cannot encode unicode value %r into str '
'(no default_encoding provided)' % value)
value = value.encode(self.default_encoding)
return value
|
UnicodeDecodeError
|
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Paste-2.0.1/paste/util/template.py/Template._repr
|
1,749
|
def __getattr__(self, name):
try:
return self[name]
except __HOLE__:
raise AttributeError(name)
|
KeyError
|
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Paste-2.0.1/paste/util/template.py/bunch.__getattr__
|
1,750
|
def __getitem__(self, key):
if 'default' in self:
try:
return dict.__getitem__(self, key)
except __HOLE__:
return dict.__getitem__(self, 'default')
else:
return dict.__getitem__(self, key)
|
KeyError
|
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Paste-2.0.1/paste/util/template.py/bunch.__getitem__
|
1,751
|
def deploy_index():
"""
Custom module homepage for deploy (=RIT) to display online
documentation for the module
"""
response = current.response
def prep(r):
default_url = URL(f="mission", args="summary", vars={})
return current.s3db.cms_documentation(r, "RIT", default_url)
response.s3.prep = prep
output = current.rest_controller("cms", "post")
# Custom view
view = path.join(current.request.folder,
"modules",
"templates",
THEME,
"views",
"deploy",
"index.html",
)
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except __HOLE__:
from gluon.http import HTTP
raise HTTP(404, "Unable to open Custom View: %s" % view)
return output
# END =========================================================================
|
IOError
|
dataset/ETHPy150Open sahana/eden/modules/templates/RMSAmericas/controllers.py/deploy_index
|
1,752
|
def _validate_format(self, parser, format_dict):
"""
Validates the given format dictionary. Each key of this dict refers to a specific BBCode placeholder type.
eg. {TEXT} or {TEXT1} refer to the 'TEXT' BBCode placeholder type.
Each content is validated according to its associated placeholder type.
"""
for placeholder_string, content in format_dict.items():
try:
placeholder_results = re.findall(placeholder_content_re, placeholder_string)
assert len(placeholder_results)
placeholder_type, _, extra_context = placeholder_results[0]
valid_content = parser.placeholders[placeholder_type.upper()].validate(content, extra_context=extra_context[1:])
assert valid_content and valid_content is not None
except __HOLE__:
raise InvalidBBCodePlaholder(placeholder_type)
except AssertionError:
return False
return True
|
KeyError
|
dataset/ETHPy150Open ellmetha/django-precise-bbcode/precise_bbcode/bbcode/tag.py/BBCodeTag._validate_format
|
1,753
|
@app.route("/<topic>/", methods=["GET", "POST"])
@app.route("/<topic>/<group_or_key>/", methods=["GET", "POST"])
def flasfka(topic, group_or_key=None):
topic = topic.encode("utf-8")
if group_or_key is not None:
group_or_key = group_or_key.encode("utf-8")
client = get_kafka_client()
client.ensure_topic_exists(topic)
if flask.request.method == "GET":
limit = int(flask.request.args.get(
"limit", app.config["CONSUMER_LIMIT"]
))
group = group_or_key
return flask.jsonify(consume(topic, group, limit))
if flask.request.method == "POST":
key = group_or_key
data = flask.request.get_json(force=True)
try:
produce(topic, data["messages"], key)
return flask.make_response(("", 204, {}))
except (__HOLE__, TypeError):
return flask.make_response((
'expected format: {"messages": ["message1", ...]}',
400,
{}
))
# Snippet to attach the version to every request
|
KeyError
|
dataset/ETHPy150Open travel-intelligence/flasfka/flasfka/api.py/flasfka
|
1,754
|
def get_env_var(name, default=None):
value = _get_env_var_from_java(name)
if value is not None:
return value
try:
value = os.environ[_encode(name)]
except __HOLE__:
return default
else:
return _decode(value)
|
KeyError
|
dataset/ETHPy150Open shellderp/sublime-robot-plugin/lib/robot/utils/robotenv.py/get_env_var
|
1,755
|
def irm(ip,arg):
""" irm path[s]...
Remove file[s] or dir[s] path. Dirs are deleted recursively.
"""
try:
paths = mglob.expand(arg.split(None,1)[1])
except __HOLE__:
raise UsageError("%irm paths...")
import distutils.dir_util
for p in paths:
print("rm",p)
if os.path.isdir(p):
distutils.dir_util.remove_tree(p, verbose = 1)
else:
os.remove(p)
|
IndexError
|
dataset/ETHPy150Open ipython/ipython-py3k/IPython/quarantine/ipy_fsops.py/irm
|
1,756
|
def collect(ip,arg):
""" collect foo/a.txt rec:bar=*.py
Copies foo/a.txt to ~/_ipython/collect/foo/a.txt and *.py from bar,
likewise
Without args, try to open ~/_ipython/collect dir (in win32 at least).
"""
from IPython.external.path import path
basedir = path(ip.ipython_dir + '/collect')
try:
fs = mglob.expand(arg.split(None,1)[1])
except __HOLE__:
os.startfile(basedir)
return
for f in fs:
f = path(f)
trg = basedir / f.splitdrive()[1].lstrip('/\\')
if f.isdir():
print("mkdir",trg)
trg.makedirs()
continue
dname = trg.dirname()
if not dname.isdir():
dname.makedirs()
print(f,"=>",trg)
shutil.copy2(f,trg)
|
IndexError
|
dataset/ETHPy150Open ipython/ipython-py3k/IPython/quarantine/ipy_fsops.py/collect
|
1,757
|
def inote(ip,arg):
""" inote Hello world
Adds timestamp and Hello world to ~/_ipython/notes.txt
Without args, opens notes.txt for editing.
"""
import time
fname = ip.ipython_dir + '/notes.txt'
try:
entry = " === " + time.asctime() + ': ===\n' + arg.split(None,1)[1] + '\n'
f= open(fname, 'a').write(entry)
except __HOLE__:
ip.hooks.editor(fname)
|
IndexError
|
dataset/ETHPy150Open ipython/ipython-py3k/IPython/quarantine/ipy_fsops.py/inote
|
1,758
|
def _getMigration(major):
try:
ret = sys.modules[__name__].__dict__['MigrateTo_' + str(major)]
except __HOLE__:
return None
return ret
# return the last major.minor version for a given major
|
KeyError
|
dataset/ETHPy150Open sassoftware/conary/conary/server/migrate.py/_getMigration
|
1,759
|
def __getitem__(self, key):
try:
return getattr(self, key)
except __HOLE__ as exc:
raise KeyError(exc.message)
|
AttributeError
|
dataset/ETHPy150Open mozilla/elasticutils/elasticutils/__init__.py/FacetResult.__getitem__
|
1,760
|
def to_python(self, obj):
"""Converts strings in a data structure to Python types
It converts datetime-ish things to Python datetimes.
Override if you want something different.
:arg obj: Python datastructure
:returns: Python datastructure with strings converted to
Python types
.. Note::
This does the conversion in-place!
"""
if isinstance(obj, string_types):
if len(obj) == 26:
try:
return datetime.strptime(obj, '%Y-%m-%dT%H:%M:%S.%f')
except (TypeError, __HOLE__):
pass
elif len(obj) == 19:
try:
return datetime.strptime(obj, '%Y-%m-%dT%H:%M:%S')
except (TypeError, ValueError):
pass
elif len(obj) == 10:
try:
return datetime.strptime(obj, '%Y-%m-%d')
except (TypeError, ValueError):
pass
elif isinstance(obj, dict):
for key, val in obj.items():
obj[key] = self.to_python(val)
elif isinstance(obj, list):
return [self.to_python(item) for item in obj]
return obj
|
ValueError
|
dataset/ETHPy150Open mozilla/elasticutils/elasticutils/__init__.py/PythonMixin.to_python
|
1,761
|
def __repr__(self):
try:
return '<S {0}>'.format(repr(self.build_search()))
except __HOLE__:
# This can happen when you're debugging build_search() and
# try to repr the instance you're calling it on. Then that
# calls build_search() and CLOWN SHOES!
return repr(self.steps)
|
RuntimeError
|
dataset/ETHPy150Open mozilla/elasticutils/elasticutils/__init__.py/S.__repr__
|
1,762
|
def __init__(self, application_name=None, icon=None, host=None,
password=None, record_limit=None, record_delta=None,
level=NOTSET, filter=None, bubble=False):
NotificationBaseHandler.__init__(self, application_name, record_limit,
record_delta, level, filter, bubble)
# growl is using the deprecated md5 module, but we really don't need
# to see that deprecation warning
from warnings import filterwarnings
filterwarnings(module='Growl', category=DeprecationWarning,
action='ignore')
try:
import Growl
self._growl = Growl
except __HOLE__:
raise RuntimeError('The growl module is not available. You have '
'to install either growl-py or py-Growl to '
'use the GrowlHandler.')
if icon is not None:
if not os.path.isfile(icon):
raise IOError('Filename to an icon expected.')
icon = self._growl.Image.imageFromPath(icon)
else:
try:
icon = self._growl.Image.imageWithIconForCurrentApplication()
except TypeError:
icon = None
self._notifier = self._growl.GrowlNotifier(
applicationName=self.application_name,
applicationIcon=icon,
notifications=['Notset', 'Debug', 'Info', 'Notice', 'Warning',
'Error', 'Critical'],
hostname=host,
password=password
)
self._notifier.register()
|
ImportError
|
dataset/ETHPy150Open getlogbook/logbook/logbook/notifiers.py/GrowlHandler.__init__
|
1,763
|
def __init__(self, application_name=None, icon=None, no_init=False,
record_limit=None, record_delta=None, level=NOTSET,
filter=None, bubble=False):
NotificationBaseHandler.__init__(self, application_name, record_limit,
record_delta, level, filter, bubble)
try:
import pynotify
self._pynotify = pynotify
except __HOLE__:
raise RuntimeError('The pynotify library is required for '
'the LibNotifyHandler.')
self.icon = icon
if not no_init:
pynotify.init(self.application_name)
|
ImportError
|
dataset/ETHPy150Open getlogbook/logbook/logbook/notifiers.py/LibNotifyHandler.__init__
|
1,764
|
def set_notifier_icon(self, notifier, icon):
"""Used to attach an icon on a notifier object."""
try:
from gtk import gdk
except __HOLE__:
# TODO: raise a warning?
raise RuntimeError('The gtk.gdk module is required to set an icon.')
if icon is not None:
if not isinstance(icon, gdk.Pixbuf):
icon = gdk.pixbuf_new_from_file(icon)
notifier.set_icon_from_pixbuf(icon)
|
ImportError
|
dataset/ETHPy150Open getlogbook/logbook/logbook/notifiers.py/LibNotifyHandler.set_notifier_icon
|
1,765
|
def __init__(self, application_name=None, username=None, secret=None,
record_limit=None, record_delta=None, level=NOTSET,
filter=None, bubble=False, hide_level=False):
try:
import notifo
except __HOLE__:
raise RuntimeError(
'The notifo module is not available. You have '
'to install notifo to use the NotifoHandler.'
)
NotificationBaseHandler.__init__(self, None, record_limit,
record_delta, level, filter, bubble)
self._notifo = notifo
self.application_name = application_name
self.username = username
self.secret = secret
self.hide_level = hide_level
|
ImportError
|
dataset/ETHPy150Open getlogbook/logbook/logbook/notifiers.py/NotifoHandler.__init__
|
1,766
|
def parse_body(self):
try:
js = json.loads(self.body)
if js[js.keys()[0]]['response_type'] == "ERROR":
raise RimuHostingException(
js[js.keys()[0]]['human_readable_message']
)
return js[js.keys()[0]]
except __HOLE__:
raise RimuHostingException('Could not parse body: %s'
% (self.body))
except KeyError:
raise RimuHostingException('Could not parse body: %s'
% (self.body))
|
ValueError
|
dataset/ETHPy150Open secondstory/dewpoint/libcloud/drivers/rimuhosting.py/RimuHostingResponse.parse_body
|
1,767
|
def iter_fields(node):
"""
Yield a tuple of ``(fieldname, value)`` for each field in ``node._fields``
that is present on *node*.
"""
for field in node._fields:
try:
yield field, getattr(node, field)
except __HOLE__:
pass
|
AttributeError
|
dataset/ETHPy150Open kleientertainment/ds_mod_tools/pkg/win32/Python27/Lib/ast.py/iter_fields
|
1,768
|
def GetArgsClass(self):
try:
return rdfvalue.RDFValue.GetPlugin(self.type)
except __HOLE__:
raise ValueError("No class found for type %s." % self.type)
|
KeyError
|
dataset/ETHPy150Open google/grr/grr/gui/api_call_handler_utils.py/ApiDataObjectKeyValuePair.GetArgsClass
|
1,769
|
def _qtactor_run(self):
self.process_start()
self.process()
# get gen_process generator
try:
self._qtactor_gen = self.gen_process()
except __HOLE__:
self._qtactor_gen = None
# do first step
if self._qtactor_gen:
self._qtactor_step()
|
AttributeError
|
dataset/ETHPy150Open sparkslabs/guild/guild/qtactor.py/QtActorMixin._qtactor_run
|
1,770
|
def _qtactor_step(self):
try:
self._qtactor_gen.next()
except __HOLE__:
self._qtactor_gen = None
return
# trigger next step
QtCore.QCoreApplication.postEvent(
self, QtCore.QEvent(self._qtactor_step_event),
QtCore.Qt.LowEventPriority)
|
StopIteration
|
dataset/ETHPy150Open sparkslabs/guild/guild/qtactor.py/QtActorMixin._qtactor_step
|
1,771
|
def _set_cloexec(fd):
try:
flags = _fcntl.fcntl(fd, _fcntl.F_GETFD, 0)
except __HOLE__:
pass
else:
# flags read successfully, modify
flags |= _fcntl.FD_CLOEXEC
_fcntl.fcntl(fd, _fcntl.F_SETFD, flags)
|
IOError
|
dataset/ETHPy150Open eBay/restcommander/play-1.2.4/python/Lib/tempfile.py/_set_cloexec
|
1,772
|
def _stat(fn):
try:
f = open(fn)
except __HOLE__:
raise _os.error
f.close()
|
IOError
|
dataset/ETHPy150Open eBay/restcommander/play-1.2.4/python/Lib/tempfile.py/_stat
|
1,773
|
def _candidate_tempdir_list():
"""Generate a list of candidate temporary directories which
_get_default_tempdir will try."""
dirlist = []
# First, try the environment.
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = _os.getenv(envname)
if dirname: dirlist.append(dirname)
# Failing that, try OS-specific locations.
if _os.name == 'riscos':
dirname = _os.getenv('Wimp$ScrapDir')
if dirname: dirlist.append(dirname)
elif _os.name == 'nt':
dirlist.extend([ r'c:\temp', r'c:\tmp', r'\temp', r'\tmp' ])
else:
dirlist.extend([ '/tmp', '/var/tmp', '/usr/tmp' ])
# As a last resort, the current directory.
try:
dirlist.append(_os.getcwd())
except (__HOLE__, _os.error):
dirlist.append(_os.curdir)
return dirlist
|
AttributeError
|
dataset/ETHPy150Open eBay/restcommander/play-1.2.4/python/Lib/tempfile.py/_candidate_tempdir_list
|
1,774
|
def _get_default_tempdir():
"""Calculate the default directory to use for temporary files.
This routine should be called exactly once.
We determine whether or not a candidate temp dir is usable by
trying to create and write to a file in that directory. If this
is successful, the test file is deleted. To prevent denial of
service, the name of the test file must be randomized."""
namer = _RandomNameSequence()
dirlist = _candidate_tempdir_list()
flags = _text_openflags
for dir in dirlist:
if dir != _os.curdir:
dir = _os.path.normcase(_os.path.abspath(dir))
# Try only a few names per directory.
for seq in xrange(100):
name = namer.next()
filename = _os.path.join(dir, name)
try:
fd = _os.open(filename, flags, 0600)
fp = _os.fdopen(fd, 'w')
fp.write('blat')
fp.close()
_os.unlink(filename)
del fp, fd
return dir
except (OSError, __HOLE__), e:
if e[0] != _errno.EEXIST:
break # no point trying more names in this directory
pass
raise IOError, (_errno.ENOENT,
("No usable temporary directory found in %s" % dirlist))
|
IOError
|
dataset/ETHPy150Open eBay/restcommander/play-1.2.4/python/Lib/tempfile.py/_get_default_tempdir
|
1,775
|
def _mkstemp_inner(dir, pre, suf, flags):
"""Code common to mkstemp, TemporaryFile, and NamedTemporaryFile."""
names = _get_candidate_names()
for seq in xrange(TMP_MAX):
name = names.next()
file = _os.path.join(dir, pre + name + suf)
try:
fd = _os.open(file, flags, 0600)
_set_cloexec(fd)
return (fd, _os.path.abspath(file))
except __HOLE__, e:
if e.errno == _errno.EEXIST:
continue # try again
raise
raise IOError, (_errno.EEXIST, "No usable temporary file name found")
# User visible interfaces.
|
OSError
|
dataset/ETHPy150Open eBay/restcommander/play-1.2.4/python/Lib/tempfile.py/_mkstemp_inner
|
1,776
|
def mkdtemp(suffix="", prefix=template, dir=None):
"""User-callable function to create and return a unique temporary
directory. The return value is the pathname of the directory.
Arguments are as for mkstemp, except that the 'text' argument is
not accepted.
The directory is readable, writable, and searchable only by the
creating user.
Caller is responsible for deleting the directory when done with it.
"""
if dir is None:
dir = gettempdir()
names = _get_candidate_names()
for seq in xrange(TMP_MAX):
name = names.next()
file = _os.path.join(dir, prefix + name + suffix)
try:
_os.mkdir(file, 0700)
return file
except __HOLE__, e:
if e.errno == _errno.EEXIST:
continue # try again
raise
raise IOError, (_errno.EEXIST, "No usable temporary directory name found")
|
OSError
|
dataset/ETHPy150Open eBay/restcommander/play-1.2.4/python/Lib/tempfile.py/mkdtemp
|
1,777
|
def __repr__(self):
try:
name = self.Name()
except __HOLE__:
return '<%s at 0x%x>' % (self.__class__.__name__, id(self))
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
|
NotImplementedError
|
dataset/ETHPy150Open adblockplus/gyp/pylib/gyp/xcodeproj_file.py/XCObject.__repr__
|
1,778
|
def _XCKVPrint(self, file, tabs, key, value):
"""Prints a key and value, members of an XCObject's _properties dictionary,
to file.
tabs is an int identifying the indentation level. If the class'
_should_print_single_line variable is True, tabs is ignored and the
    key-value pair will be followed by a space instead of a newline.
"""
if self._should_print_single_line:
printable = ''
after_kv = ' '
else:
printable = '\t' * tabs
after_kv = '\n'
# Xcode usually prints remoteGlobalIDString values in PBXContainerItemProxy
# objects without comments. Sometimes it prints them with comments, but
# the majority of the time, it doesn't. To avoid unnecessary changes to
# the project file after Xcode opens it, don't write comments for
# remoteGlobalIDString. This is a sucky hack and it would certainly be
# cleaner to extend the schema to indicate whether or not a comment should
# be printed, but since this is the only case where the problem occurs and
# Xcode itself can't seem to make up its mind, the hack will suffice.
#
# Also see PBXContainerItemProxy._schema['remoteGlobalIDString'].
if key == 'remoteGlobalIDString' and isinstance(self,
PBXContainerItemProxy):
value_to_print = value.id
else:
value_to_print = value
# PBXBuildFile's settings property is represented in the output as a dict,
# but a hack here has it represented as a string. Arrange to strip off the
# quotes so that it shows up in the output as expected.
if key == 'settings' and isinstance(self, PBXBuildFile):
strip_value_quotes = True
else:
strip_value_quotes = False
# In another one-off, let's set flatten_list on buildSettings properties
# of XCBuildConfiguration objects, because that's how Xcode treats them.
if key == 'buildSettings' and isinstance(self, XCBuildConfiguration):
flatten_list = True
else:
flatten_list = False
try:
printable_key = self._XCPrintableValue(tabs, key, flatten_list)
printable_value = self._XCPrintableValue(tabs, value_to_print,
flatten_list)
if strip_value_quotes and len(printable_value) > 1 and \
printable_value[0] == '"' and printable_value[-1] == '"':
printable_value = printable_value[1:-1]
printable += printable_key + ' = ' + printable_value + ';' + after_kv
except __HOLE__, e:
gyp.common.ExceptionAppend(e,
'while printing key "%s"' % key)
raise
self._XCPrint(file, 0, printable)
|
TypeError
|
dataset/ETHPy150Open adblockplus/gyp/pylib/gyp/xcodeproj_file.py/XCObject._XCKVPrint
|
1,779
|
def onVariantPlaylist(self, playlist):
print "Found variant playlist."
masterPlaylist = HlsPlaylist()
masterPlaylist.version = playlist.version
for variant in playlist.variants:
subOutDir = self.outDir + str(variant.bandwidth)
print "Starting a sub hls-proxy for channel with bandwith ", variant.bandwidth, " in directory ", subOutDir
try:
os.mkdir(subOutDir)
except __HOLE__:
pass #mkdir throws if dir already exists
subProxy = HlsProxy(self.reactor)
subProxy.verbose = self.verbose
subProxy.download = self.download
subProxy.referer = self.referer
subProxy.dump_durations = self.dump_durations
subProxy.save_individual_playlists = self.save_individual_playlists
subProxy.setOutDir(subOutDir)
d = subProxy.run(variant.absoluteUrl)
            #TODO add the deferred to self.finised somehow
masterVariant = HlsVarian()
masterPlaylist.variants.append(masterVariant)
masterVariant.absoluteUrl = str(variant.bandwidth) + "/stream.m3u8"
masterVariant.programId = variant.programId
masterVariant.bandwidth = variant.bandwidth
self.writeFile(self.getClientPlaylist(), masterPlaylist.toStr())
|
OSError
|
dataset/ETHPy150Open Viblast/hls-proxy/hlsproxy.py/HlsProxy.onVariantPlaylist
|
1,780
|
@classmethod
def setupClass(cls):
global numpy
try:
import numpy
except __HOLE__:
raise SkipTest('NumPy not available.')
|
ImportError
|
dataset/ETHPy150Open networkx/networkx/networkx/algorithms/link_analysis/tests/test_pagerank.py/TestPageRank.setupClass
|
1,781
|
@classmethod
def setupClass(cls):
global scipy
try:
import scipy
except __HOLE__:
raise SkipTest('SciPy not available.')
|
ImportError
|
dataset/ETHPy150Open networkx/networkx/networkx/algorithms/link_analysis/tests/test_pagerank.py/TestPageRankScipy.setupClass
|
1,782
|
def resolveEntity(self, publicId, systemId):
source = InputSource()
source.setSystemId(systemId)
try:
dtdPath = self.knownDTDs[systemId]
except __HOLE__:
raise process.ProcessingFailure(
"Invalid DTD system identifier (%r) in %s. Only "
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd "
"is allowed." % (systemId, self.filename))
source.setByteStream(dtdPath.open())
return source
|
KeyError
|
dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/lore/tree.py/_LocalEntityResolver.resolveEntity
|
1,783
|
def parseFileAndReport(filename, _open=file):
"""
Parse and return the contents of the given lore XHTML document.
@type filename: C{str}
@param filename: The name of a file containing a lore XHTML document to
load.
@raise process.ProcessingFailure: When the contents of the specified file
cannot be parsed.
@rtype: A DOM Document
@return: The document contained in C{filename}.
"""
content = _TagTrackingContentHandler()
error = _LocationReportingErrorHandler(content)
parser = make_parser()
parser.setContentHandler(content)
parser.setErrorHandler(error)
# In order to call a method on the expat parser which will be used by this
# parser, we need the expat parser to be created. This doesn't happen
# until reset is called, normally by the parser's parse method. That's too
# late for us, since it will then go on to parse the document without
# letting us do any extra set up. So, force the expat parser to be created
# here, and then disable reset so that the parser created is the one
# actually used to parse our document. Resetting is only needed if more
# than one document is going to be parsed, and that isn't the case here.
parser.reset()
parser.reset = lambda: None
# This is necessary to make the xhtml1 transitional declaration optional.
# It causes LocalEntityResolver.resolveEntity(None, None) to be called.
# LocalEntityResolver handles that case by giving out the xhtml1
# transitional dtd. Unfortunately, there is no public API for manipulating
# the expat parser when using xml.sax. Using the private _parser attribute
# may break. It's also possible that make_parser will return a parser
# which doesn't use expat, but uses some other parser. Oh well. :(
# -exarkun
parser._parser.UseForeignDTD(True)
parser.setEntityResolver(_LocalEntityResolver(filename))
# This is probably no-op because expat is not a validating parser. Who
# knows though, maybe you figured out a way to not use expat.
parser.setFeature(feature_validation, False)
fObj = _open(filename)
try:
try:
parser.parse(fObj)
except __HOLE__, e:
raise process.ProcessingFailure(
e.strerror + ", filename was '" + filename + "'")
finally:
fObj.close()
return content.document
|
IOError
|
dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/lore/tree.py/parseFileAndReport
|
1,784
|
def __load():
import imp, os, sys
try:
dirname = os.path.dirname(__loader__.archive)
except __HOLE__:
dirname = sys.prefix
path = os.path.join(dirname, 'PyQt4.QtGui.pyd')
#print "py2exe extension module", __name__, "->", path
mod = imp.load_dynamic(__name__, path)
## mod.frozen = 1
|
NameError
|
dataset/ETHPy150Open yasoob/youtube-dl-GUI/build/bdist.win-amd64/winexe/temp/PyQt4.QtGui.py/__load
|
1,785
|
def convert(self, s):
parts = TokenTranslator._BREAK_ON_RE.split(s)
parts_iter = iter(parts)
converted_parts = []
for part in parts_iter:
if part == '' or TokenTranslator._DELIMITER_RE.match(part):
converted_parts.append(part)
elif TokenTranslator._UPPER_CASE_RE.match(part):
# Join to the rest of the word, if any.
token = part
try:
token += parts_iter.next()
except __HOLE__:
pass
converted_parts.append(self._convert_single_token(token))
else:
converted_parts.append(self._convert_single_token(part))
return self.handle_conversion(s, ''.join(converted_parts))
|
StopIteration
|
dataset/ETHPy150Open pantsbuild/pants/src/python/pants/backend/jvm/tasks/jvm_compile/anonymizer.py/TokenTranslator.convert
|
1,786
|
def _db_value_for_elem(self, elem):
try:
return self._valid_lookup[elem]
except __HOLE__:
raise LookupError(
'"%s" is not among the defined enum values' % elem)
|
KeyError
|
dataset/ETHPy150Open zzzeek/sqlalchemy/lib/sqlalchemy/sql/sqltypes.py/Enum._db_value_for_elem
|
1,787
|
def _object_value_for_elem(self, elem):
try:
return self._object_lookup[elem]
except __HOLE__:
raise LookupError(
'"%s" is not among the defined enum values' % elem)
|
KeyError
|
dataset/ETHPy150Open zzzeek/sqlalchemy/lib/sqlalchemy/sql/sqltypes.py/Enum._object_value_for_elem
|
1,788
|
def random_image(field):
color1 = random_rgb()
color2 = random_rgb()
color3 = random_rgb()
color4 = random_rgb()
size = (random.randint(300, 900), random.randint(300, 900))
im = Image.new("RGB", size) # create the image
draw = ImageDraw.Draw(im) # create a drawing object that is
draw.rectangle(
[(0, 0), ((size[0] / 2), (size[1] / 2))],
fill=color1
)
draw.rectangle(
[((size[0] / 2), 0), ((size[1] / 2), size[0])],
fill=color2
)
draw.rectangle(
[(0, (size[1] / 2)), ((size[0] / 2), size[1])],
fill=color3
)
draw.rectangle(
[((size[0] / 2), (size[1] / 2)), (size[0], size[1])],
fill=color4
)
filename = "%s.png" % uuid.uuid4().hex[:10]
filename = field.generate_filename(None, filename)
storage = DefaultStorage()
full_path = storage.path(filename)
directory = os.path.dirname(full_path)
try:
os.makedirs(directory)
except __HOLE__ as e:
if e.errno != errno.EEXIST:
raise
filehandle = storage.open(filename, mode="w")
im.save(filehandle, "PNG")
filehandle.close()
return filename # and we"re done!
|
OSError
|
dataset/ETHPy150Open ccollins/milkman/milkman/generators.py/random_image
|
1,789
|
def get_volume_connector(self, instance):
"""Return volume connector information."""
if not self._initiator or not self._hypervisor_hostname:
stats = self.host_state.get_host_stats(refresh=True)
try:
self._initiator = stats['host_other-config']['iscsi_iqn']
self._hypervisor_hostname = stats['host_hostname']
except (__HOLE__, KeyError) as err:
LOG.warning(_LW('Could not determine key: %s'), err,
instance=instance)
self._initiator = None
return {
'ip': self._get_block_storage_ip(),
'initiator': self._initiator,
'host': self._hypervisor_hostname
}
|
TypeError
|
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/virt/xenapi/driver.py/XenAPIDriver.get_volume_connector
|
1,790
|
def __init__(self):
self.statefile_path = os.path.join(sys.prefix, "pip-selfcheck.json")
# Load the existing state
try:
with open(self.statefile_path) as statefile:
self.state = json.load(statefile)
except (IOError, __HOLE__):
self.state = {}
|
ValueError
|
dataset/ETHPy150Open francelabs/datafari/windows/python/Lib/site-packages/pip/utils/outdated.py/VirtualenvSelfCheckState.__init__
|
1,791
|
def __init__(self):
self.statefile_path = os.path.join(USER_CACHE_DIR, "selfcheck.json")
# Load the existing state
try:
with open(self.statefile_path) as statefile:
self.state = json.load(statefile)[sys.prefix]
except (IOError, ValueError, __HOLE__):
self.state = {}
|
KeyError
|
dataset/ETHPy150Open francelabs/datafari/windows/python/Lib/site-packages/pip/utils/outdated.py/GlobalSelfCheckState.__init__
|
1,792
|
def __init__(self,
pol,
src='any',
dst='any',
sport='any',
dport='any',
proto='any',
):
self.pol_obj = pol
self.proto = proto
# validate source port
if sport == 'any':
self.sport = sport
else:
self.sport = port.Port(sport)
# validate destination port
if dport == 'any':
self.dport = dport
else:
self.dport = port.Port(dport)
# validate source address
if src == 'any':
self.src = src
else:
try:
self.src = nacaddr.IP(src)
except ValueError:
raise AddressError('bad source address: %s\n' % src)
# validate destination address
if dst == 'any':
self.dst = dst
else:
try:
self.dst = nacaddr.IP(dst)
except __HOLE__:
raise AddressError('bad destination address: %s\n' % dst)
if type(self.pol_obj) is not policy.Policy:
raise BadPolicy('Policy object is not valid.')
self.matches = []
self.exact_matches = []
for header, terms in self.pol_obj.filters:
filtername = header.target[0].options[0]
for term in terms:
possible = []
logging.debug('checking term: %s', term.name)
if not self._AddrInside(self.src, term.source_address):
logging.debug('srcaddr does not match')
continue
logging.debug('srcaddr matches: %s', self.src)
if not self._AddrInside(self.dst, term.destination_address):
logging.debug('dstaddr does not match')
continue
logging.debug('dstaddr matches: %s', self.dst)
if (self.sport != 'any' and term.source_port and not
self._PortInside(self.sport, term.source_port)):
logging.debug('sport does not match')
continue
logging.debug('sport matches: %s', self.sport)
if (self.dport != 'any' and term.destination_port and not
self._PortInside(self.dport, term.destination_port)):
logging.debug('dport does not match')
continue
logging.debug('dport matches: %s', self.dport)
if (self.proto != 'any' and term.protocol and
self.proto not in term.protocol):
logging.debug('proto does not match')
continue
logging.debug('proto matches: %s', self.proto)
if term.protocol_except and self.proto in term.protocol_except:
logging.debug('protocol excepted by term, no match.')
continue
logging.debug('proto not excepted: %s', self.proto)
if not term.action: # avoid any verbatim
logging.debug('term had no action (verbatim?), no match.')
continue
logging.debug('term has an action')
possible = self._PossibleMatch(term)
self.matches.append(Match(filtername, term.name, possible, term.action,
term.qos))
if possible:
logging.debug('term has options: %s, not treating as exact match',
possible)
continue
# if we get here then we have a match, and if the action isn't next and
# there are no possibles, then this is a "definite" match and we needn't
# look for any further matches (i.e. later terms may match, but since
# we'll never get there we shouldn't report them)
if 'next' not in term.action:
self.exact_matches.append(Match(filtername, term.name, [],
term.action, term.qos))
break
|
ValueError
|
dataset/ETHPy150Open google/capirca/lib/aclcheck.py/AclCheck.__init__
|
1,793
|
def notifyOnDeath(self, cb):
""" Method is used to forward 'notifyOnDeath' calls to the wrapped
object. It is used to register a callback which will be called
when the wrapped object died.
@param cb: Callback which should be registered. The
callback should take the died object as only
argument.
@type cb: callable
"""
assert callable(cb)
try:
self._cbs.add(cb)
except __HOLE__:
raise AlreadyDead('{0} is already '
'dead.'.format(self.__class__.__name__))
|
AttributeError
|
dataset/ETHPy150Open rapyuta/rce/rce-core/rce/core/wrapper.py/_Wrapper.notifyOnDeath
|
1,794
|
def dontNotifyOnDeath(self, cb):
""" Method is used to forward 'dontNotifyOnDeath' calls to the wrapped
object. It is used to unregister a callback which should have been
called when the wrapped object died.
@param cb: Callback which should be unregistered.
@type cb: callable
"""
try:
self._cbs.remove(cb)
except __HOLE__:
pass
|
AttributeError
|
dataset/ETHPy150Open rapyuta/rce/rce-core/rce/core/wrapper.py/_Wrapper.dontNotifyOnDeath
|
1,795
|
def addInterface(self, iTag, iType, clsName):
""" Add an interface to the Robot object.
@param iTag: Tag which is used to identify the interface in
subsequent requests.
@type iTag: str
@param iType: Type of the interface. The type consists of a
prefix and a suffix.
- Valid prefixes are:
ServiceClient, ServiceProvider,
Publisher, Subscriber
- Valid suffixes are:
Converter, Forwarder
@type iType: str
@param clsName: Message type/Service type consisting of the
package and the name of the message/service,
i.e. 'std_msgs/Int32'.
@type clsName: str
"""
try:
validateName(iTag)
except IllegalName as e:
raise InvalidRequest('Interface tag is invalid: {0}'.format(e))
if iTag in self._interfaces:
raise InvalidRequest("Can not use the same interface tag '{0}' "
'in the same robot twice.'.format(iTag))
try:
iType = Types.encode(iType)
except __HOLE__:
raise InvalidRequest('Interface type is invalid.')
interface = self._obj.createInterface(iType, clsName, iTag)
interface = Interface(interface, iType, clsName)
self._interfaces[iTag] = interface
interface.notifyOnDeath(self._interfaceDied)
|
TypeError
|
dataset/ETHPy150Open rapyuta/rce/rce-core/rce/core/wrapper.py/Robot.addInterface
|
1,796
|
def removeInterface(self, iTag):
""" Remove an interface from the Robot object.
@param iTag: Tag which is used to identify the interface
which should be removed.
@type iTag: str
"""
try:
self._interfaces.pop(iTag).destroy()
except __HOLE__:
raise InvalidRequest('Can not remove a non existent interface '
"'{0}' from the robot.".format(iTag))
|
KeyError
|
dataset/ETHPy150Open rapyuta/rce/rce-core/rce/core/wrapper.py/Robot.removeInterface
|
1,797
|
def getInterface(self, iTag):
""" Return the wrapped interface instance matching the given tag.
@param iTag: Tag which is used to identify the interface
which should be returned.
@type iTag: str
@return: Wrapped interface instance which was requested.
@rtype: rce.core.user.Interface
"""
try:
return self._interfaces[iTag]
except __HOLE__:
raise InvalidRequest('Can not get a non existent interface '
"'{0}' from the robot.".format(iTag))
|
KeyError
|
dataset/ETHPy150Open rapyuta/rce/rce-core/rce/core/wrapper.py/Robot.getInterface
|
1,798
|
def removeNode(self, nTag):
""" Remove a node from the ROS environment inside the container.
@param nTag: Tag which is used to identify the ROS node
which should removed.
@type nTag: str
"""
try:
self._nodes.pop(nTag).destroy()
except __HOLE__:
raise InvalidRequest('Can not remove a non existent node '
"'{0}' from the container.".format(nTag))
|
KeyError
|
dataset/ETHPy150Open rapyuta/rce/rce-core/rce/core/wrapper.py/Container.removeNode
|
1,799
|
def removeParameter(self, name):
""" Remove a parameter from the ROS environment inside the container.
@param name: Name of the parameter which should be removed.
@type name: str
"""
try:
self._parameters.pop(name).destroy()
except __HOLE__:
raise InvalidRequest('Can not remove a non existent node '
"'{0}' from the container.".format(name))
|
KeyError
|
dataset/ETHPy150Open rapyuta/rce/rce-core/rce/core/wrapper.py/Container.removeParameter
|