| Unnamed: 0 (int64, 0-10k) | function (string, lengths 79-138k) | label (string, 20 classes) | info (string, lengths 42-261) |
|---|---|---|---|
9,700
|
@idiokit.stream
def _normalize(self):
    while True:
        event = yield idiokit.next()
        event.add("feed", "autoshun")
        event.add("feed url", self.feed_url)
        event.add("description", "This host has triggered an IDS alert.")

        for info in event.values("info"):
            for name, tuples in CLASSIFICATION.items():
                if info.startswith(name):
                    for pair in tuples:
                        event.add(pair[0], pair[1])
            event.add("autoshun classification", info)
        event.clear("info")

        if not event.contains("type"):
            event.add("type", "ids alert")

        times = event.values("time")
        for time in times:
            try:
                event.add("source time", self._normalize_time(time))
            except __HOLE__:
                event.add("source time", self._normalize_date(time))
        event.clear("time")

        yield idiokit.send(event)
|
ValueError
|
dataset/ETHPy150Open abusesa/abusehelper/abusehelper/bots/autoshun/autoshunbot.py/AutoshunBot._normalize
|
9,701
|
def _normalize_time(self, time):
    try:
        parsed = _time.strptime(time, "%Y-%m-%d %H:%M:%S")
    except __HOLE__:
        parsed = _time.strptime(time, "%Y-%m-%d %H:%M:")

    seconds = calendar.timegm(parsed)
    seconds += self.time_offset * 3600  # UTC-5 to UTC

    time_tuple = _time.gmtime(seconds)
    return _time.strftime("%Y-%m-%d %H:%M:%SZ", time_tuple)
|
ValueError
|
dataset/ETHPy150Open abusesa/abusehelper/abusehelper/bots/autoshun/autoshunbot.py/AutoshunBot._normalize_time
|
9,702
|
def toposort(g, steps=None):
    # Get set of all nodes, including those without outgoing edges
    keys = set(g.keys())
    values = set({})
    for v in g.values():
        values = values | set(v)
    all_nodes = list(keys | values)
    if (not steps):
        steps = all_nodes

    # Final order
    order = []

    # DFS stack, not using recursion
    stack = []

    # Unmarked set
    unmarked = all_nodes
    # visiting = [] - skip, don't expect 1000s of nodes, |E|/|V| is small

    while unmarked:
        stack.insert(0, unmarked[0])  # push first unmarked
        while (stack):
            n = stack[0]
            add = True
            try:
                for m in g[n]:
                    if (m in unmarked):
                        add = False
                        stack.insert(0, m)
            except KeyError:
                pass
            if (add):
                if (n in steps and n not in order):
                    order.append(n)
                item = stack.pop(0)
                try:
                    unmarked.remove(item)
                except __HOLE__:
                    pass

    noorder = list(set(steps) - set(order))
    return order + noorder
|
ValueError
|
dataset/ETHPy150Open open-cloud/xos/xos/synchronizers/base/toposort.py/toposort
|
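A minimal usage sketch for the toposort entry above, assuming its __HOLE__ is filled with the labeled ValueError (the exception list.remove raises for a missing item); the graph below is a hypothetical example, not part of the dataset:

# Hypothetical input: g maps each node to the nodes it depends on.
g = {"b": ["a"], "c": ["a", "b"], "d": ["c"]}
# Dependencies come before dependents, so this chain yields
# ['a', 'b', 'c', 'd'] (independent nodes may appear in any order).
print(toposort(g))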
9,703
|
def __init__(self, target, onDelete=None):
    """Return a weak-reference-like instance for a bound method

    target -- the instance-method target for the weak
        reference, must have __self__ and __func__ attributes
        and be reconstructable via:
            target.__func__.__get__( target.__self__ )
        which is true of built-in instance methods.
    onDelete -- optional callback which will be called
        when this weak reference ceases to be valid
        (i.e. either the object or the function is garbage
        collected). Should take a single argument,
        which will be passed a pointer to this object.
    """
    def remove(weak, self=self):
        """Set self.isDead to true when method or instance is destroyed"""
        methods = self.deletionMethods[:]
        del self.deletionMethods[:]
        try:
            del self.__class__._allInstances[self.key]
        except KeyError:
            pass
        for function in methods:
            try:
                if callable(function):
                    function(self)
            except Exception as e:
                try:
                    traceback.print_exc()
                except __HOLE__:
                    print('Exception during saferef %s cleanup function %s: %s' % (
                        self, function, e)
                    )
    self.deletionMethods = [onDelete]
    self.key = self.calculateKey(target)
    self.weakSelf = weakref.ref(target.__self__, remove)
    self.weakFunc = weakref.ref(target.__func__, remove)
    self.selfName = str(target.__self__)
    self.funcName = str(target.__func__.__name__)
|
AttributeError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/dispatch/saferef.py/BoundMethodWeakref.__init__
|
9,704
|
def __init__(self, namespace='', pid=lambda: 'self', proc='/proc', registry=core.REGISTRY):
    self._namespace = namespace
    self._pid = pid
    self._proc = proc
    if namespace:
        self._prefix = namespace + '_process_'
    else:
        self._prefix = 'process_'
    self._ticks = 100.0
    try:
        self._ticks = os.sysconf('SC_CLK_TCK')
    except (ValueError, TypeError, __HOLE__):
        pass

    # This is used to test if we can access /proc.
    self._btime = 0
    try:
        self._btime = self._boot_time()
    except IOError:
        pass
    if registry:
        registry.register(self)
|
AttributeError
|
dataset/ETHPy150Open prometheus/client_python/prometheus_client/process_collector.py/ProcessCollector.__init__
|
9,705
|
def collect(self):
    if not self._btime:
        return []

    try:
        pid = os.path.join(self._proc, str(self._pid()).strip())
    except:
        # File likely didn't exist, fail silently.
        raise
        return []

    result = []
    try:
        with open(os.path.join(pid, 'stat')) as stat:
            parts = (stat.read().split(')')[-1].split())
            vmem = core.GaugeMetricFamily(self._prefix + 'virtual_memory_bytes',
                    'Virtual memory size in bytes.', value=float(parts[20]))
            rss = core.GaugeMetricFamily(self._prefix + 'resident_memory_bytes',
                    'Resident memory size in bytes.', value=float(parts[21]) * _PAGESIZE)
            start_time_secs = float(parts[19]) / self._ticks
            start_time = core.GaugeMetricFamily(self._prefix + 'start_time_seconds',
                    'Start time of the process since unix epoch in seconds.',
                    value=start_time_secs + self._btime)
            utime = float(parts[11]) / self._ticks
            stime = float(parts[12]) / self._ticks
            cpu = core.CounterMetricFamily(self._prefix + 'cpu_seconds_total',
                    'Total user and system CPU time spent in seconds.', value=utime + stime)
            result.extend([vmem, rss, start_time, cpu])
    except __HOLE__:
        pass

    try:
        with open(os.path.join(pid, 'limits')) as limits:
            for line in limits:
                if line.startswith('Max open file'):
                    max_fds = core.GaugeMetricFamily(self._prefix + 'max_fds',
                            'Maximum number of open file descriptors.',
                            value=float(line.split()[3]))
                    break
            open_fds = core.GaugeMetricFamily(self._prefix + 'open_fds',
                    'Number of open file descriptors.',
                    len(os.listdir(os.path.join(pid, 'fd'))))
            result.extend([open_fds, max_fds])
    except IOError:
        pass

    return result
|
IOError
|
dataset/ETHPy150Open prometheus/client_python/prometheus_client/process_collector.py/ProcessCollector.collect
|
9,706
|
def receive(self):
    """
    Perform non blocking read on socket.

    returns tuple of form (data, sa)
    if no data then returns (b'', None)
    but always returns a tuple with two elements
    """
    try:
        data, sa = self.ss.recvfrom(self.bs)  # sa is source (host, port)
    except socket.error as ex:
        if ex.errno in (errno.EAGAIN, errno.EWOULDBLOCK):
            return (b'', None)  # receive has nothing, empty string for data
        else:
            emsg = "socket.error = {0}: receiving at {1}\n".format(ex, self.ha)
            console.profuse(emsg)
            raise  # re raise exception ex1

    if console._verbosity >= console.Wordage.profuse:  # faster to check
        try:
            load = data.decode("UTF-8")
        except __HOLE__ as ex:
            load = "0x{0}".format(hexlify(data).decode("ASCII"))
        cmsg = ("\nServer at {0}, received from {1}:\n------------\n"
                "{2}\n".format(self.ha, sa, load))
        console.profuse(cmsg)

    if self.wlog:  # log over the wire rx
        self.wlog.writeRx(sa, data)

    return (data, sa)
|
UnicodeDecodeError
|
dataset/ETHPy150Open ioflo/ioflo/ioflo/aio/udp/udping.py/SocketUdpNb.receive
|
9,707
|
def send(self, data, da):
    """
    Perform non blocking send on socket.

    data is string in python2 and bytes in python3
    da is destination address tuple (destHost, destPort)
    """
    try:
        result = self.ss.sendto(data, da)  # result is number of bytes sent
    except socket.error as ex:
        emsg = "socket.error = {0}: sending from {1} to {2}\n".format(ex, self.ha, da)
        console.profuse(emsg)
        result = 0
        raise

    if console._verbosity >= console.Wordage.profuse:
        try:
            load = data[:result].decode("UTF-8")
        except __HOLE__ as ex:
            load = "0x{0}".format(hexlify(data[:result]).decode("ASCII"))
        cmsg = ("\nServer at {0}, sent {1} bytes to {2}:\n------------\n"
                "{3}\n".format(self.ha, result, da, load))
        console.profuse(cmsg)

    if self.wlog:
        self.wlog.writeTx(da, data[:result])

    return result
|
UnicodeDecodeError
|
dataset/ETHPy150Open ioflo/ioflo/ioflo/aio/udp/udping.py/SocketUdpNb.send
|
9,708
|
def consumer():
    with Connection('127.0.0.1', 'guest', 'guest') as connection:
        with connection.channel() as channel:
            # Declare the Queue, 'simple_queue'.
            channel.queue.declare('simple_queue')

            # Set QoS to 100.
            # This will limit the consumer to only prefetch 100 messages.
            # This is a recommended setting, as it prevents the
            # consumer from keeping all of the messages in a queue to itself.
            channel.basic.qos(100)

            # Start consuming the queue 'simple_queue' using the callback
            # 'on_message' and require the message to be acknowledged.
            channel.basic.consume(on_message, 'simple_queue', no_ack=False)

            try:
                # Start consuming messages.
                # to_tuple equal to False means that messages consumed
                # are returned as a Message object, rather than a tuple.
                channel.start_consuming(to_tuple=False)
            except __HOLE__:
                channel.close()
|
KeyboardInterrupt
|
dataset/ETHPy150Open eandersson/amqpstorm/examples/simple_consumer.py/consumer
|
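A sketch of the on_message callback the consumer entry above assumes, based on the amqpstorm Message API (message.body, message.ack()); this callback is hypothetical and not part of the dataset entry:

def on_message(message):
    # Print the payload, then acknowledge so the broker can drop it
    # (the consumer above passes no_ack=False).
    print(message.body)
    message.ack()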
9,709
|
def __eq__(self, rhs):
    try:
        return isinstance(rhs, self.klass)
    except __HOLE__:
        return type(rhs) == type(self.klass)  # flake8: noqa
|
TypeError
|
dataset/ETHPy150Open mopidy/mopidy/tests/__init__.py/IsA.__eq__
|
9,710
|
def show_tooltip(content, view=None, location=-1, timeout=0):
    '''
    Shows a tooltip.

    @content
      The tooltip's content (minihtml).

    @view
      The view in which the tooltip should be shown. If `None`, the active view
      will be used if available.

    @location
      Text location at which the tooltip will be shown.

    @timeout
      If greater than 0, the tooltip will be autohidden after @timeout
      milliseconds.
    '''
    if not view:
        try:
            view = sublime.active_window().active_view()
        except __HOLE__ as e:
            return

    view.show_popup(content, location=location, max_width=500)

    if timeout > 0:
        def hide(current_id):
            global TOOLTIP_ID
            if TOOLTIP_ID == current_id:
                view.hide_popup()
        current_id = next(id_generator)
        after(timeout, lambda: hide(current_id))
|
AttributeError
|
dataset/ETHPy150Open guillermooo/dart-sublime-bundle/lib/notifications.py/show_tooltip
|
9,711
|
def cleanup(self):
    try:
        os.remove(self.test_filename)
    except __HOLE__:
        pass
|
OSError
|
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/tests/functional/db/test_connection_switch.py/ConnectionSwitchTestCase.cleanup
|
9,712
|
def __init__(self, s='', command='', args=(), prefix='', msg=None,
             reply_env=None):
    assert not (msg and s), 'IrcMsg.__init__ cannot accept both s and msg'
    if not s and not command and not msg:
        raise MalformedIrcMsg('IRC messages require a command.')
    self._str = None
    self._repr = None
    self._hash = None
    self._len = None
    self.reply_env = reply_env
    self.tags = {}
    if s:
        originalString = s
        try:
            if not s.endswith('\n'):
                s += '\n'
            self._str = s
            if s[0] == '@':
                (server_tags, s) = s.split(' ', 1)
                self.server_tags = parse_server_tags(server_tags[1:])
            else:
                self.server_tags = {}
            if s[0] == ':':
                self.prefix, s = s[1:].split(None, 1)
            else:
                self.prefix = ''
            if ' :' in s:  # Note the space: IPV6 addresses are bad w/o it.
                s, last = s.split(' :', 1)
                self.args = s.split()
                self.args.append(last.rstrip('\r\n'))
            else:
                self.args = s.split()
            self.command = self.args.pop(0)
            if 'time' in self.server_tags:
                s = self.server_tags['time']
                date = datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M:%S.%fZ')
                date = minisix.make_datetime_utc(date)
                self.time = minisix.datetime__timestamp(date)
            else:
                self.time = time.time()
        except (IndexError, __HOLE__):
            raise MalformedIrcMsg(repr(originalString))
    else:
        if msg is not None:
            if prefix:
                self.prefix = prefix
            else:
                self.prefix = msg.prefix
            if command:
                self.command = command
            else:
                self.command = msg.command
            if args:
                self.args = args
            else:
                self.args = msg.args
            if reply_env:
                self.reply_env = reply_env
            elif msg.reply_env:
                self.reply_env = msg.reply_env.copy()
            else:
                self.reply_env = None
            self.tags = msg.tags.copy()
            self.server_tags = msg.server_tags
            self.time = msg.time
        else:
            self.prefix = prefix
            self.command = command
            assert all(ircutils.isValidArgument, args), args
            self.args = args
            self.time = None
            self.server_tags = {}
    self.args = tuple(self.args)
    if isUserHostmask(self.prefix):
        (self.nick, self.user, self.host) = ircutils.splitHostmask(self.prefix)
    else:
        (self.nick, self.user, self.host) = (self.prefix,) * 3
|
ValueError
|
dataset/ETHPy150Open ProgVal/Limnoria/src/ircmsgs.py/IrcMsg.__init__
|
9,713
|
def parse(docstring, markup='plaintext', errors=None, **options):
    """
    Parse the given docstring, and use it to construct a
    C{ParsedDocstring}. If any fatal C{ParseError}s are encountered
    while parsing the docstring, then the docstring will be rendered
    as plaintext, instead.

    @type docstring: C{string}
    @param docstring: The docstring to encode.
    @type markup: C{string}
    @param markup: The name of the markup language that is used by
        the docstring. If the markup language is not supported, then
        the docstring will be treated as plaintext. The markup name
        is case-insensitive.
    @param errors: A list where any errors generated during parsing
        will be stored. If no list is specified, then fatal errors
        will generate exceptions, and non-fatal errors will be
        ignored.
    @type errors: C{list} of L{ParseError}
    @rtype: L{ParsedDocstring}
    @return: A L{ParsedDocstring} that encodes the contents of
        C{docstring}.
    @raise ParseError: If C{errors} is C{None} and an error is
        encountered while parsing.
    """
    # Initialize errors list.
    raise_on_error = (errors is None)
    if errors == None: errors = []

    # Normalize the markup language name.
    markup = markup.lower()

    # Is the markup language valid?
    if not re.match(r'\w+', markup):
        _parse_warn('Bad markup language name %r. Treating '
                    'docstrings as plaintext.' % markup)
        import epydoc.markup.plaintext as plaintext
        return plaintext.parse_docstring(docstring, errors, **options)

    # Is the markup language supported?
    if markup not in _markup_language_registry:
        _parse_warn('Unsupported markup language %r. Treating '
                    'docstrings as plaintext.' % markup)
        import epydoc.markup.plaintext as plaintext
        return plaintext.parse_docstring(docstring, errors, **options)

    # Get the parse function.
    parse_docstring = _markup_language_registry[markup]

    # If it's a string, then it names a function to import.
    if isinstance(parse_docstring, basestring):
        try: exec('from %s import parse_docstring' % parse_docstring)
        except ImportError, e:
            _parse_warn('Error importing %s for markup language %s: %s' %
                        (parse_docstring, markup, e))
            import epydoc.markup.plaintext as plaintext
            return plaintext.parse_docstring(docstring, errors, **options)
        _markup_language_registry[markup] = parse_docstring

    # Keep track of which markup languages have been used so far.
    MARKUP_LANGUAGES_USED.add(markup)

    # Parse the docstring.
    try: parsed_docstring = parse_docstring(docstring, errors, **options)
    except __HOLE__: raise
    except Exception, e:
        if epydoc.DEBUG: raise
        log.error('Internal error while parsing a docstring: %s; '
                  'treating docstring as plaintext' % e)
        import epydoc.markup.plaintext as plaintext
        return plaintext.parse_docstring(docstring, errors, **options)

    # Check for fatal errors.
    fatal_errors = [e for e in errors if e.is_fatal()]
    if fatal_errors and raise_on_error: raise fatal_errors[0]
    if fatal_errors:
        import epydoc.markup.plaintext as plaintext
        return plaintext.parse_docstring(docstring, errors, **options)

    return parsed_docstring

# only issue each warning once:
|
KeyboardInterrupt
|
dataset/ETHPy150Open CollabQ/CollabQ/vendor/epydoc/markup/__init__.py/parse
|
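A hypothetical call sketch for the parse entry above, assuming the epydoc.markup package is importable and 'epytext' is registered in its _markup_language_registry:

errors = []
parsed = parse("Plain prose.", markup='epytext', errors=errors)
# With errors=None instead, the first fatal ParseError is raised;
# unknown or unsupported markup names fall back to plaintext rendering.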
9,714
|
def IntegerValidator(min, max):
    """
    Factory function, returns validator method processing integers.
    """
    def _validator(value):
        try:
            int(value)
            if value < min:
                raise forms.ValidationError(_("Minimum is %d" % min))
            elif value > max:
                raise forms.ValidationError(_("Maximum is %d" % max))
        except __HOLE__:
            raise forms.ValidationError(_("Must be a number"))
        return value
    return _validator
|
TypeError
|
dataset/ETHPy150Open lukaszb/django-projector/projector/utils/validators.py/IntegerValidator
|
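A usage sketch for the IntegerValidator factory above, assuming Django's forms module and a gettext alias _ are in scope as in the source module; the bounds are hypothetical:

validate_share = IntegerValidator(0, 1024)
validate_share(512)   # returns 512
validate_share(2048)  # raises forms.ValidationError("Maximum is 1024")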
9,715
|
def type_promotion(t1, t2):
    if t2 == None:
        return t1
    t1 = _super_types.get(t1, t1)
    t2 = _super_types.get(t2, t2)
    if t1 == t2:
        return t1  # matching super-types
    try:
        return _typePromotionMap[t1][t2]
    except __HOLE__:
        raise TypeError(
            'Operators cannot combine datatypes %s and %s' % (t1, t2))
|
KeyError
|
dataset/ETHPy150Open RDFLib/rdflib/rdflib/plugins/sparql/datatypes.py/type_promotion
|
9,716
|
def __getitem__(self, k):
    if self.has_override(k):
        return self.get_override(k)
    try:
        return dict.__getitem__(self, k)
    except __HOLE__, e:
        if self.has_global_setting(k):
            return self.get_global_setting(k)
        else:
            if self.has_field(k):
                return self.get_default(k)
            raise e
|
KeyError
|
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/core/modules/output_modules.py/OutputModeConfig.__getitem__
|
9,717
|
def compute_output(self, output_module, configuration):
    old_fname = output_module.get_input('value').name
    full_path = self.get_filename(configuration,
                                  suffix=(os.path.splitext(old_fname)[1] or
                                          self.default_file_extension))
    # we know we are in overwrite mode because it would have been
    # flagged otherwise
    if os.path.exists(full_path):
        try:
            os.remove(full_path)
        except OSError, e:
            raise ModuleError(output_module,
                              ('Could not delete existing '
                               'path "%s"' % full_path))
    try:
        vistrails.core.system.link_or_copy(old_fname, full_path)
    except __HOLE__, e:
        msg = "Could not create file '%s': %s" % (full_path, e)
        raise ModuleError(output_module, msg)
|
OSError
|
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/core/modules/output_modules.py/FileToFileMode.compute_output
|
9,718
|
@classmethod
def can_compute(cls):
    if cls.notebook_override is not None:
        return cls.notebook_override
    try:
        import IPython.core.display
        from IPython import get_ipython
        from IPython.kernel.zmq.zmqshell import ZMQInteractiveShell
    except __HOLE__:
        return False
    else:
        ip = get_ipython()
        if ip is not None and isinstance(ip, ZMQInteractiveShell):
            warnings.warn(
                "Looks like you might be running from IPython; you "
                "might want to call\nvistrails.ipython_mode(True) to "
                "enable IPythonMode, allowing output modules to\n"
                "render to the notebook.\n"
                "If this is wrong, please call "
                "vistrails.ipython_mode(False) to get rid of this\n"
                "warning.")
        return False
|
ImportError
|
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/core/modules/output_modules.py/IPythonMode.can_compute
|
9,719
|
def load_module(self, fullname):
    # print "load_module:", fullname
    from types import ModuleType
    try:
        s = self.sources[fullname]
        is_pkg = False
    except __HOLE__:
        s = self.sources[fullname + '.__init__']
        is_pkg = True

    co = compile(s, fullname, 'exec')
    module = sys.modules.setdefault(fullname, ModuleType(fullname))
    module.__file__ = "%s/%s" % (__file__, fullname)
    module.__loader__ = self
    if is_pkg:
        module.__path__ = [fullname]

    do_exec(co, module.__dict__)
    return sys.modules[fullname]
|
KeyError
|
dataset/ETHPy150Open alecthomas/injector/runtest.py/DictImporter.load_module
|
9,720
|
def _get_volume_creation_time(volume):
    """
    Extract the creation time from an AWS or Rackspace volume.

    XXX: libcloud doesn't represent volume creation time uniformly across
    drivers. Thus this method only works on drivers specifically accounted
    for. Should be extended or refactored for GCE support.

    :param libcloud.compute.base.StorageVolume volume: The volume to query.

    :returns: The datetime when the ``volume`` was created.
    """
    try:
        # AWS
        return volume.extra['create_time']
    except __HOLE__:
        # Rackspace. Timestamps have no timezone indicated. Manual
        # experimentation indicates timestamps are in UTC (which of course
        # is the only reasonable thing).
        return parse_date(
            volume.extra['created_at']
        ).replace(tzinfo=tzutc())
|
KeyError
|
dataset/ETHPy150Open ClusterHQ/flocker/admin/cleanup.py/_get_volume_creation_time
|
9,721
|
def _is_test_volume(self, volume):
    """
    Determine whether or not the given volume belongs to a test-created
    Flocker cluster (and is therefore subject to automatic destruction).

    :return: ``True`` if it does, ``False`` if it does not.
    """
    try:
        cluster_id = self._get_cluster_id(volume)
    except __HOLE__:
        return False
    return self._is_test_cluster(cluster_id)
|
KeyError
|
dataset/ETHPy150Open ClusterHQ/flocker/admin/cleanup.py/CleanVolumes._is_test_volume
|
9,722
|
def to_internal_value(self, data):
    """Deserialize the field's JSON data, for write operations."""
    try:
        val = json.loads(data)
    except __HOLE__:
        val = data
    return val
|
TypeError
|
dataset/ETHPy150Open deis/deis/controller/api/serializers.py/JSONFieldSerializer.to_internal_value
|
9,723
|
def to_internal_value(self, data):
    """Deserialize the field's JSON integer data."""
    field = super(JSONIntFieldSerializer, self).to_internal_value(data)
    for k, v in field.viewitems():
        if v is not None:  # NoneType is used to unset a value
            try:
                field[k] = int(v)
            except __HOLE__:
                field[k] = v
                # Do nothing, the validator will catch this later
    return field
|
ValueError
|
dataset/ETHPy150Open deis/deis/controller/api/serializers.py/JSONIntFieldSerializer.to_internal_value
|
9,724
|
def validate_cpu(self, value):
    for k, v in value.viewitems():
        if v is None:  # use NoneType to unset a value
            continue
        if not re.match(PROCTYPE_MATCH, k):
            raise serializers.ValidationError("Process types can only contain [a-z]")
        shares = re.match(CPUSHARE_MATCH, str(v))
        if not shares:
            raise serializers.ValidationError("CPU shares must be an integer")
        for v in shares.groupdict().viewvalues():
            try:
                i = int(v)
            except __HOLE__:
                raise serializers.ValidationError("CPU shares must be an integer")
            if i > 1024 or i < 0:
                raise serializers.ValidationError("CPU shares must be between 0 and 1024")
    return value
|
ValueError
|
dataset/ETHPy150Open deis/deis/controller/api/serializers.py/ConfigSerializer.validate_cpu
|
9,725
|
def coerce_numeric(arr):
    """
    Coerce an array into a numeric array, replacing non-numeric elements with
    nans.

    If the array is already a numeric type, it is returned unchanged

    Parameters
    ----------
    arr : `numpy.ndarray`
        The array to coerce
    """
    # already numeric type
    if np.issubdtype(arr.dtype, np.number):
        return arr

    if np.issubdtype(arr.dtype, np.bool_):
        return arr.astype(np.int)

    # a string dtype, or anything else
    try:
        return pd.to_numeric(arr, errors='coerce')
    except __HOLE__:  # older versions of pandas
        return pd.Series(arr).convert_objects(convert_numeric=True).values
|
AttributeError
|
dataset/ETHPy150Open glue-viz/glue/glue/utils/array.py/coerce_numeric
|
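A hypothetical call sketch for coerce_numeric above, assuming numpy (np) and pandas (pd) are imported as in the source module:

arr = np.array(['1', '2', 'not-a-number'], dtype=object)
coerce_numeric(arr)  # -> array([ 1.,  2., nan]); numeric arrays pass through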
9,726
|
def pretty_number(numbers):
    """
    Convert a list/array of numbers into a nice list of strings

    Parameters
    ----------
    numbers : list
        The numbers to convert
    """
    try:
        return [pretty_number(n) for n in numbers]
    except __HOLE__:
        pass

    n = numbers
    if n == 0:
        result = '0'
    elif (abs(n) < 1e-3) or (abs(n) > 1e3):
        result = "%0.3e" % n
    elif abs(int(n) - n) < 1e-3 and int(n) != 0:
        result = "%i" % n
    else:
        result = "%0.3f" % n
        if result.find('.') != -1:
            result = result.rstrip('0')
    return result
|
TypeError
|
dataset/ETHPy150Open glue-viz/glue/glue/utils/array.py/pretty_number
|
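A hypothetical call sketch for pretty_number above; scalars hit the TypeError branch (this entry's labeled __HOLE__) and are formatted directly, while lists recurse element-wise:

pretty_number(0.25)           # '0.25'
pretty_number([0, 1e-05, 3])  # ['0', '1.000e-05', '3']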
9,727
|
def add_sheet_reference(self, formula):
    patches = []
    n_sheets = len(self.__worksheets)
    sheet_refs, xcall_refs = formula.get_references()
    for ref0, ref1, offset in sheet_refs:
        if not ref0.isdigit():
            try:
                ref0n = self.__worksheet_idx_from_name[ref0.lower()]
            except KeyError:
                self.raise_bad_sheetname(ref0)
        else:
            ref0n = self.convert_sheetindex(ref0, n_sheets)
        if ref1 == ref0:
            ref1n = ref0n
        elif not ref1.isdigit():
            try:
                ref1n = self.__worksheet_idx_from_name[ref1.lower()]
            except __HOLE__:
                self.raise_bad_sheetname(ref1)
        else:
            ref1n = self.convert_sheetindex(ref1, n_sheets)
        if ref1n < ref0n:
            msg = "Formula: sheets out of order; %r:%r -> (%d, %d)" \
                % (ref0, ref1, ref0n, ref1n)
            raise Exception(msg)
        if self._ownbook_supbookx is None:
            self.setup_ownbook()
        reference = (self._ownbook_supbookx, ref0n, ref1n)
        if reference in self.__sheet_refs:
            patches.append((offset, self.__sheet_refs[reference]))
        else:
            nrefs = len(self.__sheet_refs)
            if nrefs > 65535:
                raise Exception('More than 65536 inter-sheet references')
            self.__sheet_refs[reference] = nrefs
            patches.append((offset, nrefs))
    for funcname, offset in xcall_refs:
        if self._ownbook_supbookx is None:
            self.setup_ownbook()
        if self._xcall_supbookx is None:
            self.setup_xcall()
        # print funcname, self._supbook_xref
        patches.append((offset, self._xcall_supbook_ref))
        if not isinstance(funcname, str):
            funcname = funcname.decode(self.encoding)
        if funcname in self._xcall_xref:
            idx = self._xcall_xref[funcname]
        else:
            self._xcall_xref[funcname] = idx = len(self._xcall_xref)
        patches.append((offset + 2, idx + 1))
    formula.patch_references(patches)

##################################################################
## BIFF records generation
##################################################################
|
KeyError
|
dataset/ETHPy150Open kennethreitz/tablib/tablib/packages/xlwt3/Workbook.py/Workbook.add_sheet_reference
|
9,728
|
def autodiscover():
    """
    Modified from django.contrib.admin.__init__ to look for reports.py
    files.

    Auto-discover INSTALLED_APPS reports.py modules and fail silently when
    not present. This forces an import on them to register any reports they
    may want.
    """
    # Bail out if autodiscover didn't finish loading from a previous call so
    # that we avoid running autodiscover again when the URLconf is loaded by
    # the exception handler to resolve the handler500 view. This prevents a
    # reports.py module with errors from re-registering models and raising a
    # spurious AlreadyRegistered exception (see #8245).
    global LOADING
    if LOADING:
        return
    LOADING = True

    import imp
    from django.conf import settings

    for app in settings.INSTALLED_APPS:
        # For each app, we need to look for an reports.py inside that app's
        # package. We can't use os.path here -- recall that modules may be
        # imported different ways (think zip files) -- so we need to get
        # the app's __path__ and look for reports.py on that path.

        # Step 1: find out the app's __path__ Import errors here will (and
        # should) bubble up, but a missing __path__ (which is legal, but weird)
        # fails silently -- apps that do weird things with __path__ might
        # need to roll their own report registration.
        try:
            app_path = import_module(app).__path__
        except __HOLE__:
            continue

        # Step 2: use imp.find_module to find the app's reports.py. For some
        # reason imp.find_module raises ImportError if the app can't be found
        # but doesn't actually try to import the module. So skip this app if
        # its reports.py doesn't exist
        try:
            imp.find_module('reports', app_path)
        except ImportError:
            continue

        # Step 3: import the app's reports file. If this has errors we want
        # them to bubble up.
        import_module("%s.reports" % app)

    # autodiscover was successful, reset loading flag.
    LOADING = False
|
AttributeError
|
dataset/ETHPy150Open bkonkle/django-reporter/reporter/registration.py/autodiscover
|
9,729
|
def test_no_init_kwargs(self):
    """
    Test that a view can't be accidentally instantiated before deployment
    """
    try:
        SimpleView(key='value').as_view()
        self.fail('Should not be able to instantiate a view')
    except __HOLE__:
        pass
|
AttributeError
|
dataset/ETHPy150Open django/django/tests/generic_views/test_base.py/ViewTest.test_no_init_kwargs
|
9,730
|
def test_no_init_args(self):
    """
    Test that a view can't be accidentally instantiated before deployment
    """
    try:
        SimpleView.as_view('value')
        self.fail('Should not be able to use non-keyword arguments instantiating a view')
    except __HOLE__:
        pass
|
TypeError
|
dataset/ETHPy150Open django/django/tests/generic_views/test_base.py/ViewTest.test_no_init_args
|
9,731
|
def _get_matching_last_lobbyist_history(self, lobbyist_ids):
    try:
        last_lobbyist_history = LobbyistHistory.objects.filter(scrape_time__isnull=False).latest('scrape_time')
    except __HOLE__:
        last_lobbyist_history = None
    if last_lobbyist_history is not None:
        last_lobbyist_ids = sorted(last_lobbyist_history.lobbyists.all(), key=lambda lobbyist: lobbyist.id)
        if (lobbyist_ids != last_lobbyist_ids):
            last_lobbyist_history = None
    return last_lobbyist_history
|
ObjectDoesNotExist
|
dataset/ETHPy150Open ofri/Open-Knesset/lobbyists/scrapers/__init__.py/MainScraperListStorage._get_matching_last_lobbyist_history
|
9,732
|
def _update_lobbyist_corporations(self, lobbyist_history):
    corporation_lobbyists = {}
    for lobbyist in lobbyist_history.lobbyists.all():
        corporation, is_created = LobbyistCorporation.objects.get_or_create(
            source_id=lobbyist.latest_data.corporation_id, name=lobbyist.latest_data.corporation_name
        )
        if not corporation.id in corporation_lobbyists:
            corporation_lobbyists[corporation.id] = []
        corporation_lobbyists[corporation.id].append(lobbyist.id)
    for corporation_id in corporation_lobbyists:
        lobbyist_ids = sorted(corporation_lobbyists[corporation_id])
        corporation = LobbyistCorporation.objects.get(id=corporation_id)
        need_corporation_data = True
        try:
            corporation_data = corporation.latest_data
            if corporation_data.name == corporation.name and corporation_data.source_id == corporation.source_id:
                last_lobbyist_ids = map(lambda lobbyist: lobbyist.id, sorted(corporation_data.lobbyists.all(), key=lambda lobbyist: lobbyist.id))
                if last_lobbyist_ids == lobbyist_ids:
                    need_corporation_data = False
        except __HOLE__:
            pass
        if need_corporation_data:
            corporation_data = LobbyistCorporationData.objects.create(
                name=corporation.name, source_id=corporation.source_id
            )
            for id in lobbyist_ids:
                corporation_data.lobbyists.add(Lobbyist.objects.get(id=id))
            corporation_data.scrape_time = datetime.now()
            corporation_data.save()
            corporation.data.add(corporation_data)
            corporation.save()
|
ObjectDoesNotExist
|
dataset/ETHPy150Open ofri/Open-Knesset/lobbyists/scrapers/__init__.py/MainScraperListStorage._update_lobbyist_corporations
|
9,733
|
def is_db_expression(value):
    try:
        # django < 1.8
        from django.db.models.expressions import ExpressionNode
        return isinstance(value, ExpressionNode)
    except __HOLE__:
        # django >= 1.8 (big refactoring in Lookup/Expressions/Transforms)
        from django.db.models.expressions import BaseExpression, Combinable
        return isinstance(value, (BaseExpression, Combinable))
|
ImportError
|
dataset/ETHPy150Open romgar/django-dirtyfields/src/dirtyfields/compat.py/is_db_expression
|
9,734
|
def clean_value_for_csv(value_to_store):
    try:
        value_to_store = value_to_store.encode("utf-8").strip()
    except __HOLE__:
        pass
    return value_to_store
|
AttributeError
|
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/profile.py/clean_value_for_csv
|
9,735
|
@cached_property
def email_hash(self):
    try:
        return hashlib.md5(self.email).hexdigest()
    except __HOLE__:
        return None  # there's no email to hash.
|
TypeError
|
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/profile.py/Profile.email_hash
|
9,736
|
@cached_property
def latest_diff_ts(self):
    ts_list = [p.latest_diff_timestamp for p in self.products_not_removed if p.latest_diff_timestamp]
    try:
        return sorted(ts_list, reverse=True)[0]
    except __HOLE__:
        return None
|
IndexError
|
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/profile.py/Profile.latest_diff_ts
|
9,737
|
def get_tips(self):
    try:
        return self.tips.split(",")
    except __HOLE__:
        return []
|
AttributeError
|
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/profile.py/Profile.get_tips
|
9,738
|
def add_products(self, product_id_dict):
    try:
        analytics_credentials = self.get_analytics_credentials()
    except __HOLE__:
        # AnonymousUser doesn't have method
        analytics_credentials = {}

    product_id_type = product_id_dict.keys()[0]
    add_even_if_removed = True  # re-add even if previously removed
    new_products = self.get_new_products(
        product_id_type,
        product_id_dict[product_id_type],
        analytics_credentials,
        add_even_if_removed)
    tiids = [product.tiid for product in new_products]
    return tiids
|
AttributeError
|
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/profile.py/Profile.add_products
|
9,739
|
def update_products_from_linked_account(self, account, add_even_if_removed):
    added_tiids = []
    if account == "twitter":
        pass
        # don't update twitter right now
        # self.update_twitter()
    else:
        account_value = getattr(self, account + "_id", None)
        tiids_to_add = []
        if account_value:
            try:
                analytics_credentials = self.get_analytics_credentials()
            except __HOLE__:
                # AnonymousUser doesn't have method
                analytics_credentials = {}
            new_products = self.get_new_products(
                account,
                account_value,
                analytics_credentials,
                add_even_if_removed)
            added_tiids = [product.tiid for product in new_products]
    return added_tiids
|
AttributeError
|
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/profile.py/Profile.update_products_from_linked_account
|
9,740
|
def patch(self, newValuesDict):
    for k, v in newValuesDict.iteritems():
        # hack. only save lowercase emails.
        if k == "email":
            v = v.lower()

        # convert all strings to unicode
        if isinstance(v, basestring):
            v = unicode(v)

        # if this Profile has this property, overwrite it with the supplied val
        if hasattr(self, k):
            try:
                setattr(self, k, v)
            except __HOLE__:
                pass
    return self
|
AttributeError
|
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/profile.py/Profile.patch
|
9,741
|
def build_csv_rows(self):
    header_metric_names = []
    for product in self.display_products:
        for metric in product.metrics:
            header_metric_names += [metric.fully_qualified_metric_name]
    header_metric_names = sorted(list(set(header_metric_names)))

    header_alias_names = ["title", "doi"]

    # make header row
    header_list = ["tiid"] + header_alias_names + header_metric_names
    ordered_fieldnames = OrderedDict([(col, None) for col in header_list])

    # body rows
    rows = []
    for product in self.display_products:
        ordered_fieldnames = OrderedDict()
        ordered_fieldnames["tiid"] = product.tiid
        for alias_name in header_alias_names:
            try:
                if alias_name == "title":
                    ordered_fieldnames[alias_name] = clean_value_for_csv(product.biblio.title)
                else:
                    ordered_fieldnames[alias_name] = clean_value_for_csv(product.aliases.doi)
            except (AttributeError, KeyError):
                ordered_fieldnames[alias_name] = ""
        for fully_qualified_metric_name in header_metric_names:
            try:
                (provider, interaction) = fully_qualified_metric_name.split(":")
                most_recent_snap = product.get_metric_by_name(provider, interaction).most_recent_snap
                value = most_recent_snap.raw_value_cleaned_for_export
                ordered_fieldnames[fully_qualified_metric_name] = clean_value_for_csv(value)
            except (__HOLE__, KeyError):
                ordered_fieldnames[fully_qualified_metric_name] = ""
        rows += [ordered_fieldnames]
    return (ordered_fieldnames, rows)
|
AttributeError
|
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/profile.py/Profile.build_csv_rows
|
9,742
|
def get_new_products(self, provider_name, product_seeds, analytics_credentials={}, add_even_if_removed=False):
    save_profile_refresh_status(self, RefreshStatus.states["PROGRESS_BAR"])
    if add_even_if_removed:
        tiids_to_exclude = self.tiids
    else:
        tiids_to_exclude = self.tiids_including_removed  # don't re-import dup or removed products
    try:
        new_products = import_and_create_products(
            self.id,
            provider_name,
            product_seeds,
            analytics_credentials,
            tiids_to_exclude)
    except (__HOLE__, ProviderError):
        new_products = []
    return new_products
|
ImportError
|
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/profile.py/Profile.get_new_products
|
9,743
|
def dict_about(self, show_secrets=True):
    secrets = [
        "email",
        "wordpress_api_key",
        "password_hash"
    ]
    properties_to_return = [
        "id",
        "given_name",
        "bio",
        "surname",
        "full_name",
        "email",
        "email_hash",
        "url_slug",
        "collection_id",
        "created",
        "last_viewed_profile",
        "last_refreshed",
        "last_email_check",
        "last_email_sent",
        "orcid_id",
        "github_id",
        "slideshare_id",
        "twitter_id",
        "figshare_id",
        "publons_id",
        "google_scholar_id",
        "wordpress_api_key",
        "stripe_id",
        "days_left_in_trial",
        "new_metrics_notification_dismissed",
        "notification_email_frequency",
        "is_advisor",
        "linked_accounts",
        "is_subscribed",
        "is_trialing",
        "trial_extended_until",
        "trial_end_date",
        "is_live",
        "institution"

        # these make calls out to Stripe, plus we're not using them anyway.
        #"is_paid_subscriber",
        #"subscription_start_date"
    ]

    ret_dict = {}
    for prop in properties_to_return:
        val = getattr(self, prop, None)
        try:
            # if we want dict, we probably want something json-serializable
            val = val.isoformat()
        except __HOLE__:
            pass

        if show_secrets:
            ret_dict[prop] = val
        elif not show_secrets and not prop in secrets:
            ret_dict[prop] = val
        else:
            pass  # hide_secrets=True, and this is a secret. don't return it.

    return ret_dict
|
AttributeError
|
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/profile.py/Profile.dict_about
|
9,744
|
def hide_profile_secrets(profile):
    secrets = [
        "wordpress_api_key"
    ]
    try:
        for key in secrets:
            delattr(profile, key)
    except (__HOLE__, KeyError):
        pass
    return profile
|
AttributeError
|
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/profile.py/hide_profile_secrets
|
9,745
|
def get_profile_summary_dict(profile):
    deets = defaultdict(int)
    deets["url_slug"] = profile.url_slug
    deets["url"] = u"https://impactstory.org/{url}".format(url=profile.url_slug)
    deets["profile_id"] = profile.id
    deets["email"] = profile.email
    deets["full_name"] = profile.full_name
    deets["given_name"] = profile.given_name
    deets["surname"] = profile.surname
    deets["stripe_id"] = profile.stripe_id
    deets["created"] = profile.created.isoformat()
    deets["is_subscriber"] = profile.is_subscribed
    deets["is_paid_subscriber"] = profile.is_paid_subscriber
    deets["is_unpaid_subscriber"] = profile.is_subscribed and not profile.is_paid_subscriber
    deets["is_advisor"] = profile.is_advisor
    deets["google_scholar_id"] = profile.google_scholar_id
    deets["orcid_id"] = profile.orcid_id
    deets["github_id"] = profile.github_id
    deets["slideshare_id"] = profile.slideshare_id
    deets["twitter_id"] = profile.twitter_id
    deets["figshare_id"] = profile.figshare_id
    deets["publons_id"] = profile.publons_id
    deets["has_bio"] = (None != profile.bio)
    deets["got_new_metrics_email"] = (None != profile.last_email_sent)
    deets["subscription_date"] = profile.subscription_start_date

    awards = profile.get_profile_awards()
    if awards:
        deets["oa_badge"] = awards[0].level_name
    else:
        deets["oa_badge"] = None

    deets["num_countries"] = len(profile.countries.countries)

    products = profile.display_products
    deets["num_products"] = len(products)
    deets["earliest_publication_year"] = 9999
    mendeley_disciplines = Counter()
    badges = Counter()
    highly_badges = Counter()
    citations = Counter()
    for product in products:
        deets["num_tweets_on_products"] += len(product.tweets)
        for award in product.awards:
            badges[award.engagement_type] += 1
            if award.is_highly:
                highly_badges[award.engagement_type] += 1
        if product.awards:
            deets["num_products_with_awards"] += 1
        if product.genre == "article":
            if product.awards:
                deets["num_articles_with_awards"] += 1
            if product.aliases:
                if product.aliases.display_doi:
                    deets["num_articles_with_dois"] += 1
                if product.aliases.display_pmid:
                    deets["num_articles_with_pmids"] += 1
                if product.aliases.display_pmc:
                    deets["num_articles_with_pmcs"] += 1
                if product.aliases.resolved_url:
                    if "peerj" in product.aliases.resolved_url:
                        deets["num_articles_with_peerj"] += 1
                    if "arxiv" in product.aliases.resolved_url:
                        deets["num_articles_with_arxiv"] += 1
                    if "plos" in product.aliases.resolved_url:
                        deets["num_articles_with_plos"] += 1
                    if "figshare" in product.aliases.resolved_url:
                        deets["num_articles_with_figshare"] += 1
        num_highly_awards_for_this_product = len([1 for award in product.awards if award.is_highly])
        if num_highly_awards_for_this_product:
            deets["num_highly_awards"] += 1
        if product.has_file:
            deets["num_uploaded_files"] += 1
        if product.embed_markup:
            deets["num_products_with_embed_markup"] += 1
        if product.has_metrics:
            deets["num_products_with_metrics"] += 1
        if product.biblio:
            try:
                if product.biblio.year and int(product.biblio.year) < deets["earliest_publication_year"]:
                    deets["earliest_publication_year"] = int(product.biblio.year)
                if product.biblio.year and int(product.biblio.year) > 2007:
                    deets["num_articles_since_2007"] += 1
                    deets["num_tweets_on_articles_since_2007"] += len(product.tweets)
            except (AttributeError, ValueError):
                pass
        try:
            if hasattr(product.biblio, "journal") and "ecolog" in product.biblio.journal.lower():
                deets["num_articles_with_ecology_journal"] += 1
        except __HOLE__:
            pass
        try:
            if hasattr(product.biblio, "journal") and "librar" in product.biblio.journal.lower():
                deets["num_articles_with_library_journal"] += 1
        except AttributeError:
            pass
        citation_metric = product.get_metric_by_name("scopus", "citations")
        if citation_metric:
            citations[citation_metric.current_value] += 1
            deets["num_products_with_citations"] += 1
            deets["num_citations"] += citation_metric.current_value
        mendeley_disciplines[product.mendeley_discipline] += 1

    gravatar_url = "http://www.gravatar.com/avatar.php?"
    gravatar_url += urllib.urlencode({'gravatar_id': hashlib.md5(profile.email.lower()).hexdigest()})
    gravatar_url += "?d=404"  # gravatar returns 404 if doesn't exist, with this
    gravitar_response = requests.get(gravatar_url)
    if gravitar_response.status_code == 200:
        deets["has_gravitar"] = True

    deets["highly_badges"] = highly_badges.most_common(5)
    deets["badges"] = badges.most_common(5)
    deets["num_genres"] = len(profile.genres)

    top_disciplines = mendeley_disciplines.most_common(4)
    deets["mendeley_discipline_top_3"] = [(name, count) for (name, count) in top_disciplines if name != None]
    try:
        deets["mendeley_discipline"] = deets["mendeley_discipline_top_3"][0][0]
    except IndexError:
        pass

    sorted_citations = citations.most_common()
    sorted_citations.sort(key=lambda tup: tup[0], reverse=True)
    # print sorted_citations
    number_of_papers_with_more_citations = 0
    for (cites, count) in sorted_citations:
        number_of_papers_with_more_citations += count
        if number_of_papers_with_more_citations > cites:
            break
    deets["hindex"] = number_of_papers_with_more_citations
    # print deets["hindex"]

    for genre_dict in profile.genres:
        deets["num_genre_" + genre_dict.name] = genre_dict.num_products

    if deets["num_tweets_on_articles_since_2007"]:
        deets["num_tweets_per_recent_article"] = deets["num_tweets_on_articles_since_2007"] / deets["num_articles_since_2007"]

    num_articles = deets["num_genre_article"]
    if num_articles:
        for key in deets.keys():
            if key.startswith("num_articles_with"):
                key_percent = key.replace("num_articles_with_", "perc_")
                deets[key_percent] = int(round(100 * deets[key] / num_articles))
                del deets[key]

    return deets
|
AttributeError
|
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/profile.py/get_profile_summary_dict
|
9,746
|
def add_pre_hook(self, request, results):
    if hasattr(self, 'pre_process'):
        cb = request.callback

        @wraps(cb)
        def wrapper(response):
            try:
                results.startTest(self.testcase_pre)
                self.pre_process(response)
                results.stopTest(self.testcase_pre)
            except __HOLE__:
                results.addFailure(self.testcase_pre, sys.exc_info())
            except Exception:
                results.addError(self.testcase_pre, sys.exc_info())
            else:
                results.addSuccess(self.testcase_pre)
            finally:
                return list(iterate_spider_output(cb(response)))

        request.callback = wrapper

    return request
|
AssertionError
|
dataset/ETHPy150Open scrapy/scrapy/scrapy/contracts/__init__.py/Contract.add_pre_hook
|
9,747
|
def add_post_hook(self, request, results):
    if hasattr(self, 'post_process'):
        cb = request.callback

        @wraps(cb)
        def wrapper(response):
            output = list(iterate_spider_output(cb(response)))
            try:
                results.startTest(self.testcase_post)
                self.post_process(output)
                results.stopTest(self.testcase_post)
            except __HOLE__:
                results.addFailure(self.testcase_post, sys.exc_info())
            except Exception:
                results.addError(self.testcase_post, sys.exc_info())
            else:
                results.addSuccess(self.testcase_post)
            finally:
                return output

        request.callback = wrapper

    return request
|
AssertionError
|
dataset/ETHPy150Open scrapy/scrapy/scrapy/contracts/__init__.py/Contract.add_post_hook
|
9,748
|
def _get_templated_url(self, template, id, method=None):
    id = re.sub('^http(s?)://', '', id)
    try:
        id = urllib.quote(id, safe="")
    except __HOLE__:  # thrown if bad characters
        pass
    url = template % id
    return (url)
|
KeyError
|
dataset/ETHPy150Open Impactstory/total-impact-core/totalimpact/providers/plossearch.py/Plossearch._get_templated_url
|
9,749
|
def create_class(class_name, dynamic_imports):
    """Dynamically creates a class.

    It is tried if the class can be created by the already given imports.
    If not the list of the dynamically loaded classes is used.
    """
    try:
        new_class = globals()[class_name]
        if not inspect.isclass(new_class):
            raise TypeError('Not a class!')
        return new_class
    except (__HOLE__, TypeError):
        for dynamic_class in dynamic_imports:
            # Dynamic classes can be provided directly as a Class instance,
            # for example as `MyCustomParameter`,
            # or as a string describing where to import the class from,
            # for instance as `'mypackage.mymodule.MyCustomParameter'`.
            if inspect.isclass(dynamic_class):
                if class_name == dynamic_class.__name__:
                    return dynamic_class
            else:
                # The class name is always the last in an import string,
                # e.g. `'mypackage.mymodule.MyCustomParameter'`
                class_name_to_test = dynamic_class.split('.')[-1]
                if class_name == class_name_to_test:
                    new_class = load_class(dynamic_class)
                    return new_class
        raise ImportError('Could not create the class named `%s`.' % class_name)
|
KeyError
|
dataset/ETHPy150Open SmokinCaterpillar/pypet/pypet/utils/dynamicimports.py/create_class
|
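A hypothetical call sketch for create_class above, assuming load_class is the module's dotted-path import helper; OrderedDict is only an illustration, not part of the dataset:

cls = create_class('OrderedDict', ['collections.OrderedDict'])
obj = cls()  # not in globals(), so resolved via the dynamic import list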
9,750
|
def discard(self, item):
    try:
        self.remove(item)
    except __HOLE__:
        pass
|
KeyError
|
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/lxml-3.3.6/src/lxml/html/_setmixin.py/SetMixin.discard
|
9,751
|
def interactive_expressions():
    try:
        index = 1
        while True:
            try:
                code = raw_input('%d fora-> ' % index)
                while balance_pos(code) != len(code):
                    code += "\n" + raw_input('%d -----> ' % index)
                yield (code, index)
                index = index + 1
            except UnbalancedException, e:
                print "Error: unbalanced delimiter."
            except __HOLE__:
                # not an error - just a reset indication
                index = index + 1
                print "\n\n\t[Interrupted]\n"
    except EOFError:
        # Not an error; just the end of input.
        print  # make sure following output starts on a new line
        return

# TODO BUG brax: fora command-line should leave objects as FORA objects in the repl environment
#
# Otherwise, we get python objects mixed in, which is weird. This is different
# than what we want to happen when we're accessing FORA from python itself.
|
KeyboardInterrupt
|
dataset/ETHPy150Open ufora/ufora/ufora/FORA/python/fora_interpreter.py/interactive_expressions
|
9,752
|
def main(parsedArguments):
    isLocalEvaluator = True
    with createViewFactory():
        FORA.initialize(useLocalEvaluator=isLocalEvaluator)
        locals = {}
        try:
            for code in parsedArguments.expressionsToEvaluate:
                eval_expression(code, locals, parsedArguments.shouldPrintEvaluationTime)
            for a in parsedArguments.files:
                with open(a) as f:
                    code = f.read()
                eval_expression(code, locals, parsedArguments.shouldPrintEvaluationTime)
            if parsedArguments.repeaters:
                while True:
                    for r in parsedArguments.repeaters:
                        eval_expression(r, locals, parsedArguments.shouldPrintEvaluationTime)
                    Evaluator.evaluator().flush()
            if parsedArguments.alwaysRunInterpreted or (
                    not parsedArguments.expressionsToEvaluate and
                    not parsedArguments.files and
                    not parsedArguments.repeaters
                    ):
                for code, index in interactive_expressions():
                    try:
                        eval_expression(code, locals, parsedArguments.shouldPrintEvaluationTime, index)
                    except __HOLE__:
                        print "\n\n\t[Interrupted]\n"
        except KeyboardInterrupt:
            print "\n\n\t[Interrupted]\n"
        except Exceptions.FatalException as ex:
            print "\nERROR: " + ex.message
            print "Exiting FORA."
            import os
            os._exit(os.EX_UNAVAILABLE)
        time.sleep(.5)
|
KeyboardInterrupt
|
dataset/ETHPy150Open ufora/ufora/ufora/FORA/python/fora_interpreter.py/main
|
9,753
|
def create_api_role(name, problematic):
    """
    Create and register a new role to create links for an API documentation.

    Create a role called `name`, which will use the URL resolver registered as
    ``name`` in `api_register` to create a link for an object.

    :Parameters:
      `name` : `str`
        name of the role to create.
      `problematic` : `bool`
        if True, the registered role will create problematic nodes in
        case of failed references. If False, a warning will be raised
        anyway, but the output will appear as an ordinary literal.
    """
    def resolve_api_name(n, rawtext, text, lineno, inliner,
                         options={}, content=[]):
        if docutils is None:
            raise AssertionError('requires docutils')

        # Check if there's separate text & targets
        m = _TARGET_RE.match(text)
        if m: text, target = m.groups()
        else: target = text

        # node in monotype font
        text = utils.unescape(text)
        node = nodes.literal(rawtext, text, **options)

        # Get the resolver from the register and create an url from it.
        try:
            url = api_register[name].get_url(target)
        except __HOLE__, exc:
            msg = inliner.reporter.warning(str(exc), line=lineno)
            if problematic:
                prb = inliner.problematic(rawtext, text, msg)
                return [prb], [msg]
            else:
                return [node], []

        if url is not None:
            node = nodes.reference(rawtext, '', node, refuri=url, **options)
        return [node], []

    roles.register_local_role(name, resolve_api_name)

#{ Command line parsing
# --------------------
|
IndexError
|
dataset/ETHPy150Open CollabQ/CollabQ/vendor/epydoc/docwriter/xlink.py/create_api_role
|
9,754
|
def _assert_valid_nodeid(nodeid):  # pragma: no cover
    try:
        _validate_nodeid(nodeid)
    except __HOLE__ as e:
        raise AssertionError(e.message)
|
ValueError
|
dataset/ETHPy150Open eallik/spinoff/spinoff/remoting/validation.py/_assert_valid_nodeid
|
9,755
|
def _assert_valid_addr(addr):  # pragma: no cover
    try:
        _validate_addr(addr)
    except __HOLE__ as e:
        raise AssertionError(e.message)

# semantic alias for prefixing with `assert`
|
ValueError
|
dataset/ETHPy150Open eallik/spinoff/spinoff/remoting/validation.py/_assert_valid_addr
|
9,756
|
def main():
    args = parser.parse_args()

    formatter = logging.Formatter("%(levelname)s: %(message)s")
    console = logging.StreamHandler(codecs.getwriter('utf8')(sys.stderr))
    console.setFormatter(formatter)
    logging.root.addHandler(console)

    if args.debug:
        logging.root.setLevel(logging.DEBUG)
    else:
        logging.root.setLevel(logging.INFO)

    if args.instatrace:
        instatrace.init_trace(args.instatrace)

    try:
        args.run(args)
    except __HOLE__:
        print
        sys.exit(1)
|
KeyboardInterrupt
|
dataset/ETHPy150Open pteichman/cobe/cobe/control.py/main
|
9,757
|
def process_pyx(fromfile, tofile):
    try:
        from Cython.Compiler.Version import version as cython_version
        from distutils.version import LooseVersion
        if LooseVersion(cython_version) < LooseVersion('0.22'):
            raise Exception('Building PyFunt requires Cython >= 0.22')
    except ImportError:
        pass

    flags = ['--fast-fail']
    if tofile.endswith('.cxx'):
        flags += ['--cplus']

    try:
        try:
            # if fromfile == 'im2col_cython.pyx':
            #     print('compiling im2col_cython')
            #     r = subprocess.call(
            #         ['python', 'pyfunt/layers/setup.py', 'build_ext', '--inplace'])
            # else:
            r = subprocess.call(
                ['cython'] + flags + ["-o", tofile, fromfile])
            if r != 0:
                raise Exception('Cython failed')
        except __HOLE__:
            # There are ways of installing Cython that don't result in a cython
            # executable on the path, see gh-2397.
            r = subprocess.call([sys.executable, '-c',
                                 'import sys; from Cython.Compiler.Main import '
                                 'setuptools_main as main; sys.exit(main())'] + flags +
                                ["-o", tofile, fromfile])
            if r != 0:
                raise Exception("Cython either isn't installed or it failed.")
    except OSError:
        raise OSError('Cython needs to be installed')
|
OSError
|
dataset/ETHPy150Open dnlcrl/PyFunt/tools/cythonize.py/process_pyx
|
9,758
|
def process_tempita_pyx(fromfile, tofile):
    try:
        try:
            from Cython import Tempita as tempita
        except ImportError:
            import tempita
    except __HOLE__:
        raise Exception('Building PyFunt requires Tempita: '
                        'pip install --user Tempita')
    from_filename = tempita.Template.from_filename
    template = from_filename(fromfile, encoding=sys.getdefaultencoding())
    pyxcontent = template.substitute()
    assert fromfile.endswith('.pyx.in')
    pyxfile = fromfile[:-len('.pyx.in')] + '.pyx'
    with open(pyxfile, "w") as f:
        f.write(pyxcontent)
    process_pyx(pyxfile, tofile)
|
ImportError
|
dataset/ETHPy150Open dnlcrl/PyFunt/tools/cythonize.py/process_tempita_pyx
|
9,759
|
def main():
    try:
        root_dir = sys.argv[1]
    except __HOLE__:
        root_dir = DEFAULT_ROOT
    find_process_files(root_dir)
|
IndexError
|
dataset/ETHPy150Open dnlcrl/PyFunt/tools/cythonize.py/main
|
9,760
|
def get_filediff(self, request, *args, **kwargs):
    """Returns the FileDiff, or an error, for the given parameters."""
    review_request_resource = resources.review_request

    try:
        review_request = review_request_resource.get_object(
            request, *args, **kwargs)
    except __HOLE__:
        return DOES_NOT_EXIST

    if not review_request_resource.has_access_permissions(request,
                                                          review_request):
        return self.get_no_access_error(request)

    try:
        return resources.filediff.get_object(request, *args, **kwargs)
    except ObjectDoesNotExist:
        return DOES_NOT_EXIST
|
ObjectDoesNotExist
|
dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/webapi/resources/patched_file.py/PatchedFileResource.get_filediff
|
9,761
|
def upload_file(self, command, pyversion, filename):
    # Makes sure the repository URL is compliant
    schema, netloc, url, params, query, fragments = \
        urlparse.urlparse(self.repository)
    if params or query or fragments:
        raise AssertionError("Incompatible url %s" % self.repository)

    if schema not in ('http', 'https'):
        raise AssertionError("unsupported schema " + schema)

    # Sign if requested
    if self.sign:
        gpg_args = ["gpg", "--detach-sign", "-a", filename]
        if self.identity:
            gpg_args[2:2] = ["--local-user", self.identity]
        spawn(gpg_args,
              dry_run=self.dry_run)

    # Fill in the data - send all the meta-data in case we need to
    # register a new release
    f = open(filename, 'rb')
    try:
        content = f.read()
    finally:
        f.close()
    meta = self.distribution.metadata
    data = {
        # action
        ':action': 'file_upload',
        'protcol_version': '1',

        # identify release
        'name': meta.get_name(),
        'version': meta.get_version(),

        # file content
        'content': (os.path.basename(filename), content),
        'filetype': command,
        'pyversion': pyversion,
        'md5_digest': md5(content).hexdigest(),

        # additional meta-data
        'metadata_version': '1.0',
        'summary': meta.get_description(),
        'home_page': meta.get_url(),
        'author': meta.get_contact(),
        'author_email': meta.get_contact_email(),
        'license': meta.get_licence(),
        'description': meta.get_long_description(),
        'keywords': meta.get_keywords(),
        'platform': meta.get_platforms(),
        'classifiers': meta.get_classifiers(),
        'download_url': meta.get_download_url(),

        # PEP 314
        'provides': meta.get_provides(),
        'requires': meta.get_requires(),
        'obsoletes': meta.get_obsoletes(),
    }
    comment = ''
    if command == 'bdist_rpm':
        dist, version, id = platform.dist()
        if dist:
            comment = 'built for %s %s' % (dist, version)
    elif command == 'bdist_dumb':
        comment = 'built for %s' % platform.platform(terse=1)
    data['comment'] = comment

    if self.sign:
        data['gpg_signature'] = (os.path.basename(filename) + ".asc",
                                 open(filename + ".asc").read())

    # set up the authentication
    auth = "Basic " + standard_b64encode(self.username + ":" +
                                         self.password)

    # Build up the MIME payload for the POST data
    boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
    sep_boundary = '\n--' + boundary
    end_boundary = sep_boundary + '--'
    body = StringIO.StringIO()
    for key, value in data.items():
        # handle multiple entries for the same name
        if not isinstance(value, list):
            value = [value]
        for value in value:
            if isinstance(value, tuple):
                fn = ';filename="%s"' % value[0]
                value = value[1]
            else:
                fn = ""

            body.write(sep_boundary)
            body.write('\nContent-Disposition: form-data; name="%s"' % key)
            body.write(fn)
            body.write("\n\n")
            body.write(value)
            if value and value[-1] == '\r':
                body.write('\n')  # write an extra newline (lurve Macs)
    body.write(end_boundary)
    body.write("\n")
    body = body.getvalue()

    self.announce("Submitting %s to %s" % (filename, self.repository), log.INFO)

    # build the Request
    headers = {'Content-type':
                   'multipart/form-data; boundary=%s' % boundary,
               'Content-length': str(len(body)),
               'Authorization': auth}
    request = Request(self.repository, data=body,
                      headers=headers)

    # send the data
    try:
        result = urlopen(request)
        status = result.getcode()
        reason = result.msg
        if self.show_response:
            msg = '\n'.join(('-' * 75, r.read(), '-' * 75))
            self.announce(msg, log.INFO)
    except socket.error, e:
        self.announce(str(e), log.ERROR)
        return
    except __HOLE__, e:
        status = e.code
        reason = e.msg

    if status == 200:
        self.announce('Server response (%s): %s' % (status, reason),
                      log.INFO)
    else:
        self.announce('Upload failed (%s): %s' % (status, reason),
                      log.ERROR)
|
HTTPError
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/distutils/command/upload.py/upload.upload_file
|
9,762
|
def width(ch):
    """
    Compute the display width of the given character.

    Useful for cursor-repositioning tasks, however this is not entirely
    reliable since different terminal emulators have different behavior in
    this area.

    @see: U{http://unicode.org/reports/tr11/}

    @return: The width in 1/2 ems of the given single-length unicode string.
    @rtype: C{int}
    """
    widthSpecifier = unicodedata.east_asian_width(ch)
    try:
        return _widths[widthSpecifier]
    except __HOLE__:
        raise KeyError("%r has a width that is not supported: %s"
                       % (ch, widthSpecifier))
|
KeyError
|
dataset/ETHPy150Open twisted/imaginary/imaginary/wiring/terminalui.py/width
|
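The _widths table consulted by width() is defined elsewhere in that module. Below is a plausible sketch of it, assuming the usual handling of unicodedata.east_asian_width categories; the real table may differ.

import unicodedata

_widths = {
    'N': 1,   # neutral
    'Na': 1,  # narrow
    'H': 1,   # halfwidth
    'A': 1,   # ambiguous (terminal-dependent; 1 is a common choice)
    'W': 2,   # wide
    'F': 2,   # fullwidth
}

print(unicodedata.east_asian_width(u'a'))   # 'Na' -> width 1
print(unicodedata.east_asian_width(u'犬'))  # 'W'  -> width 2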
9,763
|
def add(self, byte):
"""
Push a byte onto the unicode string; if the byte contains or
completes a full utf-8-encoded character, that new character
will be included in the result of L{get}.
"""
try:
character = byte.decode('utf-8')
except UnicodeDecodeError:
if self._buf and isinstance(self._buf[-1], str):
try:
character = (self._buf[-1] + byte).decode('utf-8')
except __HOLE__:
self._buf[-1] = self._buf[-1] + byte
else:
self._buf[-1] = character
else:
self._buf.append(byte)
else:
self._buf.append(character)
|
UnicodeDecodeError
|
dataset/ETHPy150Open twisted/imaginary/imaginary/wiring/terminalui.py/AsynchronousIncrementalUTF8Decoder.add
|
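For comparison, the standard library's incremental decoders provide the same byte-at-a-time semantics as the hand-rolled buffer above; a minimal sketch:

import codecs

decoder = codecs.getincrementaldecoder('utf-8')()
out = u''
for byte in (b'\xe2', b'\x98', b'\x83'):  # UTF-8 for u'\u2603', one byte at a time
    out += decoder.decode(byte)           # returns '' until the character completes
print(out)  # the snowman character, emitted once the final byte arrives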
9,764
|
def decode_cookie(cookie):
'''
This decodes a cookie given by `encode_cookie`. If verification of the
cookie fails, ``None`` will be implicitly returned.
:param cookie: An encoded cookie.
:type cookie: str
'''
try:
payload, digest = cookie.rsplit(u'|', 1)
if hasattr(digest, 'decode'):
digest = digest.decode('ascii') # pragma: no cover
except __HOLE__:
return
if safe_str_cmp(_cookie_digest(payload), digest):
return payload
|
ValueError
|
dataset/ETHPy150Open maxcountryman/flask-login/flask_login/utils.py/decode_cookie
|
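A minimal sketch of the sign-and-verify pattern decode_cookie depends on, assuming an HMAC-SHA512 digest and a stand-in SECRET for the application key; the real _cookie_digest may be keyed differently.

import hashlib
import hmac

SECRET = b'not-the-real-app-secret'  # stand-in for the application key

def sign(payload):
    digest = hmac.new(SECRET, payload.encode('utf-8'), hashlib.sha512)
    return u'%s|%s' % (payload, digest.hexdigest())

def verify(cookie):
    try:
        payload, digest = cookie.rsplit(u'|', 1)
    except ValueError:
        return None
    expected = hmac.new(SECRET, payload.encode('utf-8'),
                        hashlib.sha512).hexdigest()
    if hmac.compare_digest(expected, digest):  # constant-time comparison
        return payload

assert verify(sign(u'user-42')) == u'user-42'
assert verify(u'tampered|deadbeef') is None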
9,765
|
def replace_arguments(cmd, args):
"""Do parameter substitution for the given command.
The special variable $* is expanded to contain all filenames.
"""
def arg_sub(m):
"""Replace a positional variable with the appropriate argument."""
index = int(m.group(1)) - 1
try:
return args[index]
except __HOLE__:
return ''
did_replacement = False
shlex_convert_text_type = (not _SHLEX_SUPPORTS_UNICODE and
isinstance(cmd, six.text_type))
if shlex_convert_text_type:
cmd = cmd.encode('utf-8')
for part in shlex.split(cmd):
if part == '$*':
did_replacement = True
for arg in args:
yield arg
else:
part, subs = _arg_re.subn(arg_sub, part)
if subs != 0:
did_replacement = True
if shlex_convert_text_type:
part = part.decode('utf-8')
yield part
if not did_replacement:
for arg in args:
yield arg
|
IndexError
|
dataset/ETHPy150Open reviewboard/rbtools/rbtools/utils/aliases.py/replace_arguments
|
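Hypothetical usage of replace_arguments, assuming the module-level _arg_re matches $N tokens:

args = ['r1', 'r2']
print(list(replace_arguments('post-review $1', args)))  # ['post-review', 'r1']
print(list(replace_arguments('review $*', args)))       # ['review', 'r1', 'r2']
print(list(replace_arguments('status', args)))          # ['status', 'r1', 'r2']
# The last call did no substitution, so all arguments are appended.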
9,766
|
def run_alias(alias, args):
"""Run the alias with the given arguments, after expanding parameters.
Parameter expansion is done by the replace_arguments function.
"""
cmd, use_shell = expand_alias(alias, args)
try:
if use_shell:
# If we are using the shell, we must provide our program as a
# string instead of a sequence.
cmd = subprocess.list2cmdline(cmd)
return subprocess.call(cmd, shell=use_shell)
except __HOLE__ as e:
logging.error('Could not execute alias "%s"; it was malformed: %s',
alias, e)
return 1
|
ValueError
|
dataset/ETHPy150Open reviewboard/rbtools/rbtools/utils/aliases.py/run_alias
|
9,767
|
def clean_username(self, username):
"""Makes sure that the username is a valid email address."""
validator = EmailValidator()
try:
validator(username) # If invalid, will raise a ValidationError
except __HOLE__:
log.debug('Invalid email address: %r', username)
return None
else:
return username
|
ValidationError
|
dataset/ETHPy150Open dropbox/nsot/nsot/middleware/auth.py/EmailHeaderBackend.clean_username
|
9,768
|
def analyze_db(database,
only_count=False,
force_same_shape=False,
):
"""
Looks at the data in a prebuilt database and verifies it
Also prints out some information about it
Returns True if all entries are valid
Arguments:
database -- path to the database
Keyword arguments:
only_count -- only count the entries, don't inspect them
force_same_shape -- throw an error if not all images have the same shape
"""
start_time = time.time()
# Open database
try:
database = validate_database_path(database)
except __HOLE__ as e:
logger.error(e.message)
return False
reader = DbReader(database)
logger.info('Total entries: %s' % reader.total_entries)
unique_shapes = Counter()
count = 0
update_time = None
for key, value in reader.entries():
datum = caffe_pb2.Datum()
datum.ParseFromString(value)
if (not datum.HasField('height') or datum.height == 0 or
not datum.HasField('width') or datum.width == 0):
if datum.encoded:
if force_same_shape or not len(unique_shapes.keys()):
# Decode datum to learn the shape
s = StringIO()
s.write(datum.data)
s.seek(0)
img = PIL.Image.open(s)
width, height = img.size
channels = len(img.split())
else:
# We've already decoded one image, don't bother reading the rest
width = '?'
height = '?'
channels = '?'
else:
errstr = 'Shape is not set and datum is not encoded'
logger.error(errstr)
raise ValueError(errstr)
else:
width, height, channels = datum.width, datum.height, datum.channels
shape = '%sx%sx%s' % (width, height, channels)
unique_shapes[shape] += 1
if force_same_shape and len(unique_shapes.keys()) > 1:
logger.error("Images with different shapes found: %s and %s" % tuple(unique_shapes.keys()))
return False
count += 1
# Send update every 2 seconds
if update_time is None or (time.time() - update_time) > 2:
logger.debug('>>> Key %s' % key)
print_datum(datum)
logger.debug('Progress: %s/%s' % (count, reader.total_entries))
update_time = time.time()
if only_count:
# quit after reading one
count = reader.total_entries
logger.info('Assuming all entries have same shape ...')
unique_shapes[unique_shapes.keys()[0]] = count
break
if count != reader.total_entries:
logger.warning('LMDB reported %s total entries, but only read %s' % (reader.total_entries, count))
for key, val in sorted(unique_shapes.items(), key=operator.itemgetter(1), reverse=True):
logger.info('%s entries found with shape %s (WxHxC)' % (val, key))
logger.info('Completed in %s seconds.' % (time.time() - start_time,))
return True
|
ValueError
|
dataset/ETHPy150Open NVIDIA/DIGITS/tools/analyze_db.py/analyze_db
|
9,769
|
def _read_headers(self, data):
"""
Read the headers of an HTTP response from the socket into the current
HTTPResponse object, and prepare to read the body. Or, if necessary,
follow a redirect.
"""
if not self._requests:
return
request = self._requests[0]
response = request.response
self._reset_timer()
ind = data.find(CRLF)
if ind == -1:
initial_line = data
data = ''
else:
initial_line = data[:ind]
data = data[ind+2:]
try:
http_version, status, status_text = initial_line.split(' ', 2)
status = int(status)
if not http_version.startswith('HTTP/'):
self._do_error(MalformedResponse("Invalid HTTP protocol "
"version %r." % http_version))
return
except __HOLE__:
self._do_error(MalformedResponse("Invalid status line."))
return
# Parse the headers.
headers = read_headers(data) if data else {}
# Store what we've got so far on the response.
response.http_version = http_version
response.status_code = status
response.status_text = status_text
response.headers = headers
# Load any cookies.
if 'Set-Cookie' in headers:
if not response.cookies:
request.cookies = Cookie.SimpleCookie()
response.cookies = request.session.cookies = request.cookies
cookies = headers['Set-Cookie']
if not isinstance(cookies, list):
cookies = [cookies]
for val in cookies:
val_jar = Cookie.SimpleCookie()
val_jar.load(val)
for key in val_jar:
morsel = val_jar[key]
if not morsel['domain']:
morsel['domain'] = _hostname(request.url)
response.cookies.load(morsel.output(None, ''))
# Are we dealing with a HEAD request?
if request.method == 'HEAD':
# Just be done.
self._on_response()
return
# Do the on_headers callback.
continue_request = self._safely_call(request.session.on_headers,
response)
if continue_request is False:
# Abort the connection now.
self._requests.pop(0)
self._want_close = True
self._no_process = False
self._stream.close(False)
return
# Is there a Content-Length header?
if 'Content-Length' in headers:
response.length = int(headers['Content-Length'])
response.remaining = response.length
# If there's no length, immediately we've got a response.
if not response.remaining:
self._on_response()
return
self._stream.on_read = self._read_body
self._stream.read_delimiter = min(CHUNK_SIZE, response.remaining)
# What about Transfer-Encoding?
elif 'Transfer-Encoding' in headers:
if headers['Transfer-Encoding'] != 'chunked':
self._do_error(MalformedResponse(
"Unable to handle Transfer-Encoding %r." %
headers['Transfer-Encoding']))
return
response.length = 0
self._stream.on_read = self._read_chunk_head
self._stream.read_delimiter = CRLF
# Is this not a persistent connection? If so, read the whole body.
elif not response._keep_alive:
response.length = 0
response.remaining = 0
self._reading_forever = True
self._stream.on_read = self._read_forever
# We have to have a read_delimiter of None, otherwise our data
# gets deleted when the connection is closed.
self._stream.read_delimiter = None
# There must not be a body, so go ahead and be done.
else:
# We've got a response.
self._on_response()
return
# Do we have any Content-Encoding?
if 'Content-Encoding' in headers:
encoding = headers['Content-Encoding']
if not encoding in CONTENT_ENCODING:
self._do_error(MalformedResponse(
"Unable to handle Content-Encoding %r." % encoding))
return
response._decoder = CONTENT_ENCODING[encoding]()
|
ValueError
|
dataset/ETHPy150Open ecdavis/pants/pants/http/client.py/HTTPClient._read_headers
|
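The status-line parse in _read_headers, in isolation; both failure modes (too few fields, a non-integer code) surface as the same exception:

initial_line = 'HTTP/1.1 404 Not Found'
http_version, status, status_text = initial_line.split(' ', 2)
status = int(status)
print(http_version, status, status_text)  # HTTP/1.1 404 Not Found

for bad in ('HTTP/1.1 200', 'HTTP/1.1 abc OK'):
    try:
        version, code, text = bad.split(' ', 2)
        int(code)
    except ValueError as e:
        print('rejected %r: %s' % (bad, e))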
9,770
|
def __init__(self, client, on_response=None, on_headers=None,
on_progress=None, on_ssl_error=None, on_error=None,
timeout=None, max_redirects=None, keep_alive=None, auth=None,
headers=None, cookies=None, verify_ssl=None,
ssl_options=None):
""" Initialize the Session. """
# Store the client and parent.
if isinstance(client, Session):
self.parent = parent = client
self.client = client = self.parent.client
else:
self.client = client
parent = client._sessions[-1] if client._sessions else None
self.parent = parent
# Setup our default settings.
if on_response is None:
on_response = parent.on_response if parent else client.on_response
if on_headers is None:
on_headers = parent.on_headers if parent else client.on_headers
if on_progress is None:
on_progress = parent.on_progress if parent else client.on_progress
if on_ssl_error is None:
if parent:
on_ssl_error = parent.on_ssl_error
else:
on_ssl_error = client.on_ssl_error
if on_error is None:
on_error = parent.on_error if parent else client.on_error
if timeout is None:
timeout = parent.timeout if parent else 30
if max_redirects is None:
max_redirects = parent.max_redirects if parent else 10
if keep_alive is None:
keep_alive = parent.keep_alive if parent else True
if auth is None:
auth = parent.auth if parent else None
if headers is None:
headers = {}
if parent and parent.headers:
headers.update(parent.headers)
if verify_ssl is None:
verify_ssl = parent.verify_ssl if parent else False
if ssl_options is None:
ssl_options = parent.ssl_options if parent else None
# Do some logic about SSL verification.
if verify_ssl:
if not ssl_options:
# This logic comes from requests.
loc = None
if verify_ssl is not True:
loc = verify_ssl
if not loc:
loc = os.environ.get('PANTS_CA_BUNDLE')
if not loc:
loc = os.environ.get('CURL_CA_BUNDLE')
if not loc:
try:
import certifi
loc = certifi.where()
except __HOLE__:
pass
if not loc:
raise RuntimeError("Cannot find certificates for SSL "
"verification.")
ssl_options = {'ca_certs': loc, 'cert_reqs': ssl.CERT_REQUIRED}
# Make sure we've got backports.ssl_match_hostname
if not match_hostname:
raise RuntimeError("Cannot verify SSL certificates without "
"the package backports.ssl_match_hostname.")
# Ensure the cookies are a cookiejar.
if cookies is None:
cookies = Cookie.SimpleCookie()
elif isinstance(cookies, dict):
cookies = Cookie.SimpleCookie(cookies)
# Store our settings now.
self.on_response = on_response
self.on_headers = on_headers
self.on_progress = on_progress
self.on_ssl_error = on_ssl_error
self.on_error = on_error
self.timeout = timeout
self.max_redirects = max_redirects
self.keep_alive = keep_alive
self.auth = auth
self.headers = headers
self.cookies = cookies
self.verify_ssl = verify_ssl
self.ssl_options = ssl_options
##### Session Generation ##################################################
|
ImportError
|
dataset/ETHPy150Open ecdavis/pants/pants/http/client.py/Session.__init__
|
9,771
|
def __getattr__(self, attr_name):
"""Makes the API calls and gets the result."""
try:
return object.__getattr__(self, attr_name)
except AttributeError:
def vim_request_handler(managed_object, **kwargs):
"""
Builds the SOAP message and parses the response for fault
checking and other errors.
managed_object : Managed Object Reference or Managed
Object Name
**kwargs : Keyword arguments of the call
"""
# Dynamic handler for VI SDK Calls
try:
request_mo = \
self._request_managed_object_builder(managed_object)
request = getattr(self.client.service, attr_name)
response = request(request_mo, **kwargs)
# To check for the faults that are part of the message body
# and not returned as Fault object response from the ESX
# SOAP server
if hasattr(error_util.FaultCheckers,
attr_name.lower() + "_fault_checker"):
fault_checker = getattr(error_util.FaultCheckers,
attr_name.lower() + "_fault_checker")
fault_checker(response)
return response
# Catch the VimFaultException that is raised by the fault
# check of the SOAP response
except error_util.VimFaultException, excep:
raise
except suds.WebFault, excep:
doc = excep.document
detail = doc.childAtPath("/Envelope/Body/Fault/detail")
fault_list = []
for child in detail.getChildren():
fault_list.append(child.get("type"))
raise error_util.VimFaultException(fault_list, excep)
except __HOLE__, excep:
raise error_util.VimAttributeError(_("No such SOAP method "
"'%s' provided by VI SDK") % (attr_name), excep)
except (httplib.CannotSendRequest,
httplib.ResponseNotReady,
httplib.CannotSendHeader), excep:
raise error_util.SessionOverLoadException(_("httplib "
"error in %s: ") % (attr_name), excep)
except Exception, excep:
# Socket errors which need special handling for they
# might be caused by ESX API call overload
if (str(excep).find(ADDRESS_IN_USE_ERROR) != -1 or
str(excep).find(CONN_ABORT_ERROR) != -1):
raise error_util.SessionOverLoadException(_("Socket "
"error in %s: ") % (attr_name), excep)
# Type error that needs special handling for it might be
# caused by ESX host API call overload
elif str(excep).find(RESP_NOT_XML_ERROR) != -1:
raise error_util.SessionOverLoadException(_("Type "
"error in %s: ") % (attr_name), excep)
else:
raise error_util.VimException(
_("Exception in %s ") % (attr_name), excep)
return vim_request_handler
|
AttributeError
|
dataset/ETHPy150Open nii-cloud/dodai-compute/nova/virt/vmwareapi/vim.py/Vim.__getattr__
|
9,772
|
def unregister(self, name):
try:
del self._registry[name]
except __HOLE__:
pass
# manage a global request reference for those JSON handlers that want access to the
# original request.
|
KeyError
|
dataset/ETHPy150Open kdart/pycopia/WWW/pycopia/WWW/json.py/JSONEncoder.unregister
|
9,773
|
def __call__(self, js_object):
try:
jsonclass = js_object["_class_"]
except KeyError:
# default returns dict with keys converted to strings.
return dict((str(k), v) for k, v in js_object.items())
else:
try:
decoder = self._registry[jsonclass]
except __HOLE__:
raise ValueError("JSON decoder for %r not registered." % (jsonclass,))
else:
return decoder(js_object["value"])
|
KeyError
|
dataset/ETHPy150Open kdart/pycopia/WWW/pycopia/WWW/json.py/JSONObjectDecoder.__call__
|
9,774
|
def unregister(self, name):
try:
del self._registry[name]
except __HOLE__:
pass
# default encoder-decoder. This is all you need.
|
KeyError
|
dataset/ETHPy150Open kdart/pycopia/WWW/pycopia/WWW/json.py/JSONObjectDecoder.unregister
|
9,775
|
def create_attr_list_type(class_name, *fields):
"""Create a new attr_list_type which is a class offering get and set
methods which is capable of serializing and deserializing itself from
netlink message. The fields are a bunch of tuples of name and a class
which should provide pack and unpack (except for in the case where we
know it will be used exclusively for serialization or deserialization).
attr_list_types can be used as packers in other attr_list_types. The
names and packers of the field should be taken from the appropriate
linux kernel header and source files.
"""
name_to_key = {}
key_to_name = {}
key_to_packer = {}
for i, (name, packer) in enumerate(fields):
key = i + 1
name_to_key[name.upper()] = key
key_to_name[key] = name
key_to_packer[key] = packer
class AttrListType(AttrListPacker):
def __init__(self, **kwargs):
self.attrs = {}
for k, v in kwargs.items():
if v is not None:
self.set(k, v)
def set(self, key, value):
if not isinstance(key, int):
key = name_to_key[key.upper()]
self.attrs[key] = value
def get(self, key, default=_unset):
try:
if not isinstance(key, int):
key = name_to_key[key.upper()]
return self.attrs[key]
except __HOLE__:
if default is not _unset:
return default
raise
def __repr__(self):
attrs = ['%s=%s' % (key_to_name[k].lower(), repr(v))
for k, v in self.attrs.items()]
return '%s(%s)' % (class_name, ', '.join(attrs))
@staticmethod
def pack(attr_list):
packed = array.array(str('B'))
for k, v in attr_list.attrs.items():
if key_to_packer[k] == RecursiveSelf:
x = AttrListType.pack(v)
else:
x = key_to_packer[k].pack(v)
alen = len(x) + 4
# TODO(agartrell): This is scary. In theory, we should OR
# 1 << 15 into the length if it is an instance of
# AttrListPacker, but this didn't work for some reason, so
# we're not going to.
packed.fromstring(struct.pack(str('=HH'), alen, k))
packed.fromstring(x)
packed.fromstring('\0' * ((4 - (len(x) % 4)) & 0x3))
return packed
@staticmethod
def unpack(data):
global global_nest
attr_list = AttrListType()
while len(data) > 0:
alen, k = struct.unpack(str('=HH'), data[:4])
alen = alen & 0x7fff
if key_to_packer[k] == RecursiveSelf:
v = AttrListType.unpack(data[4:alen])
else:
v = key_to_packer[k].unpack(data[4:alen])
attr_list.set(k, v)
data = data[((alen + 3) & (~3)):]
return attr_list
return AttrListType
|
KeyError
|
dataset/ETHPy150Open facebook/gnlpy/netlink.py/create_attr_list_type
|
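A minimal sketch of the TLV layout that AttrListType.pack/unpack implement: a 2-byte native-order length (header included), a 2-byte type key, the payload, then zero padding to a 4-byte boundary. pack_attr and unpack_attrs are illustrative names.

import struct

def pack_attr(key, payload):
    header = struct.pack('=HH', len(payload) + 4, key)
    pad = b'\0' * ((4 - (len(payload) % 4)) & 0x3)
    return header + payload + pad

def unpack_attrs(data):
    attrs = {}
    while data:
        alen, key = struct.unpack('=HH', data[:4])
        alen &= 0x7fff                   # strip the "nested" flag bit
        attrs[key] = data[4:alen]
        data = data[(alen + 3) & ~3:]    # advance past the padding
    return attrs

blob = pack_attr(1, b'eth0\0') + pack_attr(2, struct.pack('=I', 1500))
print(unpack_attrs(blob))  # {1: b'eth0\x00', 2: b'\xdc\x05\x00\x00'}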
9,776
|
def __str__(self):
try:
error_str = '%s: %s' % (errno.errorcode[-self.error],
os.strerror(-self.error))
except __HOLE__:
error_str = str(self.error)
return '%s. Extra info: %s' % (error_str, self.msg)
|
KeyError
|
dataset/ETHPy150Open facebook/gnlpy/netlink.py/ErrorMessage.__str__
|
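The lookup used in __str__ above, in isolation: a negated kernel error code maps back to a symbolic name and message.

import errno
import os

error = -errno.ENOENT  # netlink-style negative error code
print(errno.errorcode[-error], os.strerror(-error))
# ENOENT No such file or directory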
9,777
|
def __getattr__(self, name):
try:
return self[name]
except __HOLE__ as e:
raise AttributeError(e)
|
KeyError
|
dataset/ETHPy150Open openstack/neutron/neutron/agent/linux/dhcp.py/DictModel.__getattr__
|
9,778
|
def _destroy_namespace_and_port(self):
try:
self.device_manager.destroy(self.network, self.interface_name)
except RuntimeError:
LOG.warning(_LW('Failed trying to delete interface: %s'),
self.interface_name)
ns_ip = ip_lib.IPWrapper(namespace=self.network.namespace)
try:
ns_ip.netns.delete(self.network.namespace)
except __HOLE__:
LOG.warning(_LW('Failed trying to delete namespace: %s'),
self.network.namespace)
|
RuntimeError
|
dataset/ETHPy150Open openstack/neutron/neutron/agent/linux/dhcp.py/DhcpLocalProcess._destroy_namespace_and_port
|
9,779
|
def _get_value_from_conf_file(self, kind, converter=None):
"""A helper function to read a value from one of the state files."""
file_name = self.get_conf_file_name(kind)
msg = _('Error while reading %s')
try:
with open(file_name, 'r') as f:
try:
return converter(f.read()) if converter else f.read()
except ValueError:
msg = _('Unable to convert value in %s')
except __HOLE__:
msg = _('Unable to access %s')
LOG.debug(msg, file_name)
return None
|
IOError
|
dataset/ETHPy150Open openstack/neutron/neutron/agent/linux/dhcp.py/DhcpLocalProcess._get_value_from_conf_file
|
9,780
|
@classmethod
def existing_dhcp_networks(cls, conf):
"""Return a list of existing networks ids that we have configs for."""
confs_dir = cls.get_confs_dir(conf)
try:
return [
c for c in os.listdir(confs_dir)
if uuidutils.is_uuid_like(c)
]
except __HOLE__:
return []
|
OSError
|
dataset/ETHPy150Open openstack/neutron/neutron/agent/linux/dhcp.py/Dnsmasq.existing_dhcp_networks
|
9,781
|
def _build_cmdline_callback(self, pid_file):
# We ignore local resolv.conf if dns servers are specified
# or if local resolution is explicitly disabled.
_no_resolv = (
'--no-resolv' if self.conf.dnsmasq_dns_servers or
not self.conf.dnsmasq_local_resolv else '')
cmd = [
'dnsmasq',
'--no-hosts',
_no_resolv,
'--strict-order',
'--except-interface=lo',
'--pid-file=%s' % pid_file,
'--dhcp-hostsfile=%s' % self.get_conf_file_name('host'),
'--addn-hosts=%s' % self.get_conf_file_name('addn_hosts'),
'--dhcp-optsfile=%s' % self.get_conf_file_name('opts'),
'--dhcp-leasefile=%s' % self.get_conf_file_name('leases'),
'--dhcp-match=set:ipxe,175',
]
if self.device_manager.driver.bridged:
cmd += [
'--bind-interfaces',
'--interface=%s' % self.interface_name,
]
else:
cmd += [
'--bind-dynamic',
'--interface=%s' % self.interface_name,
'--interface=tap*',
'--bridge-interface=%s,tap*' % self.interface_name,
]
possible_leases = 0
for i, subnet in enumerate(self.network.subnets):
mode = None
# if a subnet is specified to have dhcp disabled
if not subnet.enable_dhcp:
continue
if subnet.ip_version == 4:
mode = 'static'
else:
# Note(scollins) If the IPv6 attributes are not set, set it as
# static to preserve previous behavior
addr_mode = getattr(subnet, 'ipv6_address_mode', None)
ra_mode = getattr(subnet, 'ipv6_ra_mode', None)
if (addr_mode in [n_const.DHCPV6_STATEFUL,
n_const.DHCPV6_STATELESS] or
not addr_mode and not ra_mode):
mode = 'static'
cidr = netaddr.IPNetwork(subnet.cidr)
if self.conf.dhcp_lease_duration == -1:
lease = 'infinite'
else:
lease = '%ss' % self.conf.dhcp_lease_duration
# mode is optional and is not set - skip it
if mode:
if subnet.ip_version == 4:
cmd.append('--dhcp-range=%s%s,%s,%s,%s' %
('set:', self._TAG_PREFIX % i,
cidr.network, mode, lease))
else:
cmd.append('--dhcp-range=%s%s,%s,%s,%d,%s' %
('set:', self._TAG_PREFIX % i,
cidr.network, mode,
cidr.prefixlen, lease))
possible_leases += cidr.size
if cfg.CONF.advertise_mtu:
mtu = getattr(self.network, 'mtu', 0)
# Do not advertise unknown mtu
if mtu > 0:
cmd.append('--dhcp-option-force=option:mtu,%d' % mtu)
# Cap the limit because creating lots of subnets can inflate
# this possible lease cap.
cmd.append('--dhcp-lease-max=%d' %
min(possible_leases, self.conf.dnsmasq_lease_max))
cmd.append('--conf-file=%s' % self.conf.dnsmasq_config_file)
if self.conf.dnsmasq_dns_servers:
cmd.extend(
'--server=%s' % server
for server in self.conf.dnsmasq_dns_servers)
if self.conf.dhcp_domain:
cmd.append('--domain=%s' % self.conf.dhcp_domain)
if self.conf.dhcp_broadcast_reply:
cmd.append('--dhcp-broadcast')
if self.conf.dnsmasq_base_log_dir:
log_dir = os.path.join(
self.conf.dnsmasq_base_log_dir,
self.network.id)
try:
if not os.path.exists(log_dir):
os.makedirs(log_dir)
except __HOLE__:
LOG.error(_LE('Error while create dnsmasq log dir: %s'),
log_dir)
else:
log_filename = os.path.join(log_dir, 'dhcp_dns_log')
cmd.append('--log-queries')
cmd.append('--log-dhcp')
cmd.append('--log-facility=%s' % log_filename)
return cmd
|
OSError
|
dataset/ETHPy150Open openstack/neutron/neutron/agent/linux/dhcp.py/Dnsmasq._build_cmdline_callback
|
9,782
|
def _read_hosts_file_leases(self, filename):
leases = set()
try:
with open(filename) as f:
for l in f.readlines():
host = l.strip().split(',')
mac = host[0]
client_id = None
if host[1].startswith('set:'):
continue
if host[1].startswith(self._ID):
ip = host[3].strip('[]')
client_id = host[1][len(self._ID):]
else:
ip = host[2].strip('[]')
leases.add((ip, mac, client_id))
except (__HOLE__, IOError):
LOG.debug('Error while reading hosts file %s', filename)
return leases
|
OSError
|
dataset/ETHPy150Open openstack/neutron/neutron/agent/linux/dhcp.py/Dnsmasq._read_hosts_file_leases
|
9,783
|
def __init__(self, path, data, command_line):
# This reads the configuration file as well
try:
Likelihood_sn.__init__(self, path, data, command_line)
except __HOLE__:
raise io_mp.LikelihoodError(
"The JLA data files were not found. Please download the "
"following link "
"http://supernovae.in2p3.fr/sdss_snls_jla/jla_likelihood_v4.tgz"
", extract it, and copy all files present in "
"`jla_likelihood_v4/data` to `your_montepython/data/JLA`")
# read the only matrix
self.C00 = self.read_matrix(self.mu_covmat_file)
# Read the simplified light-curve self.data_file
self.light_curve_params = self.read_light_curve_parameters()
# The covariance matrix can be already inverted, once and for all
# (cholesky)
self.C00 = la.cholesky(self.C00, lower=True, overwrite_a=True)
|
IOError
|
dataset/ETHPy150Open baudren/montepython_public/montepython/likelihoods/JLA_simple/__init__.py/JLA_simple.__init__
|
9,784
|
def _migrate_get_earliest_version():
versions_glob = os.path.join(MIGRATE_REPO, 'versions', '???_*.py')
versions = []
for path in glob.iglob(versions_glob):
filename = os.path.basename(path)
prefix = filename.split('_', 1)[0]
try:
version = int(prefix)
except __HOLE__:
continue  # skip filenames whose prefix is not numeric
versions.append(version)
versions.sort()
return versions[0]
### Git
|
ValueError
|
dataset/ETHPy150Open openstack/ec2-api/tools/db/schema_diff.py/_migrate_get_earliest_version
|
9,785
|
def parse_options():
try:
db_type = sys.argv[1]
except __HOLE__:
usage("must specify DB type")
try:
orig_branch, orig_version = sys.argv[2].split(':')
except IndexError:
usage('original branch and version required (e.g. master:82)')
try:
new_branch, new_version = sys.argv[3].split(':')
except IndexError:
usage('new branch and version required (e.g. master:82)')
return db_type, orig_branch, orig_version, new_branch, new_version
|
IndexError
|
dataset/ETHPy150Open openstack/ec2-api/tools/db/schema_diff.py/parse_options
|
9,786
|
def testGetPluginDir(self):
# Test that plugin_install_path option takes first precedence
conf = config.getConfig()
conf['plugins']['plugin_install_path'] = 'use_this_plugin_dir'
self.assertEqual(plugin_utilities.getPluginDir(),
'use_this_plugin_dir')
try:
shutil.rmtree('use_this_plugin_dir')
except __HOLE__:
pass
del conf['plugins']['plugin_install_path']
|
OSError
|
dataset/ETHPy150Open girder/girder/tests/cases/plugin_utilities_test.py/PluginUtilitiesTestCase.testGetPluginDir
|
9,787
|
def check_dependencies():
"""Ensure required tools for installation are present.
"""
print("Checking required dependencies...")
for cmd, url in [("wget", "http://www.gnu.org/software/wget/")]:
try:
retcode = subprocess.call([cmd, "--version"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except __HOLE__:
retcode = 127
if retcode == 127:
raise OSError("gemini requires %s (%s)" % (cmd, url))
else:
print(" %s found" % cmd)
|
OSError
|
dataset/ETHPy150Open arq5x/gemini/gemini/scripts/gemini_install.py/check_dependencies
|
9,788
|
def handle(self, *args, **options):
super(Command, self).handle(*args, **options)
# set / compute any attributes that multiple class methods need
self.app_name = options["app_name"]
self.keep_files = options["keep_files"]
self.test_mode = options['test_data']
self.downloading = options['download']
self.cleaning = options['clean']
self.loading = options['load']
self.noinput = options['noinput']
if self.test_mode:
# and always keep files when running test data
self.keep_files = True
self.data_dir = get_test_download_directory()
# need to set this app-wide because cleancalaccessrawfile
# also calls get_download_directory
settings.CALACCESS_DOWNLOAD_DIR = self.data_dir
self.noinput = True
else:
self.data_dir = get_download_directory()
os.path.exists(self.data_dir) or os.makedirs(self.data_dir)
self.zip_path = os.path.join(self.data_dir, 'calaccess.zip')
self.tsv_dir = os.path.join(self.data_dir, "tsv/")
# Immediately check that the tsv directory exists when using test data,
# so we can stop immediately.
if self.test_mode:
if not os.path.exists(self.tsv_dir):
raise CommandError("Data tsv directory does not exist "
"at %s" % self.tsv_dir)
elif self.verbosity:
self.log("Using test data")
self.csv_dir = os.path.join(self.data_dir, "csv/")
os.path.exists(self.csv_dir) or os.makedirs(self.csv_dir)
if self.test_mode:
with open(self.data_dir + "/sampled_version.txt", "r") as f:
current_release_datetime = f.readline()
expected_size = f.readline()
else:
download_metadata = self.get_download_metadata()
current_release_datetime = download_metadata['last-modified']
expected_size = download_metadata['content-length']
last_started_update = self.get_last_log()
if self.test_mode:
last_download = None
else:
try:
last_download = self.command_logs.filter(
command='downloadcalaccessrawdata'
).order_by('-start_datetime')[0]
except __HOLE__:
last_download = None
up_to_date = False
can_resume = False
# if there's a previously started update
if last_started_update:
# if current release datetime matches version of last started update
if current_release_datetime == last_started_update.version.release_datetime:
# if the last update finished
if last_started_update.finish_datetime:
up_to_date = True
else:
# if the last update didn't finish
# (but is still for the current version)
can_resume = True
# if the last started update didn't finish
elif not last_started_update.finish_datetime:
# can resume update of old version as long as skipping download
if not self.downloading:
can_resume = True
# or if there is a last download
elif last_download:
# and last download's version matches the outstanding update version
if last_download.version == last_started_update.version:
# and last download completed
if last_download.finish_datetime:
can_resume = True
if self.noinput:
# if not taking input and can resume, automatically go into resume mode
self.resume_mode = can_resume
else:
prompt_context = dict(
current_release_datetime=current_release_datetime,
expected_size=size(expected_size),
up_to_date=up_to_date,
can_resume=can_resume,
)
last_finished_update = self.get_last_log(finished=True)
if last_finished_update:
loaded_v = last_finished_update.version
prompt_context['since_loaded_version'] = naturaltime(loaded_v.release_datetime)
else:
prompt_context['since_loaded_version'] = None
prompt = render_to_string(
'calaccess_raw/updatecalaccessrawdata.txt',
prompt_context,
)
if can_resume:
if self.confirm_proceed(prompt):
self.resume_mode = True
else:
self.resume_mode = False
if not self.confirm_proceed('Do you want re-start your update?\n'):
raise CommandError("Update cancelled")
else:
self.resume_mode = False
if not self.confirm_proceed(prompt):
raise CommandError("Update cancelled")
if self.resume_mode:
self.log_record = last_started_update
else:
# get or create a version
# .get_or_create() throws IntegrityError
try:
version = self.raw_data_versions.get(
release_datetime=current_release_datetime
)
except RawDataVersion.DoesNotExist:
version = self.raw_data_versions.create(
release_datetime=current_release_datetime,
size=expected_size
)
# create a new log record
self.log_record = self.command_logs.create(
version=version,
command=self,
called_by=self.get_caller_log()
)
# if the user could have resumed but didn't
force_restart_download = can_resume and not self.resume_mode
# if not skipping download, and there's a previous download
if self.downloading and last_download:
# if not forcing a restart
if not force_restart_download:
# check if version we are updating is last one being downloaded
if self.log_record.version == last_download.version:
# if it finished
if last_download.finish_datetime:
self.log('Already downloaded.')
self.downloading = False
if self.downloading:
if self.test_mode:
call_command(
"downloadcalaccessrawdatatest",
verbosity=self.verbosity,
)
else:
call_command(
"downloadcalaccessrawdata",
keep_files=self.keep_files,
verbosity=self.verbosity,
noinput=True,
restart=force_restart_download,
)
if self.verbosity:
self.duration()
# execute the other steps that haven't been skipped
if options['clean']:
self.clean()
if self.verbosity:
self.duration()
if options['load']:
self.load()
if self.verbosity:
self.duration()
if self.verbosity:
self.success("Done!")
self.log_record.finish_datetime = now()
self.log_record.save()
|
IndexError
|
dataset/ETHPy150Open california-civic-data-coalition/django-calaccess-raw-data/calaccess_raw/management/commands/updatecalaccessrawdata.py/Command.handle
|
9,789
|
def svn_version():
_version = version()
src_dir = get_base_dir()
revision = 0
base_url = None
urlre = re.compile('url="([^"]+)"')
revre = re.compile('committed-rev="(\d+)"')
for base, dirs, files in os.walk(src_dir):
if '.svn' not in dirs:
dirs[:] = []
continue # no sense walking uncontrolled subdirs
dirs.remove('.svn')
f = open(os.path.join(base, '.svn', 'entries'))
data = f.read()
f.close()
if data[:1] in ('8', '9') or data[:2] == '10':
# SVN >= 1.4
data = [ d.splitlines() for d in data.split('\n\x0c\n') ]
del data[0][0] # get rid of the '8'
dirurl = data[0][3]
try:
localrev = max([int(d[9]) for d in data if len(d)>9 and d[9]])
except __HOLE__:
pass # may be some newly added directory
elif data.startswith('<?xml'):
# SVN <= 1.3
dirurl = urlre.search(data).group(1) # get repository URL
revs = [int(m.group(1)) for m in revre.finditer(data)]
if revs:
localrev = max(revs)
else:
from warnings import warn
warn("unrecognized .svn/entries format; skipping "+base)
dirs[:] = []
continue
if base_url is None:
base_url = dirurl+'/' # save the root url
elif not dirurl.startswith(base_url):
dirs[:] = []
continue # not part of the same svn tree, skip it
revision = max(revision, localrev)
result = _version
if revision:
result += '-' + str(revision)
if 'dev' in _version:
result = fix_alphabeta(result, 'dev')
elif 'alpha' in _version:
result = fix_alphabeta(result, 'alpha')
if 'beta' in _version:
result = fix_alphabeta(result, 'beta')
return result
|
ValueError
|
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/lxml-3.3.6/versioninfo.py/svn_version
|
9,790
|
def split_version(version):
find_digits = re.compile('([0-9]+)(.*)').match
l = []
for part in version.split('.'):
try:
l.append( int(part) )
except __HOLE__:
match = find_digits(part)
if match:
l.append( int(match.group(1)) )
l.append( match.group(2) )
return tuple(l)
|
ValueError
|
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/lxml-3.3.6/versioninfo.py/split_version
|
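Hypothetical outputs of split_version: numeric parts become ints, a part with trailing text after leading digits is split in two, and a part with no leading digits is dropped.

print(split_version('3.3.6'))      # (3, 3, 6)
print(split_version('3.4alpha1'))  # (3, 4, 'alpha1')
print(split_version('2.0.dev5'))   # (2, 0) -- 'dev5' has no leading digits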
9,791
|
def label(self, modelFrom, role, lang, returnMultiple=False, returnText=True, linkroleHint=None):
shorterLangInLabel = longerLangInLabel = None
shorterLangLabels = longerLangLabels = None
langLabels = []
wildRole = role == '*'
labels = self.fromModelObject(modelFrom)
if linkroleHint: # order of preference of linkroles to find label
try:
testHintedLinkrole = self._testHintedLabelLinkrole
except __HOLE__:
self._testHintedLabelLinkrole = testHintedLinkrole = (len(self.linkRoleUris) > 1)
if testHintedLinkrole:
labelsHintedLink = []
labelsDefaultLink = []
labelsOtherLinks = []
for modelLabelRel in labels:
label = modelLabelRel.toModelObject
if wildRole or role == label.role:
linkrole = modelLabelRel.linkrole
if linkrole == linkroleHint:
labelsHintedLink.append(modelLabelRel)
elif linkrole == XbrlConst.defaultLinkRole:
labelsDefaultLink.append(modelLabelRel)
else:
labelsOtherLinks.append(modelLabelRel)
labels = (labelsHintedLink or labelsDefaultLink or labelsOtherLinks)
if len(labels) > 1: # order by priority (ignoring equivalence of relationships)
labels.sort(key=lambda rel: rel.priority, reverse=True)
for modelLabelRel in labels:
label = modelLabelRel.toModelObject
if wildRole or role == label.role:
labelLang = label.xmlLang
text = label.textValue if returnText else label
if lang is None or len(lang) == 0 or lang == labelLang:
langLabels.append(text)
if not returnMultiple:
break
elif labelLang.startswith(lang):
if not longerLangInLabel or len(longerLangInLabel) > len(labelLang):
longerLangInLabel = labelLang
longerLangLabels = [text,]
else:
longerLangLabels.append(text)
elif lang.startswith(labelLang):
if not shorterLangInLabel or len(shorterLangInLabel) < len(labelLang):
shorterLangInLabel = labelLang
shorterLangLabels = [text,]
else:
shorterLangLabels.append(text)
if langLabels:
if returnMultiple: return langLabels
else: return langLabels[0]
if shorterLangLabels: # more general has preference
if returnMultiple: return shorterLangLabels
else: return shorterLangLabels[0]
if longerLangLabels:
if returnMultiple: return longerLangLabels
else: return longerLangLabels[0]
return None
|
AttributeError
|
dataset/ETHPy150Open Arelle/Arelle/arelle/ModelRelationshipSet.py/ModelRelationshipSet.label
|
9,792
|
def _setup(self):
"""
Load the settings module pointed to by the environment variable. This
is used the first time we need any settings at all, if the user has not
previously configured the settings manually.
"""
try:
settings_module = os.environ[ENVIRONMENT_VARIABLE]
if not settings_module: # If it's set but is an empty string.
raise KeyError
except __HOLE__:
# NOTE: This is arguably an EnvironmentError, but that causes
# problems with Python's interactive help.
raise ImportError("Settings cannot be imported, because environment variable %s is undefined." % ENVIRONMENT_VARIABLE)
self._wrapped = Settings(settings_module)
|
KeyError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/conf/__init__.py/LazySettings._setup
|
9,793
|
def __init__(self, settings_module):
# update this dict from global settings (but only for ALL_CAPS settings)
for setting in dir(global_settings):
if setting == setting.upper():
setattr(self, setting, getattr(global_settings, setting))
# store the settings module in case someone later cares
self.SETTINGS_MODULE = settings_module
try:
mod = importlib.import_module(self.SETTINGS_MODULE)
except __HOLE__, e:
raise ImportError("Could not import settings '%s' (Is it on sys.path? Does it have syntax errors?): %s" % (self.SETTINGS_MODULE, e))
# Settings that should be converted into tuples if they're mistakenly entered
# as strings.
tuple_settings = ("INSTALLED_APPS", "TEMPLATE_DIRS")
for setting in dir(mod):
if setting == setting.upper():
setting_value = getattr(mod, setting)
if setting in tuple_settings and type(setting_value) == str:
setting_value = (setting_value,) # In case the user forgot the comma.
setattr(self, setting, setting_value)
# Expand entries in INSTALLED_APPS like "django.contrib.*" to a list
# of all those apps.
new_installed_apps = []
for app in self.INSTALLED_APPS:
if app.endswith('.*'):
app_mod = importlib.import_module(app[:-2])
appdir = os.path.dirname(app_mod.__file__)
app_subdirs = os.listdir(appdir)
app_subdirs.sort()
name_pattern = re.compile(r'[a-zA-Z]\w*')
for d in app_subdirs:
if name_pattern.match(d) and os.path.isdir(os.path.join(appdir, d)):
new_installed_apps.append('%s.%s' % (app[:-2], d))
else:
new_installed_apps.append(app)
self.INSTALLED_APPS = new_installed_apps
if hasattr(time, 'tzset') and getattr(self, 'TIME_ZONE'):
# Move the time zone info into os.environ. See ticket #2315 for why
# we don't do this unconditionally (breaks Windows).
os.environ['TZ'] = self.TIME_ZONE
time.tzset()
|
ImportError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/conf/__init__.py/Settings.__init__
|
9,794
|
def delete(self, using=None):
cid = self.pk
for image in self.basecollectionitem_set.instance_of(Image):
image.delete()
for nidmresult in self.basecollectionitem_set.instance_of(NIDMResults):
nidmresult.delete()
ret = super(Collection, self).delete(using=using)
collDir = os.path.join(PRIVATE_MEDIA_ROOT, 'images',str(cid))
try:
shutil.rmtree(collDir)
except __HOLE__:
print 'Image directory for collection %s does not exist' %cid
return ret
|
OSError
|
dataset/ETHPy150Open NeuroVault/NeuroVault/neurovault/apps/statmaps/models.py/Collection.delete
|
9,795
|
def get_thumbnail_url(self):
try:
url = self.thumbnail.url
except __HOLE__:
url = os.path.abspath(os.path.join("/static","images","glass_brain_empty.jpg"))
return url
|
ValueError
|
dataset/ETHPy150Open NeuroVault/NeuroVault/neurovault/apps/statmaps/models.py/Image.get_thumbnail_url
|
9,796
|
@classmethod
def get_import_file_headers(cls, csv_file):
filename = csv_file
headers = []
try:
workbook = open_workbook(filename.name, 'rb')
records = []
for sheet in workbook.sheets():
# read our header
header = []
for col in range(sheet.ncols):
header.append(six.text_type(sheet.cell(0, col).value))
headers = [cls.normalize_value(_).lower() for _ in header]
#only care for the first sheet
break
except XLRDError:
# our alternative codec, by default we are the crazy windows encoding
ascii_codec = 'cp1252'
# read the entire file, look for mac_roman characters
reader = open(filename.name, "rb")
for byte in reader.read():
# these are latin accented characters in mac_roman; if we see them then our alternative
# encoding should be mac_roman
try:
byte_number = ord(byte)
except __HOLE__:
byte_number = byte
if byte_number in [0x81, 0x8d, 0x8f, 0x90, 0x9d]:
ascii_codec = 'mac_roman'
break
reader.close()
reader = open(filename.name, "rU")
def unicode_csv_reader(utf8_data, dialect=csv.excel, **kwargs):
csv_reader = csv.reader(utf8_data, dialect=dialect, **kwargs)
for row in csv_reader:
encoded = []
for cell in row:
try:
cell = six.text_type(cell)
except:
cell = six.text_type(cell.decode(ascii_codec))
encoded.append(cell)
yield encoded
reader = unicode_csv_reader(reader)
# read in our header
line_number = 0
header = six.next(reader)
line_number += 1
while header is not None and len(header[0]) > 1 and header[0][0] == "#":
header = six.next(reader)
line_number += 1
# do some sanity checking to make sure they uploaded the right kind of file
if len(header) < 1:
raise Exception("Invalid header for import file")
# normalize our header names, removing quotes and spaces
headers = [cls.normalize_value(_).lower() for _ in header]
return headers
|
TypeError
|
dataset/ETHPy150Open nyaruka/smartmin/smartmin/models.py/SmartModel.get_import_file_headers
|
9,797
|
@classmethod
def import_raw_csv(cls, filename, user, import_params, log=None, import_results=None):
# our alternative codec, by default we are the crazy windows encoding
ascii_codec = 'cp1252'
# read the entire file, look for mac_roman characters
reader = open(filename.name, "rb")
for byte in reader.read():
# these are latin accented characters in mac_roman; if we see them then our alternative
# encoding should be mac_roman
try:
byte_number = ord(byte)
except __HOLE__:
byte_number = byte
if byte_number in [0x81, 0x8d, 0x8f, 0x90, 0x9d]:
ascii_codec = 'mac_roman'
break
reader.close()
reader = open(filename.name, "rU")
def unicode_csv_reader(utf8_data, dialect=csv.excel, **kwargs):
csv_reader = csv.reader(utf8_data, dialect=dialect, **kwargs)
for row in csv_reader:
encoded = []
for cell in row:
try:
cell = six.text_type(cell)
except:
cell = six.text_type(cell.decode(ascii_codec))
encoded.append(cell)
yield encoded
reader = unicode_csv_reader(reader)
# read in our header
line_number = 0
header = six.next(reader)
line_number += 1
while header is not None and len(header[0]) > 1 and header[0][0] == "#":
header = six.next(reader)
line_number += 1
# do some sanity checking to make sure they uploaded the right kind of file
if len(header) < 1:
raise Exception("Invalid header for import file")
# normalize our header names, removing quotes and spaces
header = [cls.normalize_value(_).lower() for _ in header]
cls.validate_import_header(header)
records = []
num_errors = 0
error_messages = []
for row in list(reader):
# trim all our values
row = [cls.normalize_value(_) for _ in row]
line_number += 1
# make sure there are same number of fields
if len(row) != len(header):
raise Exception("Line %d: The number of fields for this row is incorrect. Expected %d but found %d." % (line_number, len(header), len(row)))
field_values = dict(zip(header, row))
field_values['created_by'] = user
field_values['modified_by'] = user
try:
field_values = cls.prepare_fields(field_values, import_params, user)
record = cls.create_instance(field_values)
if record:
records.append(record)
else:
num_errors += 1
except SmartImportRowError as e:
error_messages.append(dict(line=line_number, error=str(e)))
except Exception as e:
if log:
traceback.print_exc(100, log)
raise Exception("Line %d: %s\n\n%s" % (line_number, str(e), field_values))
if import_results is not None:
import_results['records'] = len(records)
import_results['errors'] = num_errors + len(error_messages)
import_results['error_messages'] = error_messages[:20]
return records
|
TypeError
|
dataset/ETHPy150Open nyaruka/smartmin/smartmin/models.py/SmartModel.import_raw_csv
|
9,798
|
def swapContours(f,gName1,gName2):
try:
g1 = f[gName1]
g2 = f[gName2]
except __HOLE__:
log("swapGlyphs failed for %s %s" % (gName1, gName2))
return
g3 = g1.copy()
while g1.contours:
g1.removeContour(0)
for contour in g2.contours:
g1.appendContour(contour)
g1.width = g2.width
while g2.contours:
g2.removeContour(0)
for contour in g3.contours:
g2.appendContour(contour)
g2.width = g3.width
|
KeyError
|
dataset/ETHPy150Open google/roboto/scripts/lib/fontbuild/Build.py/swapContours
|
9,799
|
def get_leader(self):
"""
Finds the leader of the ladder
"""
totals = {}
for result in self.result_set.filter(ladder=self):
try:
if result.result == 9:
totals[result.player] += int(result.result) + 3
else:
totals[result.player] += int(result.result) + 1
except __HOLE__:
if result.result == 9:
totals[result.player] = int(result.result) + 3
else:
totals[result.player] = int(result.result) + 1
if totals:
player = max(iter(totals.items()), key=operator.itemgetter(1))[0]
else:
return {'player': 'No Results', 'player_id': '../#', 'total': '-', 'division': self.division}
return {'player': str(player), 'player_id': player.id, 'total': totals[player], 'division': self.division}
|
KeyError
|
dataset/ETHPy150Open jzahedieh/django-tennis-ladder/ladder/models.py/Ladder.get_leader
|
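A sketch of the same tally as get_leader without the KeyError fallback, using collections.Counter, which defaults missing players to 0; Result and results are stand-ins for the ORM rows.

from collections import Counter, namedtuple

Result = namedtuple('Result', ['player', 'result'])
results = [Result('alice', 9), Result('bob', 3), Result('alice', 1)]

totals = Counter()
for r in results:
    bonus = 3 if r.result == 9 else 1   # same 9-is-a-win scoring as above
    totals[r.player] += int(r.result) + bonus
if totals:
    leader, total = totals.most_common(1)[0]
    print(leader, total)  # alice 14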