| Unnamed: 0 (int64, 0–10k) | function (stringlengths 79–138k) | label (stringclasses, 20 values) | info (stringlengths 42–261) |
|---|---|---|---|
1,300
|
def get_week(self):
    """
    Return the week for which this view should display data
    """
    week = self.week
    if week is None:
        try:
            week = self.kwargs['week']
        except KeyError:
            try:
                week = self.request.GET['week']
            except __HOLE__:
                raise Http404(_("No week specified"))
    return week
|
KeyError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/views/generic/dates.py/WeekMixin.get_week
|
1,301
|
def _date_from_string(year, year_format, month='', month_format='', day='', day_format='', delim='__'):
    """
    Helper: get a datetime.date object given a format string and a year,
    month, and day (only year is mandatory). Raise a 404 for an invalid date.
    """
    format = delim.join((year_format, month_format, day_format))
    datestr = delim.join((year, month, day))
    try:
        return datetime.datetime.strptime(datestr, format).date()
    except __HOLE__:
        raise Http404(_("Invalid date string '%(datestr)s' given format '%(format)s'") % {
            'datestr': datestr,
            'format': format,
        })
|
ValueError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/views/generic/dates.py/_date_from_string
|
1,302
|
def _get_next_prev(generic_view, date, is_previous, period):
    """
    Helper: Get the next or the previous valid date. The idea is to allow
    links on month/day views to never be 404s by never providing a date
    that'll be invalid for the given view.
    This is a bit complicated since it handles different intervals of time,
    hence the coupling to generic_view.
    However in essence the logic comes down to:
        * If allow_empty and allow_future are both true, this is easy: just
          return the naive result (just the next/previous day/week/month,
          reguardless of object existence.)
        * If allow_empty is true, allow_future is false, and the naive result
          isn't in the future, then return it; otherwise return None.
        * If allow_empty is false and allow_future is true, return the next
          date *that contains a valid object*, even if it's in the future. If
          there are no next objects, return None.
        * If allow_empty is false and allow_future is false, return the next
          date that contains a valid object. If that date is in the future, or
          if there are no next objects, return None.
    """
    date_field = generic_view.get_date_field()
    allow_empty = generic_view.get_allow_empty()
    allow_future = generic_view.get_allow_future()
    get_current = getattr(generic_view, '_get_current_%s' % period)
    get_next = getattr(generic_view, '_get_next_%s' % period)
    # Bounds of the current interval
    start, end = get_current(date), get_next(date)
    # If allow_empty is True, the naive result will be valid
    if allow_empty:
        if is_previous:
            result = get_current(start - datetime.timedelta(days=1))
        else:
            result = end
        if allow_future or result <= timezone_today():
            return result
        else:
            return None
    # Otherwise, we'll need to go to the database to look for an object
    # whose date_field is at least (greater than/less than) the given
    # naive result
    else:
        # Construct a lookup and an ordering depending on whether we're doing
        # a previous date or a next date lookup.
        if is_previous:
            lookup = {'%s__lt' % date_field: generic_view._make_date_lookup_arg(start)}
            ordering = '-%s' % date_field
        else:
            lookup = {'%s__gte' % date_field: generic_view._make_date_lookup_arg(end)}
            ordering = date_field
        # Filter out objects in the future if appropriate.
        if not allow_future:
            # Fortunately, to match the implementation of allow_future,
            # we need __lte, which doesn't conflict with __lt above.
            if generic_view.uses_datetime_field:
                now = timezone.now()
            else:
                now = timezone_today()
            lookup['%s__lte' % date_field] = now
        qs = generic_view.get_queryset().filter(**lookup).order_by(ordering)
        # Snag the first object from the queryset; if it doesn't exist that
        # means there's no next/previous link available.
        try:
            result = getattr(qs[0], date_field)
        except __HOLE__:
            return None
    # Convert datetimes to dates in the current time zone.
    if generic_view.uses_datetime_field:
        if settings.USE_TZ:
            result = timezone.localtime(result)
        result = result.date()
    # Return the first day of the period.
    return get_current(result)
|
IndexError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/views/generic/dates.py/_get_next_prev
|
1,303
|
def __getattr__(self, attr):
    try:
        return getattr(self._obj, attr)
    except __HOLE__:
        return wrapped(self._obj.Get, attr)
|
AttributeError
|
dataset/ETHPy150Open tjguk/winsys/winsys/active_directory.py/IADs.__getattr__
|
1,304
|
def _parseClientTCP(*args, **kwargs):
    """
    Perform any argument value coercion necessary for TCP client parameters.
    Valid positional arguments to this function are host and port.
    Valid keyword arguments to this function are all L{IReactorTCP.connectTCP}
    arguments.
    @return: The coerced values as a C{dict}.
    """
    if len(args) == 2:
        kwargs['port'] = int(args[1])
        kwargs['host'] = args[0]
    elif len(args) == 1:
        if 'host' in kwargs:
            kwargs['port'] = int(args[0])
        else:
            kwargs['host'] = args[0]
    try:
        kwargs['port'] = int(kwargs['port'])
    except __HOLE__:
        pass
    try:
        kwargs['timeout'] = int(kwargs['timeout'])
    except KeyError:
        pass
    return kwargs
|
KeyError
|
dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/internet/endpoints.py/_parseClientTCP
|
1,305
|
def _loadCAsFromDir(directoryPath):
    """
    Load certificate-authority certificate objects in a given directory.
    @param directoryPath: a L{FilePath} pointing at a directory to load .pem
        files from.
    @return: a C{list} of L{OpenSSL.crypto.X509} objects.
    """
    from twisted.internet import ssl
    caCerts = {}
    for child in directoryPath.children():
        if not child.basename().split('.')[-1].lower() == 'pem':
            continue
        try:
            data = child.getContent()
        except __HOLE__:
            # Permission denied, corrupt disk, we don't care.
            continue
        try:
            theCert = ssl.Certificate.loadPEM(data)
        except ssl.SSL.Error:
            # Duplicate certificate, invalid certificate, etc. We don't care.
            pass
        else:
            caCerts[theCert.digest()] = theCert.original
    return caCerts.values()
|
IOError
|
dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/internet/endpoints.py/_loadCAsFromDir
|
1,306
|
def _parseClientUNIX(*args, **kwargs):
    """
    Perform any argument value coercion necessary for UNIX client parameters.
    Valid keyword arguments to this function are all L{IReactorUNIX.connectUNIX}
    keyword arguments except for C{checkPID}. Instead, C{lockfile} is accepted
    and has the same meaning. Also C{path} is used instead of C{address}.
    Valid positional arguments to this function are C{path}.
    @return: The coerced values as a C{dict}.
    """
    if len(args) == 1:
        kwargs['path'] = args[0]
    try:
        kwargs['checkPID'] = bool(int(kwargs.pop('lockfile')))
    except __HOLE__:
        pass
    try:
        kwargs['timeout'] = int(kwargs['timeout'])
    except KeyError:
        pass
    return kwargs
|
KeyError
|
dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/internet/endpoints.py/_parseClientUNIX
|
1,307
|
def _find_calls(self):
    calls = []
    stack = []
    for m in self._profile_mmaps:
        m.seek(0)
        while True:
            b = m.read(struct.calcsize("=B"))
            if not b:
                break
            marker, = struct.unpack("=B", b)
            if marker == 0:
                m.close()
                break
            elif marker != PROFILE_IDENTIFIER:
                raise Exception
            event_id, timestamp = read_unpack("=Bd", m)
            if event_id == CALL_EVENT:
                func_name_len, = read_unpack("=L", m)
                func_name = m.read(func_name_len)
                stack.append((func_name, timestamp, []))
            elif event_id == RETURN_EVENT:
                try:
                    prev_func_name, prev_timestamp, subcalls = stack.pop()
                except __HOLE__:
                    # The function where the profile hook was enabled
                    # (and everything up the stack from there) will
                    # have returns recorded, but no call, so we ignore
                    # them.
                    if not stack:
                        continue
                call = PythonCall(prev_func_name, prev_timestamp, timestamp, subcalls)
                if stack:
                    stack[-1][2].append(call)
                else:
                    calls.append(call)
    while stack:
        prev_func_name, prev_timestamp, subcalls = stack.pop()
        call = PythonCall(prev_func_name, prev_timestamp, self._end_time, subcalls)
        if stack:
            stack[-1][2].append(call)
        else:
            calls.append(call)
    self.calls = calls
|
IndexError
|
dataset/ETHPy150Open alex/tracebin/client/tracebin/recorder.py/Recorder._find_calls
|
1,308
|
def __getitem__(self, key):
    for dict in self.dicts:
        try:
            return dict[key]
        except __HOLE__:
            pass
    raise KeyError
|
KeyError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/utils/datastructures.py/MergeDict.__getitem__
|
1,309
|
def get(self, key, default=None):
    try:
        return self[key]
    except __HOLE__:
        return default
|
KeyError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/utils/datastructures.py/MergeDict.get
|
1,310
|
def getlist(self, key):
    for dict in self.dicts:
        try:
            return dict.getlist(key)
        except __HOLE__:
            pass
    raise KeyError
|
KeyError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/utils/datastructures.py/MergeDict.getlist
|
1,311
|
def __getitem__(self, key):
    """
    Returns the last data value for this key, or [] if it's an empty list;
    raises KeyError if not found.
    """
    try:
        list_ = dict.__getitem__(self, key)
    except __HOLE__:
        raise MultiValueDictKeyError, "Key %r not found in %r" % (key, self)
    try:
        return list_[-1]
    except IndexError:
        return []
|
KeyError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/utils/datastructures.py/MultiValueDict.__getitem__
|
1,312
|
def get(self, key, default=None):
    "Returns the default value if the requested data doesn't exist"
    try:
        val = self[key]
    except __HOLE__:
        return default
    if val == []:
        return default
    return val
|
KeyError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/utils/datastructures.py/MultiValueDict.get
|
1,313
|
def getlist(self, key):
    "Returns an empty list if the requested data doesn't exist"
    try:
        return dict.__getitem__(self, key)
    except __HOLE__:
        return []
|
KeyError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/utils/datastructures.py/MultiValueDict.getlist
|
1,314
|
def update(self, *args, **kwargs):
    "update() extends rather than replaces existing key lists. Also accepts keyword args."
    if len(args) > 1:
        raise TypeError, "update expected at most 1 arguments, got %d", len(args)
    if args:
        other_dict = args[0]
        if isinstance(other_dict, MultiValueDict):
            for key, value_list in other_dict.lists():
                self.setlistdefault(key, []).extend(value_list)
        else:
            try:
                for key, value in other_dict.items():
                    self.setlistdefault(key, []).append(value)
            except __HOLE__:
                raise ValueError, "MultiValueDict.update() takes either a MultiValueDict or dictionary"
    for key, value in kwargs.iteritems():
        self.setlistdefault(key, []).append(value)
|
TypeError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/utils/datastructures.py/MultiValueDict.update
|
1,315
|
def __init__(self, key_to_list_mapping):
    for k, v in key_to_list_mapping.items():
        current = self
        bits = k.split('.')
        for bit in bits[:-1]:
            current = current.setdefault(bit, {})
        # Now assign value to current position
        try:
            current[bits[-1]] = v
        except __HOLE__: # Special-case if current isn't a dict.
            current = {bits[-1] : v}
|
TypeError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/utils/datastructures.py/DotExpandedDict.__init__
|
1,316
|
def Dial(proto, *args):
    """ A Dial is a generic client for stream-oriented protocols.
    Example::
        conn, err = Dial("tcp", ('127.0.0.1', 8000))
        conn.write("hello")
        print(conn.read())
    """
    try:
        dial_func = DIAL_HANDLERS[proto]
    except __HOLE__:
        raise ValueError("type should be tcp, udp or unix")
    return dial_func(*args)
|
KeyError
|
dataset/ETHPy150Open benoitc/flower/flower/net/__init__.py/Dial
|
1,317
|
def Listen(addr=('0.0.0.0', 0), proto="tcp", *args):
    """A Listener is a generic network listener for stream-oriented protocols.
    Multiple tasks may invoke methods on a Listener simultaneously.
    Example::
        def handle_connection(conn):
            while True:
                data = conn.read()
                if not data:
                    break
                conn.write(data)

        l = Listen(('127.0.0.1', 8000))
        try:
            while True:
                try:
                    conn, err = l.accept()
                    t = tasklet(handle_connection)(conn)
                except KeyboardInterrupt:
                    break
        finally:
            l.close()
        run()
    """
    try:
        listen_class = LISTEN_HANDLERS[proto]
    except __HOLE__:
        raise ValueError("type should be tcp, udp or unix")
    return listen_class(addr, *args)
|
KeyError
|
dataset/ETHPy150Open benoitc/flower/flower/net/__init__.py/Listen
|
1,318
|
def _checkConsistency(richInputs, fsm, inputContext):
    """
    Verify that the outputs that can be generated by fsm have their
    requirements satisfied by the given rich inputs.
    @param richInputs: A L{list} of all of the types which will serve as rich
        inputs to an L{IFiniteStateMachine}.
    @type richInputs: L{list} of L{IRichInput} providers
    @param fsm: The L{IFiniteStateMachine} to which these rich inputs are to be
        delivered.
    @param inputContext: A L{dict} mapping output symbols to L{Interface}
        subclasses.  Rich inputs which result in these outputs being produced
        by C{fsm} must provide the corresponding interface.
    @raise DoesNotImplement: If any of the rich input types fails to implement
        the interfaces required by the outputs C{fsm} can produce when they are
        received.
    """
    for richInput in richInputs:
        for state in fsm:
            for input in fsm[state]:
                if richInput.symbol() == input:
                    # This rich input will be supplied to represent this input
                    # symbol in this state.  Check to see if it satisfies the
                    # output requirements.
                    outputs = fsm[state][input].output
                    for output in outputs:
                        try:
                            required = inputContext[output]
                        except __HOLE__:
                            continue
                        # Consider supporting non-interface based checking in
                        # the future: extend this to also allow
                        # issubclass(richInput, required)
                        if required.implementedBy(richInput):
                            continue
                        raise DoesNotImplement(
                            "%r not implemented by %r, "
                            "required by %r in state %r" % (
                                required, richInput,
                                input, state))
|
KeyError
|
dataset/ETHPy150Open ClusterHQ/machinist/machinist/_fsm.py/_checkConsistency
|
1,319
|
def receive(self, input):
    current = self.table[self.state]
    if input not in self.inputs.iterconstants():
        raise IllegalInput(input)
    try:
        transition = current[input]
    except __HOLE__:
        raise UnhandledInput(self.state, input)
    self.state = transition.nextState
    return transition.output
|
KeyError
|
dataset/ETHPy150Open ClusterHQ/machinist/machinist/_fsm.py/_FiniteStateMachine.receive
|
1,320
|
def __init__(self, original, prefix="output_"):
    """
    @param original: Any old object with a bunch of methods using the specified
        method prefix.
    @param prefix: The string prefix which will be used for method
        dispatch.  For example, if C{"foo_"} is given then to execute the
        output symbol I{BAR}, C{original.foo_BAR} will be called.
    @type prefix: L{str}
    """
    self.original = original
    self.prefix = prefix
    try:
        identifier = self.original.identifier
    except __HOLE__:
        self._identifier = repr(self.original).decode("ascii")
    else:
        self._identifier = identifier()
|
AttributeError
|
dataset/ETHPy150Open ClusterHQ/machinist/machinist/_fsm.py/MethodSuffixOutputer.__init__
|
1,321
|
def __get__(self, obj, cls):
    if obj is None:
        return self
    if self._getter(obj) in self._allowed:
        try:
            return obj.__dict__[self]
        except __HOLE__:
            raise AttributeError()
    raise WrongState(self, obj)
|
KeyError
|
dataset/ETHPy150Open ClusterHQ/machinist/machinist/_fsm.py/stateful.__get__
|
1,322
|
def __delete__(self, obj):
    if self._getter(obj) not in self._allowed:
        raise WrongState(self, obj)
    try:
        del obj.__dict__[self]
    except __HOLE__:
        raise AttributeError()
|
KeyError
|
dataset/ETHPy150Open ClusterHQ/machinist/machinist/_fsm.py/stateful.__delete__
|
1,323
|
def func_factory(method):
    try:
        func = getattr(math, method)
    except __HOLE__:
        return
    def inner(arg1, arg2=None):
        try:
            return func(arg1, arg2)
        except TypeError:
            return func(arg1)
    inner.__name__ = method
    doc = func.__doc__.splitlines()
    if len(doc) > 1 and not doc[1]:
        doc = doc[2:]
    inner.__doc__ = '\n'.join(doc)
    if method.startswith('is'):
        return comparison(inner)
    return filter(function(inner))
|
AttributeError
|
dataset/ETHPy150Open justquick/django-native-tags/native_tags/contrib/math_.py/func_factory
|
1,324
|
def main(argv):  # pragma: no cover
    ip = "127.0.0.1"
    port = 5684
    try:
        opts, args = getopt.getopt(argv, "hi:p:", ["ip=", "port="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            usage()
            sys.exit()
        elif opt in ("-i", "--ip"):
            ip = arg
        elif opt in ("-p", "--port"):
            port = int(arg)
    server = CoAPForwardProxy(ip, port)
    try:
        server.listen(10)
    except __HOLE__:
        print "Server Shutdown"
        server.close()
        print "Exiting..."
|
KeyboardInterrupt
|
dataset/ETHPy150Open Tanganelli/CoAPthon/coapforwardproxy.py/main
|
1,325
|
def _get_rt_from_session(self):
    """
    Returns the request token cached in the session by
    ``_get_request_token``
    """
    try:
        return self.request.session['oauth_%s_request_token'
                                    % get_token_prefix(
                                        self.request_token_url)]
    except __HOLE__:
        raise OAuthError(_('No request token saved for "%s".')
                         % get_token_prefix(self.request_token_url))
|
KeyError
|
dataset/ETHPy150Open pennersr/django-allauth/allauth/socialaccount/providers/oauth/client.py/OAuthClient._get_rt_from_session
|
1,326
|
def _get_at_from_session(self):
    """
    Get the saved access token for private resources from the session.
    """
    try:
        return self.request.session['oauth_%s_access_token'
                                    % get_token_prefix(
                                        self.request_token_url)]
    except __HOLE__:
        raise OAuthError(
            _('No access token saved for "%s".')
            % get_token_prefix(self.request_token_url))
|
KeyError
|
dataset/ETHPy150Open pennersr/django-allauth/allauth/socialaccount/providers/oauth/client.py/OAuth._get_at_from_session
|
1,327
|
def feed_index(service, opts):
    """Feed the named index in a specific manner."""
    indexname = opts.args[0]
    itype = opts.kwargs['ingest']
    # get index handle
    try:
        index = service.indexes[indexname]
    except KeyError:
        print "Index %s not found" % indexname
        return
    if itype in ["stream", "submit"]:
        stream = index.attach()
    else:
        # create a tcp input if one doesn't exist
        input_host = opts.kwargs.get("inputhost", SPLUNK_HOST)
        input_port = int(opts.kwargs.get("inputport", SPLUNK_PORT))
        input_name = "tcp:%s" % (input_port)
        if input_name not in service.inputs.list():
            service.inputs.create("tcp", input_port, index=indexname)
        # connect to socket
        ingest = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        ingest.connect((input_host, input_port))
    count = 0
    lastevent = ""
    try:
        for i in range(0, 10):
            for j in range(0, 5000):
                lastevent = "%s: event bunch %d, number %d\n" % \
                            (datetime.datetime.now().isoformat(), i, j)
                if itype == "stream":
                    stream.write(lastevent + "\n")
                elif itype == "submit":
                    index.submit(lastevent + "\n")
                else:
                    ingest.send(lastevent + "\n")
                count = count + 1
            print "submitted %d events, sleeping 1 second" % count
            time.sleep(1)
    except __HOLE__:
        print "^C detected, last event written:"
        print lastevent
|
KeyboardInterrupt
|
dataset/ETHPy150Open splunk/splunk-sdk-python/examples/genevents.py/feed_index
|
1,328
|
def new_object_graph(
        modules=finding.ALL_IMPORTED_MODULES, classes=None, binding_specs=None,
        only_use_explicit_bindings=False, allow_injecting_none=False,
        configure_method_name='configure',
        dependencies_method_name='dependencies',
        get_arg_names_from_class_name=(
            bindings.default_get_arg_names_from_class_name),
        get_arg_names_from_provider_fn_name=(
            providing.default_get_arg_names_from_provider_fn_name),
        id_to_scope=None, is_scope_usable_from_scope=lambda _1, _2: True,
        use_short_stack_traces=True):
    """Creates a new object graph.
    Args:
      modules: the modules in which to search for classes for which to create
          implicit bindings; if None, then no modules; by default, all
          modules imported at the time of calling this method
      classes: the classes for which to create implicit bindings; if None (the
          default), then no classes
      binding_specs: the BindingSpec subclasses to get bindings and provider
          methods from; if None (the default), then no binding specs
      only_use_explicit_bindings: whether to use only explicit bindings (i.e.,
          created by binding specs or @pinject.injectable, etc.)
      allow_injecting_none: whether to allow a provider method to provide None
      configure_method_name: the name of binding specs' configure method
      dependencies_method_name: the name of binding specs' dependencies method
      get_arg_names_from_class_name: a function mapping a class name to a
          sequence of the arg names to which those classes should be
          implicitly bound (if any)
      get_arg_names_from_provider_fn_name: a function mapping a provider
          method name to a sequence of the arg names for which that method is
          a provider (if any)
      id_to_scope: a map from scope ID to the concrete Scope implementation
          instance for that scope
      is_scope_usable_from_scope: a function taking two scope IDs and
          returning whether an object in the first scope can be injected into
          an object from the second scope; by default, injection is allowed
          from any scope into any other scope
      use_short_stack_traces: whether to shorten the stack traces for
          exceptions that Pinject raises, so that they don't contain the
          innards of Pinject
    Returns:
      an ObjectGraph
    Raises:
      Error: the object graph is not creatable as specified
    """
    try:
        if modules is not None and modules is not finding.ALL_IMPORTED_MODULES:
            _verify_types(modules, types.ModuleType, 'modules')
        if classes is not None:
            _verify_types(classes, types.TypeType, 'classes')
        if binding_specs is not None:
            _verify_subclasses(
                binding_specs, bindings.BindingSpec, 'binding_specs')
        if get_arg_names_from_class_name is not None:
            _verify_callable(get_arg_names_from_class_name,
                             'get_arg_names_from_class_name')
        if get_arg_names_from_provider_fn_name is not None:
            _verify_callable(get_arg_names_from_provider_fn_name,
                             'get_arg_names_from_provider_fn_name')
        if is_scope_usable_from_scope is not None:
            _verify_callable(is_scope_usable_from_scope,
                             'is_scope_usable_from_scope')
        injection_context_factory = injection_contexts.InjectionContextFactory(
            is_scope_usable_from_scope)
        id_to_scope = scoping.get_id_to_scope_with_defaults(id_to_scope)
        bindable_scopes = scoping.BindableScopes(id_to_scope)
        known_scope_ids = id_to_scope.keys()
        found_classes = finding.find_classes(modules, classes)
        if only_use_explicit_bindings:
            implicit_class_bindings = []
        else:
            implicit_class_bindings = bindings.get_implicit_class_bindings(
                found_classes, get_arg_names_from_class_name)
        explicit_bindings = bindings.get_explicit_class_bindings(
            found_classes, get_arg_names_from_class_name)
        binder = bindings.Binder(explicit_bindings, known_scope_ids)
        required_bindings = required_bindings_lib.RequiredBindings()
        if binding_specs is not None:
            binding_specs = list(binding_specs)
            processed_binding_specs = set()
            while binding_specs:
                binding_spec = binding_specs.pop()
                if binding_spec in processed_binding_specs:
                    continue
                processed_binding_specs.add(binding_spec)
                all_kwargs = {'bind': binder.bind,
                              'require': required_bindings.require}
                has_configure = hasattr(binding_spec, configure_method_name)
                if has_configure:
                    configure_method = getattr(binding_spec, configure_method_name)
                    configure_kwargs = _pare_to_present_args(
                        all_kwargs, configure_method)
                    if not configure_kwargs:
                        raise errors.ConfigureMethodMissingArgsError(
                            configure_method, all_kwargs.keys())
                    try:
                        configure_method(**configure_kwargs)
                    except __HOLE__:
                        has_configure = False
                dependencies = None
                if hasattr(binding_spec, dependencies_method_name):
                    dependencies_method = (
                        getattr(binding_spec, dependencies_method_name))
                    dependencies = dependencies_method()
                    binding_specs.extend(dependencies)
                provider_bindings = bindings.get_provider_bindings(
                    binding_spec, known_scope_ids,
                    get_arg_names_from_provider_fn_name)
                explicit_bindings.extend(provider_bindings)
                if (not has_configure and
                        not dependencies and
                        not provider_bindings):
                    raise errors.EmptyBindingSpecError(binding_spec)
        binding_key_to_binding, collided_binding_key_to_bindings = (
            bindings.get_overall_binding_key_to_binding_maps(
                [implicit_class_bindings, explicit_bindings]))
        binding_mapping = bindings.BindingMapping(
            binding_key_to_binding, collided_binding_key_to_bindings)
        binding_mapping.verify_requirements(required_bindings.get())
    except errors.Error as e:
        if use_short_stack_traces:
            raise e
        else:
            raise
    is_injectable_fn = {True: decorators.is_explicitly_injectable,
                        False: (lambda cls: True)}[only_use_explicit_bindings]
    obj_provider = object_providers.ObjectProvider(
        binding_mapping, bindable_scopes, allow_injecting_none)
    return ObjectGraph(
        obj_provider, injection_context_factory, is_injectable_fn,
        use_short_stack_traces)
|
NotImplementedError
|
dataset/ETHPy150Open google/pinject/pinject/object_graph.py/new_object_graph
|
1,329
|
@staticmethod
def parse(filename):
    if os.name in ("nt", "dos", "os2", "ce"):
        lib = windll.MediaInfo
    elif sys.platform == "darwin":
        try:
            lib = CDLL("libmediainfo.0.dylib")
        except __HOLE__:
            lib = CDLL("libmediainfo.dylib")
    else:
        lib = CDLL("libmediainfo.so.0")
    # Define arguments and return types
    lib.MediaInfo_Inform.restype = c_wchar_p
    lib.MediaInfo_New.argtypes = []
    lib.MediaInfo_New.restype = c_void_p
    lib.MediaInfo_Option.argtypes = [c_void_p, c_wchar_p, c_wchar_p]
    lib.MediaInfo_Option.restype = c_wchar_p
    lib.MediaInfoA_Option.argtypes = [c_void_p, c_char_p, c_char_p]
    lib.MediaInfoA_Option.restype = c_char_p
    lib.MediaInfo_Inform.argtypes = [c_void_p, c_size_t]
    lib.MediaInfo_Inform.restype = c_wchar_p
    lib.MediaInfoA_Open.argtypes = [c_void_p, c_char_p]
    lib.MediaInfoA_Open.restype = c_size_t
    lib.MediaInfo_Delete.argtypes = [c_void_p]
    lib.MediaInfo_Delete.restype = None
    lib.MediaInfo_Close.argtypes = [c_void_p]
    lib.MediaInfo_Close.restype = None
    # Create a MediaInfo handle
    handle = lib.MediaInfo_New()
    lib.MediaInfo_Option(handle, "CharSet", "UTF-8")
    lib.MediaInfoA_Option(None, b"Inform", b"XML")
    lib.MediaInfoA_Option(None, b"Complete", b"1")
    lib.MediaInfoA_Open(handle, filename.encode("utf8"))
    xml = lib.MediaInfo_Inform(handle, 0)
    # Delete the handle
    lib.MediaInfo_Close(handle)
    lib.MediaInfo_Delete(handle)
    return MediaInfo(xml)
|
OSError
|
dataset/ETHPy150Open sbraz/pymediainfo/pymediainfo/__init__.py/MediaInfo.parse
|
1,330
|
def get_driver(self, resource_id):
    try:
        return self._drivers[resource_id]
    except __HOLE__:
        with excutils.save_and_reraise_exception(reraise=False):
            raise cfg_exceptions.DriverNotFound(resource='router',
                                                id=resource_id)
|
KeyError
|
dataset/ETHPy150Open openstack/networking-cisco/networking_cisco/plugins/cisco/cfg_agent/device_drivers/driver_mgr.py/DeviceDriverManager.get_driver
|
1,331
|
def get_driver_for_hosting_device(self, hd_id):
    try:
        return self._hosting_device_routing_drivers_binding[hd_id]
    except __HOLE__:
        with excutils.save_and_reraise_exception(reraise=False):
            raise cfg_exceptions.DriverNotFound(resource='hosting device',
                                                id=hd_id)
|
KeyError
|
dataset/ETHPy150Open openstack/networking-cisco/networking_cisco/plugins/cisco/cfg_agent/device_drivers/driver_mgr.py/DeviceDriverManager.get_driver_for_hosting_device
|
1,332
|
def set_driver(self, resource):
    """Set the driver for a neutron resource.
    :param resource: Neutron resource in dict format. Expected keys:
        { 'id': <value>
          'hosting_device': { 'id': <value>, }
          'router_type': {'cfg_agent_driver': <value>, }
        }
    :return driver : driver object
    """
    try:
        resource_id = resource['id']
        hosting_device = resource['hosting_device']
        hd_id = hosting_device['id']
        if hd_id in self._hosting_device_routing_drivers_binding:
            driver = self._hosting_device_routing_drivers_binding[hd_id]
            self._drivers[resource_id] = driver
        else:
            driver_class = resource['router_type']['cfg_agent_driver']
            driver = importutils.import_object(driver_class,
                                               **hosting_device)
            self._hosting_device_routing_drivers_binding[hd_id] = driver
            self._drivers[resource_id] = driver
        return driver
    except ImportError:
        with excutils.save_and_reraise_exception(reraise=False):
            LOG.exception(_LE("Error loading cfg agent driver %(driver)s "
                              "for hosting device template %(t_name)s"
                              "(%(t_id)s)"),
                          {'driver': driver_class, 't_id': hd_id,
                           't_name': resource['name']})
            raise cfg_exceptions.DriverNotExist(driver=driver_class)
    except __HOLE__ as e:
        with excutils.save_and_reraise_exception(reraise=False):
            raise cfg_exceptions.DriverNotSetForMissingParameter(e)
|
KeyError
|
dataset/ETHPy150Open openstack/networking-cisco/networking_cisco/plugins/cisco/cfg_agent/device_drivers/driver_mgr.py/DeviceDriverManager.set_driver
|
1,333
|
@cached_property
def local(self):
    try:
        self.storage.path('')
    except __HOLE__:
        return False
    return True
|
NotImplementedError
|
dataset/ETHPy150Open django/django/django/contrib/staticfiles/management/commands/collectstatic.py/Command.local
|
1,334
|
def clear_dir(self, path):
    """
    Deletes the given relative path using the destination storage backend.
    """
    if not self.storage.exists(path):
        return
    dirs, files = self.storage.listdir(path)
    for f in files:
        fpath = os.path.join(path, f)
        if self.dry_run:
            self.log("Pretending to delete '%s'" %
                     smart_text(fpath), level=1)
        else:
            self.log("Deleting '%s'" % smart_text(fpath), level=1)
            try:
                full_path = self.storage.path(fpath)
            except __HOLE__:
                self.storage.delete(fpath)
            else:
                if not os.path.exists(full_path) and os.path.lexists(full_path):
                    # Delete broken symlinks
                    os.unlink(full_path)
                else:
                    self.storage.delete(fpath)
    for d in dirs:
        self.clear_dir(os.path.join(path, d))
|
NotImplementedError
|
dataset/ETHPy150Open django/django/django/contrib/staticfiles/management/commands/collectstatic.py/Command.clear_dir
|
1,335
|
def delete_file(self, path, prefixed_path, source_storage):
    """
    Checks if the target file should be deleted if it already exists
    """
    if self.storage.exists(prefixed_path):
        try:
            # When was the target file modified last time?
            target_last_modified = self.storage.get_modified_time(prefixed_path)
        except (OSError, __HOLE__, AttributeError):
            # The storage doesn't support get_modified_time() or failed
            pass
        else:
            try:
                # When was the source file modified last time?
                source_last_modified = source_storage.get_modified_time(path)
            except (OSError, NotImplementedError, AttributeError):
                pass
            else:
                # The full path of the target file
                if self.local:
                    full_path = self.storage.path(prefixed_path)
                else:
                    full_path = None
                # Skip the file if the source file is younger
                # Avoid sub-second precision (see #14665, #19540)
                if (target_last_modified.replace(microsecond=0) >= source_last_modified.replace(microsecond=0) and
                        full_path and not (self.symlink ^ os.path.islink(full_path))):
                    if prefixed_path not in self.unmodified_files:
                        self.unmodified_files.append(prefixed_path)
                    self.log("Skipping '%s' (not modified)" % path)
                    return False
        # Then delete the existing file if really needed
        if self.dry_run:
            self.log("Pretending to delete '%s'" % path)
        else:
            self.log("Deleting '%s'" % path)
            self.storage.delete(prefixed_path)
    return True
|
NotImplementedError
|
dataset/ETHPy150Open django/django/django/contrib/staticfiles/management/commands/collectstatic.py/Command.delete_file
|
1,336
|
def link_file(self, path, prefixed_path, source_storage):
    """
    Attempt to link ``path``
    """
    # Skip this file if it was already copied earlier
    if prefixed_path in self.symlinked_files:
        return self.log("Skipping '%s' (already linked earlier)" % path)
    # Delete the target file if needed or break
    if not self.delete_file(path, prefixed_path, source_storage):
        return
    # The full path of the source file
    source_path = source_storage.path(path)
    # Finally link the file
    if self.dry_run:
        self.log("Pretending to link '%s'" % source_path, level=1)
    else:
        self.log("Linking '%s'" % source_path, level=1)
        full_path = self.storage.path(prefixed_path)
        try:
            os.makedirs(os.path.dirname(full_path))
        except OSError:
            pass
        try:
            if os.path.lexists(full_path):
                os.unlink(full_path)
            os.symlink(source_path, full_path)
        except AttributeError:
            import platform
            raise CommandError("Symlinking is not supported by Python %s." %
                               platform.python_version())
        except NotImplementedError:
            import platform
            raise CommandError("Symlinking is not supported in this "
                               "platform (%s)." % platform.platform())
        except __HOLE__ as e:
            raise CommandError(e)
    if prefixed_path not in self.symlinked_files:
        self.symlinked_files.append(prefixed_path)
|
OSError
|
dataset/ETHPy150Open django/django/django/contrib/staticfiles/management/commands/collectstatic.py/Command.link_file
|
1,337
|
def walk(node, gen_subnodes, event = enter, reverse_path = False, tree=True):
    """Traverse a tree or a graph based at 'node' and generate a sequence
    of paths in the graph from the initial node to the visited node.
    The arguments are
    @ node : an arbitrary python object used as root node.
    @ gen_subnodes : a function defining the graph structure. It must
        have the interface gen_subnodes(node) --> iterable containing
        other nodes. This function will be called with the initial
        node and the descendent nodes that it generates through
        this function.
    @ event: an integral value specifying which paths will be generated
        during the depth-first walk. This is usually a value obtained
        by composing the walk events (see below) with bitwise operators.
        For example passing event = event(enter|leaf|bounce) will
        generate inner nodes the first time they are entered, leaf
        nodes and all the nodes every time they are revisited during
        the walk.
    @ reverse_path: a boolean indicating that the path should be read
        from right to left (defaults to False).
    @ tree: a boolean indicating that the walked graph is a tree,
        which means that applying gen_subnodes() will only generate
        new nodes (defaults to True). Passing True if the graph
        is not a tree will walk multiple subgraphs several times,
        or lead to an infinite walk and a memory error if the graph
        contains cycles. When a False value is given, this function
        stores all the previously visited nodes during the walk.
        When a True value is given, only the nodes in the current
        path are stored.
    Typical use:
        for path in walk(node, func, event(enter|leaf)):
            # this choice of events results in a preorder traversal
            visited = path[-1]
            if path.event & leaf:
                print(visited, 'is a leaf node!')
    The generated 'path' is a read-only sequence of nodes with path[0] being
    the base node of the walk and path[-1] being the visited node. If
    reverse_path is set to True, the path will appear from right to left,
    with the visited node in position 0. During the whole walk, the function
    generates the same path object, each time in a different state.
    Internally, this path is implemented using a collections.deque object,
    which means that indexing an element in the middle of the path (but not
    near both ends) may require a time proportional to its length.
    The generated paths have an attribute path.event which value is an
    integer in the range [0,128[ representing a bitwise combination of
    the base events (which are also integers) explained below
    enter: the currently visited node is an inner node of the tree
        generated before this node's subgraph is visited.
    within: the currently visited node is an inner node generated after
        its first subgraph has been visited but before the other
        subgraphs.
    exit: the currently visited node is an inner node generated after
        all its subgraphs have been visited.
    leaf: the currently visited node is a leaf node.
    inner: the currently visited node is an inner node
    cycle: the currently visited node is an internal node already on
        the path, which means that the graph has a cycle. The subgraph
        based on this node will not be walked.
    bounce: the currently visited node is either an internal node which
        subgraph has already been walked, or a leaf already met.
        Subgraphs are never walked twice with the argument tree=False.
    The actual events generated are often a combination of these events, for
    example, one may have a value of event(leaf & ~bounce). This attribute
    path.event is best tested with bitwise operators. For example to test if
    the walk is on a leaf, use 'if path.event & leaf:'.
    The constant events are also attributes of the walk function, namely
    (walk.enter, walk.within, ...)
    """
    mask, selector = parse_event_arg(event)
    isub = selector.index('', 1)
    ileft = selector.index('', isub + 1)
    tcycle = mask & cycle
    tleaf = mask & leaf
    tibounce = mask & bounce & inner
    tfbounce = mask & bounce & leaf
    tffirst = mask & ~bounce & leaf
    todo = deque((iter((node,)),))
    path = deque()
    const_path = ConstSequence(path)
    if reverse_path:
        ppush, ppop, ivisited = path.appendleft, path.popleft, 0
    else:
        ppush, ppop, ivisited = path.append, path.pop, -1
    less, more = todo.pop, todo.extend
    hist = _MockDict() if tree else dict()
    try:
        while True:
            sequence = todo[-1]
            if sequence.__class__ is _Int:
                less()
                if sequence is _pop:
                    # this node's subtree is exhausted, prepare for bounce
                    hist[path[ivisited]] = tibounce
                    ppop()
                else:
                    const_path.event = sequence
                    yield const_path
            else:
                try:
                    node = next(sequence)
                except __HOLE__:
                    less()
                else:
                    ppush(node)
                    # if node in history, generate a bounce event
                    # (actually one of (leaf & bounce, inner & bounce, cycle))
                    if node in hist:
                        const_path.event = hist[node]
                        if const_path.event:
                            yield const_path
                        ppop()
                    else:
                        sub = iter(gen_subnodes(node))
                        try:
                            snode = next(sub)
                        except StopIteration:
                            hist[node] = tfbounce
                            if tleaf:
                                const_path.event = tffirst
                                yield const_path
                            ppop()
                        else:
                            # add node
                            hist[node] = tcycle
                            selector[ileft] = iter((snode,))
                            selector[isub] = sub
                            more(selector)
    except IndexError:
        if todo:  # this allows gen_subnodes() to raise IndexError
            raise
|
StopIteration
|
dataset/ETHPy150Open tow/sunburnt/sunburnt/walktree.py/walk
|
1,338
|
def Load(self, name, batchsize=None, typesize=4):
    data_proto = self.data_proto
    try:
        this_set = next(d for d in data_proto.data if d.name == name)
    except __HOLE__ as e:
        print 'No data called %s found in proto file.' % name
        raise e
    filenames = sorted(glob.glob(os.path.join(data_proto.prefix,
                                              this_set.file_pattern)))
    numdims = np.prod(np.array(this_set.dimensions))
    numlabels = this_set.num_labels
    key = this_set.key
    self.numdims = numdims * numlabels
    datasetsize = this_set.size
    if batchsize is None:
        batchsize = datasetsize
    self.batchsize = batchsize
    total_disk_space = datasetsize * numdims * typesize
    self.numbatches = datasetsize / batchsize
    max_cpu_capacity = min(total_disk_space, GetBytes(data_proto.main_memory))
    self.num_cpu_batches = max_cpu_capacity / (typesize * numdims * batchsize)
    cpu_capacity = self.num_cpu_batches * batchsize * numdims * typesize
    self.disk = Disk([filenames], [numdims], datasetsize, keys=[key])
    self.cpu_cache = Cache(self.disk, cpu_capacity, [numdims],
                           typesize = typesize, randomize=False)
|
StopIteration
|
dataset/ETHPy150Open nitishsrivastava/deepnet/deepnet/compute_data_stats.py/DataViewer.Load
|
1,339
|
def remove_target(self, shape):
    """
    This method removes the specified shape from the __shapes list.
    :param shape: MShape : Shape to be removed from the __shapes list.
    :return: bool : Whether or not the shape was found or not.
    """
    try:
        self.__shapes.remove(shape)
        return True
    except __HOLE__:
        return False
|
ValueError
|
dataset/ETHPy150Open GelaniNijraj/PyMaterial/MAnimations/MAnimator.py/MAnimator.remove_target
|
1,340
|
def to_json(content, indent=None):
    """
    Serializes a python object as JSON
    This method uses the DJangoJSONEncoder to to ensure that python objects
    such as Decimal objects are properly serialized. It can also serialize
    Django QuerySet objects.
    """
    if isinstance(content, QuerySet):
        json_serializer = serializers.get_serializer('json')()
        serialized_content = json_serializer.serialize(content, ensure_ascii=False, indent=indent)
    else:
        try:
            serialized_content = json.dumps(content, cls=DecimalEncoder, ensure_ascii=False, indent=indent)
        except __HOLE__:
            # Fix for Django 1.5
            serialized_content = json.dumps(content, ensure_ascii=False, indent=indent)
    return serialized_content
|
TypeError
|
dataset/ETHPy150Open croach/django-simple-rest/simple_rest/utils/serializers.py/to_json
|
1,341
|
def get_logger(entity):
    """
    Retrieves loggers from the enties fully scoped name.
    get_logger(Replay) -> sc2reader.replay.Replay
    get_logger(get_logger) -> sc2reader.utils.get_logger
    :param entity: The entity for which we want a logger.
    """
    try:
        return logging.getLogger(entity.__module__+'.'+entity.__name__)
    except __HOLE__:
        raise TypeError("Cannot retrieve logger for {0}.".format(entity))
|
AttributeError
|
dataset/ETHPy150Open GraylinKim/sc2reader/sc2reader/log_utils.py/get_logger
|
1,342
|
def _generateFilters(self, filter_list):
    filter_strings = []
    filter_strings_ipv6 = []
    for tcfilter in filter_list:
        filter_tokens = tcfilter.split(",")
        try:
            filter_string = ""
            filter_string_ipv6 = ""
            for token in filter_tokens:
                token_split = token.split("=")
                key = token_split[0]
                value = token_split[1]
                # Check for ipv6 addresses and add them to the appropriate
                # filter string
                if key == "src" or key == "dst":
                    if '::' in value:
                        filter_string_ipv6 += "match ip6 {0} {1} ".format(
                            key, value)
                    else:
                        filter_string += "match ip {0} {1} ".format(
                            key, value)
                else:
                    filter_string += "match ip {0} {1} ".format(key, value)
                    filter_string_ipv6 += "match ip6 {0} {1} ".format(
                        key, value)
                if key == "sport" or key == "dport":
                    filter_string += "0xffff "
                    filter_string_ipv6 += "0xffff "
        except __HOLE__:
            print("Invalid filter parameters")
        if filter_string:
            filter_strings.append(filter_string)
        if filter_string_ipv6:
            filter_strings_ipv6.append(filter_string_ipv6)
    return filter_strings, filter_strings_ipv6
|
IndexError
|
dataset/ETHPy150Open urbenlegend/netimpair/netimpair2.py/NetemInstance._generateFilters
|
1,343
|
def main():
    # Network impairment arguments
    argparser = argparse.ArgumentParser(
        description="Network Impairment Test Tool")
    argparser.add_argument(
        "-n",
        "--nic",
        metavar="INTERFACE",
        required=True,
        type=str,
        help="name of the network interface to be impaired")
    argparser.add_argument(
        "--inbound",
        action="store_true",
        help="do inbound impairment on the interface instead of outbound")
    argparser.add_argument(
        "--include",
        action="append",
        default=[],
        help="ip addresses and/or ports to include in network impairment (example: --include src=ip,sport=portnum --include dst=ip,dport=portnum)")
    argparser.add_argument(
        "--exclude",
        action="append",
        default=[
            "sport=22",
            "dport=22"],
        help="ip addresses and/or ports to exclude from network impairment (example: --exclude src=ip,sport=portnum --exclude dst=ip,dport=portnum)")
    subparsers = argparser.add_subparsers(
        title="impairments",
        dest="subparser_name",
        description="specify which impairment to enable",
        help="valid impairments")
    # loss args
    netem_args = subparsers.add_parser("netem", help="enable packet loss")
    netem_args.add_argument(
        "--loss_ratio",
        type=int,
        default=0,
        help="specify percentage of packets that will be lost")
    netem_args.add_argument(
        "--loss_corr",
        type=int,
        default=0,
        help="specify a correlation factor for the random packet loss")
    # dup args
    netem_args.add_argument(
        "--dup_ratio",
        type=int,
        default=0,
        help="specify percentage of packets that will be duplicated")
    # delay/jitter args
    netem_args.add_argument(
        "--delay",
        type=int,
        default=0,
        help="specify an overall delay for each packet")
    netem_args.add_argument(
        "--jitter",
        type=int,
        default=0,
        help="specify amount of jitter in milliseconds")
    netem_args.add_argument(
        "--delay_jitter_corr",
        type=int,
        default=0,
        help="specify a correlation factor for the random jitter")
    # reorder args
    netem_args.add_argument(
        "--reorder_ratio",
        type=int,
        default=0,
        help="specify percentage of packets that will be reordered")
    netem_args.add_argument(
        "--reorder_corr",
        type=int,
        default=0,
        help="specify a correlation factor for the random reordering")
    # toggle parameter
    netem_args.add_argument(
        "--toggle",
        nargs="+",
        type=int,
        default=[1000000],
        help="toggles impairment on and off on specific intervals (example: --toggle 6 3 5 1 will enable impairment for 6 seconds, turn it off for 3, turn it on for 5, and turn it off for 1")
    # rate limit args
    rate_args = subparsers.add_parser("rate", help="enable packet reorder")
    rate_args.add_argument(
        "--limit",
        type=int,
        default=0,
        help="specify rate limit in kb")
    rate_args.add_argument(
        "--buffer",
        type=int,
        default=2000,
        help="specify how many tokens in terms of bytes should be available")
    rate_args.add_argument(
        "--latency",
        type=int,
        default=20,
        help="specify the maximum time packets can stay in the queue before getting dropped")
    rate_args.add_argument(
        "--toggle",
        nargs="+",
        type=int,
        default=[1000000],
        help="toggles impairment on and off on specific intervals (example: --toggle 6 3 5 1 will enable impairment for 6 seconds, turn it off for 3, turn it on for 5, and turn it off for 1")
    args = argparser.parse_args()
    if os.geteuid() != 0:
        print("You need root permissions to enable impairment! Please run with sudo or as root.")
        exit(1)
    try:
        netem = NetemInstance()
        if netem.initialize(
                args.nic,
                args.inbound,
                args.include,
                args.exclude):
            # Catch SIGINT and SIGTERM so that we can cleanup
            def handleSIGTERM(signum, frame):
                print("")
                netem.teardown()
                # Print blank line before quitting to deal with some crappy
                # terminal behavior
                print("")
                exit(5)
            signal.signal(signal.SIGINT, handleSIGTERM)
            signal.signal(signal.SIGTERM, handleSIGTERM)
            print(
                "Network impairment starting. Press Ctrl-C to restore normal behavior and quit.")
            # Do impairment
            if args.subparser_name == "netem":
                netem.netem(
                    args.loss_ratio,
                    args.loss_corr,
                    args.dup_ratio,
                    args.delay,
                    args.jitter,
                    args.delay_jitter_corr,
                    args.reorder_ratio,
                    args.reorder_corr,
                    args.toggle)
            elif args.subparser_name == "rate":
                netem.rate(args.limit, args.buffer, args.latency, args.toggle)
            # Shutdown cleanly
            netem.teardown()
        else:
            print("NetemInstance failed to initialize correctly. Terminating")
            netem.teardown()
            exit(1)
    except __HOLE__ as e:
        print(e)
        netem.teardown()
        exit(5)
|
AssertionError
|
dataset/ETHPy150Open urbenlegend/netimpair/netimpair2.py/main
|
1,344
|
@classmethod
def get_resource_type(cls, ref):
    try:
        if not cls.is_reference(ref):
            raise ValueError('%s is not a valid reference.' % ref)
        return ref.split(cls.separator, 1)[0]
    except (ValueError, IndexError, __HOLE__):
        raise common_models.InvalidReferenceError(ref=ref)
|
AttributeError
|
dataset/ETHPy150Open StackStorm/st2/st2common/st2common/models/db/policy.py/PolicyTypeReference.get_resource_type
|
1,345
|
@classmethod
def get_name(cls, ref):
    try:
        if not cls.is_reference(ref):
            raise ValueError('%s is not a valid reference.' % ref)
        return ref.split(cls.separator, 1)[1]
    except (__HOLE__, IndexError, AttributeError):
        raise common_models.InvalidReferenceError(ref=ref)
|
ValueError
|
dataset/ETHPy150Open StackStorm/st2/st2common/st2common/models/db/policy.py/PolicyTypeReference.get_name
|
1,346
|
def is_day(date):
    try:
        date_format = '%Y-%m-%d'
        is_valid = datetime.datetime.strptime(date, date_format).strftime(
            date_format) == date
    except TypeError:
        is_valid = False
    except __HOLE__:
        is_valid = False
    return is_valid
|
ValueError
|
dataset/ETHPy150Open Dwolla/arbalest/arbalest/pipeline/__init__.py/is_day
|
1,347
|
def is_day_hour(date):
    try:
        date_format = '%Y-%m-%d/%H'
        is_valid = datetime.datetime.strptime(date, date_format).strftime(
            date_format) == date
    except __HOLE__:
        is_valid = False
    except ValueError:
        is_valid = False
    return is_valid
|
TypeError
|
dataset/ETHPy150Open Dwolla/arbalest/arbalest/pipeline/__init__.py/is_day_hour
|
1,348
|
def __get_directory_keys(self, path):
    try:
        return [k.name for k in self.bucket.list(path, '/') if
                k.name.endswith('/')]
    except __HOLE__:
        return []
|
StopIteration
|
dataset/ETHPy150Open Dwolla/arbalest/arbalest/pipeline/__init__.py/S3SortedDataSources.__get_directory_keys
|
1,349
|
@staticmethod
def __get_date_from_path(source):
    try:
        return datetime.datetime.strptime(source.split('/')[-1],
                                          '%Y-%d-%m').strftime('%Y-%d-%m')
    except __HOLE__:
        return None
|
ValueError
|
dataset/ETHPy150Open Dwolla/arbalest/arbalest/pipeline/__init__.py/SqlTimeSeriesImport.__get_date_from_path
|
1,350
|
def __init__(self, arch=ARCH_ARM_LINUX, memory=1024, debug_host=False, debug_target=False, qemu_bin=None, img_dir=".", img_fs="linux-rootfs.img", img_kernel="linux-kernel"):
    SockPuppet.Controller.__init__(self, debug=debug_host)
    self.debug_target = debug_target
    if qemu_bin is not None:
        self.QEMU_BIN = qemu_bin
    elif platform.system() in ["Windows", "Linux"]:
        self.QEMU_BIN = distutils.spawn.find_executable("qemu-system-arm")
    else:
        raise RuntimeError("Unsupported platform: %s" % platform.system())
    assert self.QEMU_BIN is not None and os.path.isfile(self.QEMU_BIN), "Missing QEMU executable at %r" % self.QEMU_BIN
    if arch == self.ARCH_ARM_LINUX:
        img_fs = os.path.join(img_dir, "linux-rootfs.img")
        img_kernel = os.path.join(img_dir, "linux-kernel")
        assert os.path.isfile(img_fs), "Missing %s" % img_fs
        assert os.path.isfile(img_kernel), "Missing %s" % img_kernel
        cmd = [self.QEMU_BIN, "-M", "vexpress-a9", "-snapshot", "-m", str(memory),
               "-kernel", img_kernel, "-sd", img_fs, "-append", "root=b300 console=ttyAMA0",
               "-net", "nic", "-net", "user,hostfwd=tcp::2222-:22", "-serial", "stdio",
               "-display", "none"]
    else:
        raise RuntimeError("Unsupported Architecture")
    if debug_target:
        self.target_out = sys.stdout
    else:
        self.target_out = open(os.devnull, "w")
    try:
        self.qemu_proc = subprocess.Popen(cmd, shell=False, stdout=self.target_out,
                                          stderr=self.target_out)
        self.connect()
        if debug_target:
            self.debug_client()
        self.send_file(SockPuppet.__file__, "/root/SockPuppet.py")
        self.send_quit()
        self.disconnect()
        self.connect()
        if debug_target:
            self.debug_client()
        # Send required gdb wrapper
        alf_debug_dir = os.path.join(ALF_BASE, "alf", "debug")
        # Send required gdb wrapper
        self.send_file(os.path.join(alf_debug_dir, "_common.py"))
        self.send_file(os.path.join(alf_debug_dir, "_gdb.py"))
        self.send_file(os.path.join(alf_debug_dir, "cmds.gdb"))
        # Send CERT exploitable to target
        exploitable_dir = os.path.join(ALF_BASE, "lib", "exploitable")
        self.send_file(os.path.join(exploitable_dir, "exploitable.py"))
        exploitable_dir = os.path.join(exploitable_dir, "lib")
        self.send_file(os.path.join(exploitable_dir, "__init__.py"))
        self.send_file(os.path.join(exploitable_dir, "analyzers.py"))
        self.send_file(os.path.join(exploitable_dir, "classifier.py"))
        self.send_file(os.path.join(exploitable_dir, "elf.py"))
        self.send_file(os.path.join(exploitable_dir, "gdb_wrapper.py"))
        self.send_file(os.path.join(exploitable_dir, "rules.py"))
        self.send_file(os.path.join(exploitable_dir, "tools.py"))
        self.send_file(os.path.join(exploitable_dir, "versions.py"))
        self.run_code(_remote_init, self.REMOTE_WORKING_DIR)
    except __HOLE__:
        self._kill_qemu()
        raise
    finally:
        if not debug_target:
            self.target_out.close()
|
RuntimeError
|
dataset/ETHPy150Open blackberry/ALF/alf/debug/_qemu.py/QEmuTarget.__init__
|
1,351
|
def get_template_sources(template_name, template_dirs=None):
    """
    Returns the absolute paths to "template_name", when appended to each
    directory in "template_dirs". Any paths that don't lie inside one of the
    template dirs are excluded from the result set, for security reasons.
    """
    if not template_dirs:
        template_dirs = settings.TEMPLATE_DIRS
    for template_dir in template_dirs:
        try:
            yield safe_join(template_dir, template_name)
        except __HOLE__:
            # The template dir name was a bytestring that wasn't valid UTF-8.
            raise
        except ValueError:
            # The joined path was located outside of this particular
            # template_dir (it might be inside another one, so this isn't
            # fatal).
            pass
|
UnicodeDecodeError
|
dataset/ETHPy150Open CollabQ/CollabQ/vendor/django/template/loaders/filesystem.py/get_template_sources
|
1,352
|
def load_template_source(template_name, template_dirs=None):
    tried = []
    for filepath in get_template_sources(template_name, template_dirs):
        try:
            return (open(filepath).read().decode(settings.FILE_CHARSET), filepath)
        except __HOLE__:
            tried.append(filepath)
    if tried:
        error_msg = "Tried %s" % tried
    else:
        error_msg = "Your TEMPLATE_DIRS setting is empty. Change it to point to at least one template directory."
    raise TemplateDoesNotExist, error_msg
|
IOError
|
dataset/ETHPy150Open CollabQ/CollabQ/vendor/django/template/loaders/filesystem.py/load_template_source
|
1,353
|
def _simpleTroveList(self, troveList, newFilesByTrove):
    log.info('Verifying %s' % " ".join(x[1].getName() for x in troveList))
    changedTroves = set()
    try:
        result = update.buildLocalChanges(self.db, troveList,
                                          root=self.cfg.root,
                                          forceSha1=self.forceHashCheck,
                                          ignoreTransient=True,
                                          updateContainers=True,
                                          statCache = self.statCache)
        if not result: return
        cs = result[0]
        changed = False
        for (changed, trv) in result[1]:
            if changed:
                changedTroves.add(trv.getNameVersionFlavor())
    except __HOLE__, err:
        if err.errno == 13:
            log.warning("Permission denied creating local changeset for"
                        " %s " % str([ x[0].getName() for x in troveList ]))
            return
    trovesChanged = []
    for (dbTrv, srcTrv, newVer, flags), (changed, localTrv) in \
            itertools.izip(troveList, result[1]):
        if srcTrv.getNameVersionFlavor() in newFilesByTrove:
            for path in newFilesByTrove[srcTrv.getNameVersionFlavor()]:
                self._addFile(cs, localTrv, path)
            localTrv.computeDigests()
            trvDiff = localTrv.diff(dbTrv, absolute = False)[0]
            cs.newTrove(trvDiff)
            trovesChanged.append(localTrv.getNameVersionFlavor())
        elif changed:
            trovesChanged.append(localTrv.getNameVersionFlavor())
    if trovesChanged:
        self._handleChangeSet(trovesChanged, cs)
|
OSError
|
dataset/ETHPy150Open sassoftware/conary/conary/cmds/verify.py/_FindLocalChanges._simpleTroveList
|
1,354
|
def _scanFilesystem(self, fullTroveList, dirType = NEW_FILES_OWNED_DIR):
    dirs = list(self.db.db.getTroveFiles(fullTroveList,
                                         onlyDirectories = True))
    skipDirs = dirset.DirectorySet(self.cfg.verifyDirsNoNewFiles)
    dirOwners = dirset.DirectoryDict()
    for trvInfo, dirName, stream in dirs:
        dirOwners[dirName] = trvInfo
    newFiles = []
    if dirType == NEW_FILES_ANY_DIR and '/' not in dirOwners:
        dirsToWalk = [ '/' ]
    else:
        dirsToWalk = sorted(dirOwners.itertops())
    dbPaths = self.db.db.getTroveFiles(fullTroveList)
    fsPaths = util.walkiter(dirsToWalk, skipPathSet = skipDirs,
                            root = self.cfg.root)
    lastDbPath = None
    lastFsPath = None
    try:
        for i in itertools.count(0):
            if lastDbPath is None:
                trvInfo, lastDbPath, lastDbStream = dbPaths.next()
            if lastFsPath is None:
                lastFsPath, lastFsStat = fsPaths.next()
            if lastDbPath < lastFsPath:
                # in the database, but not the filesystem. that means
                # it's gone missing, and we don't care much
                lastDbPath = None
            elif lastDbPath > lastFsPath:
                # it's in the filesystem, but not the database
                if not stat.S_ISDIR(lastFsStat.st_mode):
                    newFiles.append(lastFsPath)
                lastFsPath = None
            else:
                # it's in both places
                absPath = os.path.normpath(self.cfg.root + lastFsPath)
                self.statCache[absPath] = lastFsStat
                lastFsPath = None
                lastDbPath = None
    except __HOLE__:
        pass
    # we don't need this, but drain the iterator
    [ x for x in dbPaths ]
    if lastFsPath and not stat.S_ISDIR(lastFsStat.st_mode):
        newFiles.append(lastFsPath)
    for lastFsPath, lastFsStat in fsPaths:
        if not stat.S_ISDIR(lastFsStat.st_mode):
            newFiles.append(lastFsPath)
    # newFiles is a list of files which have been locally added.
    # filter out ones which are owned by other troves. a bit silly
    # to do this if --all is used.
    areOwned = self.db.db.pathsOwned(newFiles)
    newFiles = [ path for path, isOwned in
                 itertools.izip(newFiles, areOwned)
                 if not isOwned ]
    # now turn newFiles into a dict which maps troves being verified to the
    # new files for that trove. byTrove[None] lists new files which no
    # trove claims ownership of
    byTrove = {}
    for path in newFiles:
        trvInfo = dirOwners.get(path, None)
        l = byTrove.setdefault(trvInfo, [])
        l.append(path)
    return byTrove
|
StopIteration
|
dataset/ETHPy150Open sassoftware/conary/conary/cmds/verify.py/_FindLocalChanges._scanFilesystem
|
1,355
|
def _GetSourceSafely(obj):
    try:
        return inspect.getsource(obj)
    except __HOLE__:
        logs.LogOnce(
            _LOG.warning,
            'Unable to load source code for %s. Only logging this once.', obj)
        return ''
|
IOError
|
dataset/ETHPy150Open google/openhtf/openhtf/io/test_record.py/_GetSourceSafely
|
1,356
|
def _read_yaml(self, path):
    '''
    reads a yaml-formatted configuration file at the given path and
    returns a python dictionary with the pared items in it.
    '''
    try:
        with open(path) as stream:
            return yaml.load(stream)
    except yaml.parser.ParserError as yamlerr:
        print("Failed to parse configfile: {0}".format(path))
        print(yamlerr)
        sys.exit(1)
    except yaml.scanner.ScannerError as yamlerr:
        print("Failed to parse configfile: {0}".format(path))
        print(yamlerr)
        sys.exit(1)
    except __HOLE__ as ioerr:
        print("Failed to read configfile:")
        print(os.strerror(ioerr.errno))
        sys.exit(1)
    except OSError as oserr:
        print("Failed to read configfile:")
        print(os.strerror(oserr.errno))
        sys.exit(1)
|
IOError
|
dataset/ETHPy150Open felskrone/salt-eventsd/salteventsd/loader.py/SaltEventsdLoader._read_yaml
|
1,357
|
def get_dynamic_route_by_def_name(self, def_name, routes):
    try:
        return [i for i in routes if def_name in i.mapping.values()][0]
    except __HOLE__:
        return None
|
IndexError
|
dataset/ETHPy150Open chibisov/drf-extensions/tests_app/tests/unit/routers/tests.py/ExtendedDefaultRouterTest.get_dynamic_route_by_def_name
|
1,358
|
def openlock(filename, operation, wait=True):
"""
Returns a file-like object that gets a fnctl() lock.
`operation` should be one of LOCK_SH or LOCK_EX for shared or
exclusive locks.
If `wait` is False, then openlock() will not block on trying to
acquire the lock.
"""
f = os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT, 0666), "r+")
if not wait:
operation |= LOCK_NB
try:
lockf(f.fileno(), operation)
except __HOLE__, err:
if not wait and err.errno in (EACCES, EAGAIN):
from django.core.management.base import CommandError
raise CommandError("Could not acquire lock on '%s' held by %s." %
(filename, f.readline().strip()))
raise
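    # Record the owner as "hostname:pid" so that a blocked contender can
    # report who holds the lock (read back via f.readline() above).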
print >>f, "%s:%d" % (socket.gethostname(), os.getpid())
f.truncate()
f.flush()
return f
|
IOError
|
dataset/ETHPy150Open e-loue/django-lean/django_lean/lockfile.py/openlock
|
1,359
|
def _run_get_new_deps(self):
self.task.set_tracking_url = self.tracking_url_callback
self.task.set_status_message = self.status_message_callback
def deprecated_tracking_url_callback(*args, **kwargs):
warnings.warn("tracking_url_callback in run() args is deprecated, use "
"set_tracking_url instead.", DeprecationWarning)
self.tracking_url_callback(*args, **kwargs)
run_again = False
try:
task_gen = self.task.run(tracking_url_callback=deprecated_tracking_url_callback)
except __HOLE__ as ex:
if 'unexpected keyword argument' not in str(ex):
raise
run_again = True
if run_again:
task_gen = self.task.run()
self.task.set_tracking_url = None
self.task.set_status_message = None
if not isinstance(task_gen, types.GeneratorType):
return None
next_send = None
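        # Dynamic dependencies: when run() returns a generator, each yielded
        # batch of requirements is either resumed with its outputs via send()
        # (if already complete) or handed back to the scheduler as new deps.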
while True:
try:
if next_send is None:
requires = six.next(task_gen)
else:
requires = task_gen.send(next_send)
except StopIteration:
return None
new_req = flatten(requires)
new_deps = [(t.task_module, t.task_family, t.to_str_params())
for t in new_req]
if all(t.complete() for t in new_req):
next_send = getpaths(requires)
else:
return new_deps
|
TypeError
|
dataset/ETHPy150Open spotify/luigi/luigi/worker.py/TaskProcess._run_get_new_deps
|
1,360
|
def run(self):
logger.info('[pid %s] Worker %s running %s', os.getpid(), self.worker_id, self.task)
if self.random_seed:
# Need to have different random seeds if running in separate processes
random.seed((os.getpid(), time.time()))
status = FAILED
expl = ''
missing = []
new_deps = []
try:
# Verify that all the tasks are fulfilled! For external tasks we
# don't care about unfulfilled dependencies, because we are just
# checking completeness of self.task so outputs of dependencies are
# irrelevant.
if not _is_external(self.task):
missing = [dep.task_id for dep in self.task.deps() if not dep.complete()]
if missing:
deps = 'dependency' if len(missing) == 1 else 'dependencies'
raise RuntimeError('Unfulfilled %s at run time: %s' % (deps, ', '.join(missing)))
self.task.trigger_event(Event.START, self.task)
t0 = time.time()
status = None
if _is_external(self.task):
# External task
# TODO(erikbern): We should check for task completeness after non-external tasks too!
# This will resolve #814 and make things a lot more consistent
if self.task.complete():
status = DONE
else:
status = FAILED
expl = 'Task is an external data dependency ' \
'and data does not exist (yet?).'
else:
new_deps = self._run_get_new_deps()
status = DONE if not new_deps else PENDING
if new_deps:
logger.info(
'[pid %s] Worker %s new requirements %s',
os.getpid(), self.worker_id, self.task)
elif status == DONE:
self.task.trigger_event(
Event.PROCESSING_TIME, self.task, time.time() - t0)
expl = self.task.on_success()
logger.info('[pid %s] Worker %s done %s', os.getpid(),
self.worker_id, self.task)
self.task.trigger_event(Event.SUCCESS, self.task)
except __HOLE__:
raise
except BaseException as ex:
status = FAILED
logger.exception("[pid %s] Worker %s failed %s", os.getpid(), self.worker_id, self.task)
self.task.trigger_event(Event.FAILURE, self.task, ex)
raw_error_message = self.task.on_failure(ex)
expl = raw_error_message
finally:
self.result_queue.put(
(self.task.task_id, status, expl, missing, new_deps))
|
KeyboardInterrupt
|
dataset/ETHPy150Open spotify/luigi/luigi/worker.py/TaskProcess.run
|
1,361
|
def terminate(self):
"""Terminate this process and its subprocesses."""
# default terminate() doesn't cleanup child processes, it orphans them.
try:
return self._recursive_terminate()
except __HOLE__:
return super(TaskProcess, self).terminate()
|
ImportError
|
dataset/ETHPy150Open spotify/luigi/luigi/worker.py/TaskProcess.terminate
|
1,362
|
def get(self, block=None, timeout=None):
try:
return self.pop()
except __HOLE__:
raise Queue.Empty
|
IndexError
|
dataset/ETHPy150Open spotify/luigi/luigi/worker.py/DequeQueue.get
|
1,363
|
def __init__(self, scheduler=None, worker_id=None, worker_processes=1, assistant=False, **kwargs):
if scheduler is None:
scheduler = CentralPlannerScheduler()
self.worker_processes = int(worker_processes)
self._worker_info = self._generate_worker_info()
if not worker_id:
worker_id = 'Worker(%s)' % ', '.join(['%s=%s' % (k, v) for k, v in self._worker_info])
self._config = worker(**kwargs)
assert self._config.wait_interval >= _WAIT_INTERVAL_EPS, "[worker] wait_interval must be positive"
assert self._config.wait_jitter >= 0.0, "[worker] wait_jitter must be equal or greater than zero"
self._id = worker_id
self._scheduler = scheduler
self._assistant = assistant
self._stop_requesting_work = False
self.host = socket.gethostname()
self._scheduled_tasks = {}
self._suspended_tasks = {}
self._first_task = None
self.add_succeeded = True
self.run_succeeded = True
self.unfulfilled_counts = collections.defaultdict(int)
# note that ``signal.signal(signal.SIGUSR1, fn)`` only works inside the main execution thread, which is why we
# provide the ability to conditionally install the hook.
if not self._config.no_install_shutdown_handler:
try:
signal.signal(signal.SIGUSR1, self.handle_interrupt)
except __HOLE__:
pass
# Keep info about what tasks are running (could be in other processes)
if worker_processes == 1:
self._task_result_queue = DequeQueue()
else:
self._task_result_queue = multiprocessing.Queue()
self._running_tasks = {}
# Stuff for execution_summary
self._add_task_history = []
self._get_work_response_history = []
|
AttributeError
|
dataset/ETHPy150Open spotify/luigi/luigi/worker.py/Worker.__init__
|
1,364
|
def add(self, task, multiprocess=False):
"""
Add a Task for the worker to check and possibly schedule and run.
Returns True if task and its dependencies were successfully scheduled or completed before.
"""
if self._first_task is None and hasattr(task, 'task_id'):
self._first_task = task.task_id
self.add_succeeded = True
if multiprocess:
queue = multiprocessing.Manager().Queue()
pool = multiprocessing.Pool()
else:
queue = DequeQueue()
pool = SingleProcessPool()
self._validate_task(task)
pool.apply_async(check_complete, [task, queue])
# we track queue size ourselves because len(queue) won't work for multiprocessing
queue_size = 1
try:
seen = set([task.task_id])
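            # Breadth-first walk of the dependency graph: completeness checks
            # run asynchronously in the pool and _add() yields any newly
            # discovered dependencies, which are queued for checking in turn.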
while queue_size:
current = queue.get()
queue_size -= 1
item, is_complete = current
for next in self._add(item, is_complete):
if next.task_id not in seen:
self._validate_task(next)
seen.add(next.task_id)
pool.apply_async(check_complete, [next, queue])
queue_size += 1
except (__HOLE__, TaskException):
raise
except Exception as ex:
self.add_succeeded = False
formatted_traceback = traceback.format_exc()
self._log_unexpected_error(task)
task.trigger_event(Event.BROKEN_TASK, task, ex)
self._email_unexpected_error(task, formatted_traceback)
raise
finally:
pool.close()
pool.join()
return self.add_succeeded
|
KeyboardInterrupt
|
dataset/ETHPy150Open spotify/luigi/luigi/worker.py/Worker.add
|
1,365
|
def _add(self, task, is_complete):
if self._config.task_limit is not None and len(self._scheduled_tasks) >= self._config.task_limit:
logger.warning('Will not schedule %s or any dependencies due to exceeded task-limit of %d', task, self._config.task_limit)
return
formatted_traceback = None
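        # Classify the task for the scheduler: a failed completeness check
        # yields UNKNOWN, a complete task DONE, a missing external task
        # PENDING (runnable only with retry_external_tasks), and anything
        # else PENDING/runnable; disabled tasks are downgraded to DISABLED.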
try:
self._check_complete_value(is_complete)
except __HOLE__:
raise
except AsyncCompletionException as ex:
formatted_traceback = ex.trace
except BaseException:
formatted_traceback = traceback.format_exc()
if formatted_traceback is not None:
self.add_succeeded = False
self._log_complete_error(task, formatted_traceback)
task.trigger_event(Event.DEPENDENCY_MISSING, task)
self._email_complete_error(task, formatted_traceback)
deps = None
status = UNKNOWN
runnable = False
elif is_complete:
deps = None
status = DONE
runnable = False
task.trigger_event(Event.DEPENDENCY_PRESENT, task)
elif _is_external(task):
deps = None
status = PENDING
runnable = worker().retry_external_tasks
task.trigger_event(Event.DEPENDENCY_MISSING, task)
logger.warning('Data for %s does not exist (yet?). The task is an '
                           'external data dependency, so it cannot be run from'
' this luigi process.', task)
else:
try:
deps = task.deps()
except Exception as ex:
formatted_traceback = traceback.format_exc()
self.add_succeeded = False
self._log_dependency_error(task, formatted_traceback)
task.trigger_event(Event.BROKEN_TASK, task, ex)
self._email_dependency_error(task, formatted_traceback)
deps = None
status = UNKNOWN
runnable = False
else:
status = PENDING
runnable = True
if task.disabled:
status = DISABLED
if deps:
for d in deps:
self._validate_dependency(d)
task.trigger_event(Event.DEPENDENCY_DISCOVERED, task, d)
yield d # return additional tasks to add
deps = [d.task_id for d in deps]
self._scheduled_tasks[task.task_id] = task
self._add_task(worker=self._id, task_id=task.task_id, status=status,
deps=deps, runnable=runnable, priority=task.priority,
resources=task.process_resources(),
params=task.to_str_params(),
family=task.task_family,
module=task.task_module)
|
KeyboardInterrupt
|
dataset/ETHPy150Open spotify/luigi/luigi/worker.py/Worker._add
|
1,366
|
def _set_connection(self, connection):
try:
self._connection = weakref.proxy(connection)
self._connection._protocol
except (AttributeError, __HOLE__):
raise errors.InterfaceError(errno=2048)
|
TypeError
|
dataset/ETHPy150Open appnexus/schema-tool/schematool/mysql/connector/cursor.py/MySQLCursor._set_connection
|
1,367
|
def _have_unread_result(self):
"""Check whether there is an unread result"""
try:
return self._connection.unread_result
except __HOLE__:
return False
|
AttributeError
|
dataset/ETHPy150Open appnexus/schema-tool/schematool/mysql/connector/cursor.py/MySQLCursor._have_unread_result
|
1,368
|
def _handle_noresultset(self, res):
"""Handles result of execute() when there is no result set
"""
try:
self._rowcount = res['affected_rows']
self._last_insert_id = res['insert_id']
self._warning_count = res['warning_count']
except (KeyError, __HOLE__), err:
raise errors.ProgrammingError(
"Failed handling non-resultset; %s" % err)
if self._connection.get_warnings is True and self._warning_count:
self._warnings = self._fetch_warnings()
|
TypeError
|
dataset/ETHPy150Open appnexus/schema-tool/schematool/mysql/connector/cursor.py/MySQLCursor._handle_noresultset
|
1,369
|
def execute(self, operation, params=None, multi=False):
"""Executes the given operation
Executes the given operation substituting any markers with
the given parameters.
For example, getting all rows where id is 5:
cursor.execute("SELECT * FROM t1 WHERE id = %s", (5,))
The multi argument should be set to True when executing multiple
statements in one operation. If not set and multiple results are
found, an InterfaceError will be raised.
        If warnings were generated, and connection.get_warnings is True, then
self._warnings will be a list containing these warnings.
Returns an iterator when multi is True, otherwise None.
"""
if not operation:
return
if self._have_unread_result():
raise errors.InternalError("Unread result found.")
self._reset_result()
stmt = ''
try:
if isinstance(operation, unicode):
operation = operation.encode(self._connection.charset)
except (UnicodeDecodeError, UnicodeEncodeError), e:
raise errors.ProgrammingError(str(e))
if params is not None:
try:
stmt = operation % self._process_params(params)
except __HOLE__:
raise errors.ProgrammingError(
"Wrong number of arguments during string formatting")
else:
stmt = operation
if multi:
self._executed = stmt
self._executed_list = []
return self._execute_iter(self._connection.cmd_query_iter(stmt))
else:
self._executed = stmt
try:
self._handle_result(self._connection.cmd_query(stmt))
except errors.InterfaceError, err:
if self._connection._have_next_result:
raise errors.InterfaceError(
"Use multi=True when executing multiple statements")
raise
return None
|
TypeError
|
dataset/ETHPy150Open appnexus/schema-tool/schematool/mysql/connector/cursor.py/MySQLCursor.execute
|
1,370
|
def executemany(self, operation, seq_params):
"""Execute the given operation multiple times
The executemany() method will execute the operation iterating
over the list of parameters in seq_params.
Example: Inserting 3 new employees and their phone number
data = [
('Jane','555-001'),
('Joe', '555-001'),
('John', '555-003')
]
stmt = "INSERT INTO employees (name, phone) VALUES ('%s','%s')"
cursor.executemany(stmt, data)
        INSERT statements are optimized by batching the data, that is,
        using the MySQL multiple-row syntax.
Results are discarded. If they are needed, consider looping over
data using the execute() method.
"""
if not operation:
return
if self._have_unread_result():
raise errors.InternalError("Unread result found.")
elif len(RE_SQL_SPLIT_STMTS.split(operation)) > 1:
raise errors.InternalError(
"executemany() does not support multiple statements")
# Optimize INSERTs by batching them
if re.match(RE_SQL_INSERT_STMT,operation):
tmp = re.sub(RE_SQL_ON_DUPLICATE, '',
re.sub(RE_SQL_COMMENT, '', operation))
m = re.search(RE_SQL_INSERT_VALUES, tmp)
if not m:
raise errors.InterfaceError(
"Failed rewriting statement for multi-row INSERT. "
"Check SQL syntax."
)
fmt = m.group(1)
values = []
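            # Render each parameter tuple through the captured VALUES template
            # and splice the rendered rows into a single multi-row INSERT.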
for params in seq_params:
values.append(fmt % self._process_params(params))
operation = operation.replace(m.group(1), ','.join(values), 1)
return self.execute(operation)
rowcnt = 0
try:
for params in seq_params:
self.execute(operation, params)
if self.with_rows and self._have_unread_result():
self.fetchall()
rowcnt += self._rowcount
except (__HOLE__, TypeError), err:
raise errors.InterfaceError(
"Failed executing the operation; %s" % err)
except:
# Raise whatever execute() raises
raise
self._rowcount = rowcnt
|
ValueError
|
dataset/ETHPy150Open appnexus/schema-tool/schematool/mysql/connector/cursor.py/MySQLCursor.executemany
|
1,371
|
@classmethod
def setUpClass(cls):
super(CitationsViewsTestCase, cls).setUpClass()
# populate the DB with parsed citation styles
try:
parse_citation_styles.main()
except __HOLE__:
pass
|
OSError
|
dataset/ETHPy150Open CenterForOpenScience/osf.io/tests/test_citations.py/CitationsViewsTestCase.setUpClass
|
1,372
|
def delete(self, package):
filename = self.get_path(package)
os.unlink(filename)
version_dir = os.path.dirname(filename)
try:
os.rmdir(version_dir)
except __HOLE__:
return
package_dir = os.path.dirname(version_dir)
try:
os.rmdir(package_dir)
except OSError:
return
|
OSError
|
dataset/ETHPy150Open mathcamp/pypicloud/pypicloud/storage/files.py/FileStorage.delete
|
1,373
|
def __call__(self, *args, **kwargs):
# If the function args cannot be used as a cache hash key, fail fast
key = cPickle.dumps((args, kwargs))
try:
return self.cache[key]
except __HOLE__:
value = self.func(*args, **kwargs)
self.cache[key] = value
return value
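    # Hypothetical usage (illustrative only; assumes memoized is applied as a
    # decorator, which the func/cache attributes suggest):
    #   @memoized
    #   def lookup(region):
    #       ...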
|
KeyError
|
dataset/ETHPy150Open BrightcoveOS/Diamond/src/collectors/elb/elb.py/memoized.__call__
|
1,374
|
def getcomments(pyObject):
"""Get lines of comments immediately preceding an object's source code.
Returns None when source can't be found.
"""
try:
lines, lnum = findsource(pyObject)
except (__HOLE__, TypeError):
return None
if ismodule(pyObject):
# Look for a comment block at the top of the file.
start = 0
if lines and lines[0][:2] == '#!': start = 1
while start < len(lines) and string.strip(lines[start]) in ('', '#'):
start = start + 1
if start < len(lines) and lines[start][:1] == '#':
comments = []
end = start
while end < len(lines) and lines[end][:1] == '#':
comments.append(string.expandtabs(lines[end]))
end = end + 1
return string.join(comments, '')
# Look for a preceding block of comments at the same indentation.
elif lnum > 0:
indent = indentsize(lines[lnum])
end = lnum - 1
if end >= 0 and string.lstrip(lines[end])[:1] == '#' and \
indentsize(lines[end]) == indent:
comments = [string.lstrip(string.expandtabs(lines[end]))]
if end > 0:
end = end - 1
comment = string.lstrip(string.expandtabs(lines[end]))
while comment[:1] == '#' and indentsize(lines[end]) == indent:
comments[:0] = [comment]
end = end - 1
if end < 0: break
comment = string.lstrip(string.expandtabs(lines[end]))
while comments and string.strip(comments[0]) == '#':
comments[:1] = []
while comments and string.strip(comments[-1]) == '#':
comments[-1:] = []
return string.join(comments, '')
|
IOError
|
dataset/ETHPy150Open ufora/ufora/packages/python/pyfora/PyforaInspect.py/getcomments
|
1,375
|
def getframeinfo(frame, context=1):
"""Get information about a frame or traceback object.
A tuple of five things is returned: the filename, the line number of
the current line, the function name, a list of lines of context from
the source code, and the index of the current line within that list.
The optional second argument specifies the number of lines of context
to return, which are centered around the current line."""
if istraceback(frame):
lineno = frame.tb_lineno
frame = frame.tb_frame
else:
lineno = frame.f_lineno
if not isframe(frame):
raise TypeError('{!r} is not a frame or traceback object'.format(frame))
filename = getsourcefile(frame) or getfile(frame)
lines = None
if filename == "<stdin>":
lineno = StdinCache.singleton().refreshFromReadline().findCodeLineNumberWithinStdin(frame.f_code) + 1
lines = StdinCache.singleton().getlines()
if context > 0:
start = lineno - 1 - context//2
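        # Center a window of `context` lines on the current line; the start
        # index is clamped to the source listing's bounds further below.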
try:
if lines is None:
lines, _ = findsource(frame)
except __HOLE__:
if lines is None:
lines = index = None
else:
start = max(start, 1)
start = max(0, min(start, len(lines) - context))
lines = lines[start:start+context]
index = lineno - 1 - start
else:
lines = index = None
return Traceback(filename, lineno, frame.f_code.co_name, lines, index)
|
IOError
|
dataset/ETHPy150Open ufora/ufora/packages/python/pyfora/PyforaInspect.py/getframeinfo
|
1,376
|
def maybeName(obj):
""" Returns an object's __name__ attribute or it's string representation.
@param obj any object
@return obj name or string representation
"""
try:
return obj.__name__
except (__HOLE__, ):
return str(obj)
|
AttributeError
|
dataset/ETHPy150Open blampe/IbPy/ib/lib/__init__.py/maybeName
|
1,377
|
def extracted(name,
source,
archive_format,
archive_user=None,
password=None,
user=None,
group=None,
tar_options=None,
zip_options=None,
source_hash=None,
if_missing=None,
keep=False,
trim_output=False,
source_hash_update=None):
'''
.. versionadded:: 2014.1.0
    State that makes sure an archive is extracted into a directory.
The downloaded archive is erased if successfully extracted.
The archive is downloaded only if necessary.
.. note::
If ``if_missing`` is not defined, this state will check for ``name``
instead. If ``name`` exists, it will assume the archive was previously
extracted successfully and will not extract it again.
    Example, tar with flag for lzma compression:
.. code-block:: yaml
graylog2-server:
archive.extracted:
- name: /opt/
- source: https://github.com/downloads/Graylog2/graylog2-server/graylog2-server-0.9.6p1.tar.lzma
- source_hash: md5=499ae16dcae71eeb7c3a30c75ea7a1a6
- tar_options: J
- archive_format: tar
- if_missing: /opt/graylog2-server-0.9.6p1/
Example, tar with flag for verbose output:
.. code-block:: yaml
graylog2-server:
archive.extracted:
- name: /opt/
- source: https://github.com/downloads/Graylog2/graylog2-server/graylog2-server-0.9.6p1.tar.gz
- source_hash: md5=499ae16dcae71eeb7c3a30c75ea7a1a6
- archive_format: tar
- tar_options: v
- user: root
- group: root
- if_missing: /opt/graylog2-server-0.9.6p1/
    Example, tar with flag for lzma compression, updating if source_hash differs from what was
previously extracted:
.. code-block:: yaml
graylog2-server:
archive.extracted:
- name: /opt/
- source: https://github.com/downloads/Graylog2/graylog2-server/graylog2-server-0.9.6p1.tar.lzma
- source_hash: md5=499ae16dcae71eeb7c3a30c75ea7a1a6
- source_hash_update: true
- tar_options: J
- archive_format: tar
- if_missing: /opt/graylog2-server-0.9.6p1/
name
Directory name where to extract the archive
password
Password to use with password protected zip files. Currently only zip
files with passwords are supported.
.. versionadded:: 2016.3.0
source
Archive source, same syntax as file.managed source argument.
source_hash
Hash of source file, or file with list of hash-to-file mappings.
It uses the same syntax as the file.managed source_hash argument.
source_hash_update
        Set this to true if the archive should be extracted when source_hash
        has changed. This would extract regardless of the `if_missing`
parameter.
archive_format
tar, zip or rar
user
The user to own each extracted file.
.. versionadded:: 2015.8.0
group
The group to own each extracted file.
.. versionadded:: 2015.8.0
if_missing
        Some archives, such as tar, extract themselves into a subfolder.
This directive can be used to validate if the archive had been
previously extracted.
tar_options
Required if used with ``archive_format: tar``, otherwise optional.
It needs to be the tar argument specific to the archive being extracted,
such as 'J' for LZMA or 'v' to verbosely list files processed.
Using this option means that the tar executable on the target will
be used, which is less platform independent.
Main operators like -x, --extract, --get, -c and -f/--file
**should not be used** here.
        If ``archive_format`` is ``tar`` and this option is not set,
then the Python tarfile module is used. The tarfile module supports gzip
and bz2 in Python 2.
zip_options
        Optional when using ``zip`` archives, ignored when using other archive
        formats. This is mostly used to overwrite existing files with ``o``.
        These options are only used when the ``unzip`` binary is used.
.. versionadded:: 2016.3.1
keep
Keep the archive in the minion's cache
trim_output
        The number of files to list on success before the rest are trimmed.
        If this is set to True, it will default to 100.
'''
ret = {'name': name, 'result': None, 'changes': {}, 'comment': ''}
valid_archives = ('tar', 'rar', 'zip')
if archive_format not in valid_archives:
ret['result'] = False
ret['comment'] = '{0} is not supported, valid formats are: {1}'.format(
archive_format, ','.join(valid_archives))
return ret
if not name.endswith('/'):
name += '/'
if if_missing is None:
if_missing = name
if source_hash and source_hash_update:
hash = source_hash.split("=")
source_file = '{0}.{1}'.format(os.path.basename(source), hash[0])
hash_fname = os.path.join(__opts__['cachedir'],
'files',
__env__,
source_file)
if compareChecksum(hash_fname, name, hash[1]):
ret['result'] = True
ret['comment'] = 'Hash {0} has not changed'.format(hash[1])
return ret
elif (
__salt__['file.directory_exists'](if_missing)
or __salt__['file.file_exists'](if_missing)
):
ret['result'] = True
ret['comment'] = '{0} already exists'.format(if_missing)
return ret
log.debug('Input seem valid so far')
filename = os.path.join(__opts__['cachedir'],
'files',
__env__,
'{0}.{1}'.format(re.sub('[:/\\\\]', '_', if_missing),
archive_format))
if __opts__['test']:
source_match = source
else:
try:
source_match = __salt__['file.source_list'](source,
source_hash,
__env__)[0]
except CommandExecutionError as exc:
ret['result'] = False
ret['comment'] = exc.strerror
return ret
if not os.path.exists(filename):
if __opts__['test']:
ret['result'] = None
ret['comment'] = \
'{0} {1} would be downloaded to cache'.format(
'One of' if not isinstance(source_match, six.string_types)
else 'Archive',
source_match
)
return ret
log.debug('%s is not in cache, downloading it', source_match)
file_result = __salt__['state.single']('file.managed',
filename,
source=source,
source_hash=source_hash,
makedirs=True,
saltenv=__env__)
log.debug('file.managed: {0}'.format(file_result))
# get value of first key
try:
file_result = file_result[next(six.iterkeys(file_result))]
except AttributeError:
pass
try:
if not file_result['result']:
log.debug('failed to download {0}'.format(source))
return file_result
except __HOLE__:
if not file_result:
log.debug('failed to download {0}'.format(source))
return file_result
else:
log.debug('Archive %s is already in cache', name)
if __opts__['test']:
ret['result'] = None
ret['comment'] = '{0} {1} would be extracted to {2}'.format(
'One of' if not isinstance(source_match, six.string_types)
else 'Archive',
source_match,
name
)
return ret
__salt__['file.makedirs'](name, user=user, group=group)
log.debug('Extracting {0} to {1}'.format(filename, name))
if archive_format == 'zip':
files = __salt__['archive.unzip'](filename, name, options=zip_options, trim_output=trim_output, password=password)
elif archive_format == 'rar':
files = __salt__['archive.unrar'](filename, name, trim_output=trim_output)
else:
if tar_options is None:
with closing(tarfile.open(filename, 'r')) as tar:
files = tar.getnames()
tar.extractall(name)
else:
tar_opts = tar_options.split(' ')
tar_cmd = ['tar']
tar_shortopts = 'x'
tar_longopts = []
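            # Parse user-supplied tar options: the first bare (non-dash) token
            # has 'x' and 'f' stripped and is folded into the short-option
            # cluster; every other token is passed through to tar verbatim.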
for position, opt in enumerate(tar_opts):
if opt.startswith('-'):
tar_longopts.append(opt)
else:
if position > 0:
tar_longopts.append(opt)
else:
append_opt = opt
append_opt = append_opt.replace('x', '').replace('f', '')
tar_shortopts = tar_shortopts + append_opt
tar_cmd.append(tar_shortopts)
tar_cmd.extend(tar_longopts)
tar_cmd.extend(['-f', filename])
results = __salt__['cmd.run_all'](tar_cmd, cwd=name, python_shell=False)
if results['retcode'] != 0:
ret['result'] = False
ret['changes'] = results
return ret
if 'bsdtar' in __salt__['cmd.run']('tar --version', python_shell=False):
files = results['stderr']
else:
files = results['stdout']
if not files:
files = 'no tar output so far'
# Recursively set user and group ownership of files after extraction.
# Note: We do this here because we might not have access to the cachedir.
if user or group:
dir_result = __salt__['state.single']('file.directory',
if_missing,
user=user,
group=group,
recurse=['user', 'group'])
log.debug('file.directory: {0}'.format(dir_result))
if len(files) > 0:
ret['result'] = True
ret['changes']['directories_created'] = [name]
ret['changes']['extracted_files'] = files
ret['comment'] = '{0} extracted to {1}'.format(source_match, name)
if not keep:
os.unlink(filename)
if source_hash and source_hash_update:
updateChecksum(hash_fname, name, hash[1])
else:
__salt__['file.remove'](if_missing)
ret['result'] = False
ret['comment'] = 'Can\'t extract content of {0}'.format(source_match)
return ret
|
TypeError
|
dataset/ETHPy150Open saltstack/salt/salt/states/archive.py/extracted
|
1,378
|
def validateFacts(val, factsToCheck):
# may be called in streaming batches or all at end (final) if not streaming
modelXbrl = val.modelXbrl
modelDocument = modelXbrl.modelDocument
# note EBA 2.1 is in ModelDocument.py
timelessDatePattern = re.compile(r"\s*([0-9]{4})-([0-9]{2})-([0-9]{2})\s*$")
for cntx in modelXbrl.contexts.values():
if getattr(cntx, "_batchChecked", False):
continue # prior streaming batch already checked
cntx._batchChecked = True
val.cntxEntities.add(cntx.entityIdentifier)
dateElts = XmlUtil.descendants(cntx, XbrlConst.xbrli, ("startDate","endDate","instant"))
if any(not timelessDatePattern.match(e.textValue) for e in dateElts):
modelXbrl.error(("EBA.2.10","EIOPA.2.10"),
_('Period dates must be whole dates without time or timezone: %(dates)s.'),
modelObject=cntx, dates=", ".join(e.text for e in dateElts))
if cntx.isForeverPeriod:
modelXbrl.error(("EBA.2.11","EIOPA.N.2.11"),
_('Forever context period is not allowed.'),
modelObject=cntx)
elif cntx.isStartEndPeriod:
modelXbrl.error(("EBA.2.13","EIOPA.N.2.11"),
_('Start-End (flow) context period is not allowed.'),
modelObject=cntx)
elif cntx.isInstantPeriod:
# cannot pass context object to final() below, for error logging, if streaming mode
val.cntxDates[cntx.instantDatetime].add(modelXbrl if getattr(val.modelXbrl, "isStreamingMode", False)
else cntx)
if cntx.hasSegment:
modelXbrl.error(("EBA.2.14","EIOPA.N.2.14"),
_("Contexts MUST NOT contain xbrli:segment values: %(cntx)s.'"),
modelObject=cntx, cntx=cntx.id)
if cntx.nonDimValues("scenario"):
modelXbrl.error(("EBA.2.15","EIOPA.S.2.15" if val.isEIOPAfullVersion else "EIOPA.N.2.15"),
_("Contexts MUST NOT contain non-dimensional xbrli:scenario values: %(cntx)s.'"),
modelObject=cntx, cntx=cntx.id,
messageCodes=("EBA.2.15","EIOPA.N.2.15","EIOPA.S.2.15"))
val.unusedCntxIDs.add(cntx.id)
if val.isEIOPA_2_0_1 and len(cntx.id) > 128:
modelXbrl.warning("EIOPA.S.2.6",
_("Contexts IDs SHOULD be short: %(cntx)s.'"),
modelObject=cntx, cntx=cntx.id)
for unit in modelXbrl.units.values():
if getattr(unit, "_batchChecked", False):
continue # prior streaming batch already checked
unit._batchChecked = True
val.unusedUnitIDs.add(unit.id)
    factsByQname = defaultdict(set)  # indexes this batch's facts by concept QName
for f in factsToCheck:
factsByQname[f.qname].add(f)
val.unusedCntxIDs.discard(f.contextID)
val.unusedUnitIDs.discard(f.unitID)
if f.objectIndex < val.firstFactObjectIndex:
val.firstFactObjectIndex = f.objectIndex
val.firstFact = f
for fIndicators in factsByQname[qnFIndicators]:
val.numFilingIndicatorTuples += 1
for fIndicator in fIndicators.modelTupleFacts:
_value = (getattr(fIndicator, "xValue", None) or fIndicator.value) # use validated xValue if DTS else value for skipDTS
_filed = fIndicator.get("{http://www.eurofiling.info/xbrl/ext/filing-indicators}filed", "true") in ("true", "1")
if _value in val.filingIndicators:
modelXbrl.error(("EBA.1.6.1", "EIOPA.1.6.1"),
_('Multiple filing indicators facts for indicator %(filingIndicator)s.'),
modelObject=(fIndicator, val.filingIndicators[_value]), filingIndicator=_value)
if _filed and not val.filingIndicators[_value]:
val.filingIndicators[_value] = _filed #set to filed if any of the multiple indicators are filed=true
else: # not a duplicate filing indicator
val.filingIndicators[_value] = _filed
val.unusedCntxIDs.discard(fIndicator.contextID)
cntx = fIndicator.context
if cntx is not None and (cntx.hasSegment or cntx.hasScenario):
modelXbrl.error("EIOPA.N.1.6.d" if val.isEIOPAfullVersion else "EIOPA.S.1.6.d",
_('Filing indicators must not contain segment or scenario elements %(filingIndicator)s.'),
modelObject=fIndicator, filingIndicator=_value)
if fIndicators.objectIndex > val.firstFactObjectIndex:
modelXbrl.warning("EIOPA.1.6.2",
_('Filing indicators should precede first fact %(firstFact)s.'),
modelObject=(fIndicators, val.firstFact), firstFact=val.firstFact.qname)
if val.isEIOPAfullVersion:
for fIndicator in factsByQname[qnFilingIndicator]:
if fIndicator.getparent().qname == XbrlConst.qnXbrliXbrl:
_isPos = fIndicator.get("{http://www.eurofiling.info/xbrl/ext/filing-indicators}filed", "true") in ("true", "1")
_value = (getattr(fIndicator, "xValue", None) or fIndicator.value) # use validated xValue if DTS else value for skipDTS
modelXbrl.error("EIOPA.1.6.a" if _isPos else "EIOPA.1.6.b",
_('Filing indicators must be in a tuple %(filingIndicator)s.'),
modelObject=fIndicator, filingIndicator=_value,
messageCodes=("EIOPA.1.6.a", "EIOPA.1.6.b"))
otherFacts = {} # (contextHash, unitHash, xmlLangHash) : fact
nilFacts = []
stringFactsWithXmlLang = []
nonMonetaryNonPureFacts = []
for qname, facts in factsByQname.items():
for f in facts:
if f.qname == qnFilingIndicator:
continue # skip erroneous root-level filing indicators
if modelXbrl.skipDTS:
c = f.qname.localName[0]
isNumeric = c in ('m', 'p', 'r', 'i')
isMonetary = c == 'm'
isInteger = c == 'i'
isPercent = c == 'p'
isString = c == 's'
isEnum = c == 'e'
else:
concept = f.concept
if concept is not None:
isNumeric = concept.isNumeric
isMonetary = concept.isMonetary
isInteger = concept.baseXbrliType in integerItemTypes
isPercent = concept.typeQname in (qnPercentItemType, qnPureItemType)
isString = concept.baseXbrliType in ("stringItemType", "normalizedStringItemType")
isEnum = concept.typeQname == qnEnumerationItemType
else:
isNumeric = isString = isEnum = False # error situation
k = (f.getparent().objectIndex,
f.qname,
f.context.contextDimAwareHash if f.context is not None else None,
f.unit.hash if f.unit is not None else None,
hash(f.xmlLang))
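            # Duplicate-candidate key: parent object index, concept QName,
            # dimension-aware context hash, unit hash and xml:lang; exact
            # context equality is then re-checked against the stored facts.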
if f.qname == qnFIndicators and val.validateEIOPA:
pass
elif k not in otherFacts:
otherFacts[k] = {f}
else:
matches = [o
for o in otherFacts[k]
if (f.getparent().objectIndex == o.getparent().objectIndex and
f.qname == o.qname and
f.context.isEqualTo(o.context) if f.context is not None and o.context is not None else True) and
# (f.unit.isEqualTo(o.unit) if f.unit is not None and o.unit is not None else True) and
(f.xmlLang == o.xmlLang)]
if matches:
contexts = [f.contextID] + [o.contextID for o in matches]
modelXbrl.error(("EBA.2.16", "EIOPA.S.2.16" if val.isEIOPAfullVersion else "EIOPA.S.2.16.a"),
_('Facts are duplicates %(fact)s contexts %(contexts)s.'),
modelObject=[f] + matches, fact=f.qname, contexts=', '.join(contexts),
messageCodes=("EBA.2.16", "EIOPA.S.2.16", "EIOPA.S.2.16.a"))
else:
otherFacts[k].add(f)
if isNumeric:
if f.precision:
modelXbrl.error(("EBA.2.17", "EIOPA.2.18.a"),
_("Numeric fact %(fact)s of context %(contextID)s has a precision attribute '%(precision)s'"),
modelObject=f, fact=f.qname, contextID=f.contextID, precision=f.precision)
if f.decimals and not f.isNil: # in XbrlDpmSqlDB for 2_0_1
if f.decimals == "INF":
if not val.isEIOPAfullVersion:
modelXbrl.error("EIOPA.S.2.18.f",
_("Monetary fact %(fact)s of context %(contextID)s has a decimal attribute INF: '%(decimals)s'"),
modelObject=f, fact=f.qname, contextID=f.contextID, decimals=f.decimals)
else:
try:
xValue = f.xValue
dec = int(f.decimals)
if isMonetary:
if val.isEIOPA_2_0_1:
_absXvalue = abs(xValue)
if str(f.qname) in s_2_18_c_a_met:
dMin = 2
elif _absXvalue >= 100000000:
dMin = -4
elif 100000000 > _absXvalue >= 1000000:
dMin = -3
elif 1000000 > _absXvalue >= 1000:
dMin = -2
else:
dMin = -1
if dMin > dec:
modelXbrl.error("EIOPA.S.2.18.c",
_("Monetary fact %(fact)s of context %(contextID)s has a decimals attribute less than minimum %(minimumDecimals)s: '%(decimals)s'"),
modelObject=f, fact=f.qname, contextID=f.contextID, minimumDecimals=dMin, decimals=f.decimals)
elif dec < -3:
modelXbrl.error(("EBA.2.18","EIOPA.S.2.18.c"),
_("Monetary fact %(fact)s of context %(contextID)s has a decimals attribute < -3: '%(decimals)s'"),
modelObject=f, fact=f.qname, contextID=f.contextID, decimals=f.decimals)
else: # apply dynamic decimals check
if -.1 < xValue < .1: dMin = 2
elif -1 < xValue < 1: dMin = 1
elif -10 < xValue < 10: dMin = 0
elif -100 < xValue < 100: dMin = -1
elif -1000 < xValue < 1000: dMin = -2
else: dMin = -3
if dMin > dec:
modelXbrl.warning("EIOPA:factDecimalsWarning",
_("Monetary fact %(fact)s of context %(contextID)s value %(value)s has an imprecise decimals attribute: %(decimals)s, minimum is %(mindec)s"),
modelObject=f, fact=f.qname, contextID=f.contextID, value=xValue, decimals=f.decimals, mindec=dMin)
elif isInteger:
if dec != 0:
modelXbrl.error(("EBA.2.18","EIOPA.S.2.18.d"),
_("Integer fact %(fact)s of context %(contextID)s has a decimals attribute \u2260 0: '%(decimals)s'"),
modelObject=f, fact=f.qname, contextID=f.contextID, decimals=f.decimals)
elif isPercent:
if dec < 4:
modelXbrl.error(("EBA.2.18","EIOPA.S.2.18.e"),
_("Percent fact %(fact)s of context %(contextID)s has a decimals attribute < 4: '%(decimals)s'"),
modelObject=f, fact=f.qname, contextID=f.contextID, decimals=f.decimals)
if val.isEIOPA_2_0_1 and xValue > 1:
modelXbrl.warning(("EIOPA.3.2.b"),
_("Percent fact %(fact)s of context %(contextID)s appears to be over 100% = 1.0: '%(value)s'"),
modelObject=f, fact=f.qname, contextID=f.contextID, value=xValue)
else:
if -.001 < xValue < .001: dMin = 4
elif -.01 < xValue < .01: dMin = 3
elif -.1 < xValue < .1: dMin = 2
elif -1 < xValue < 1: dMin = 1
else: dMin = 0
if dMin > dec:
modelXbrl.warning("EIOPA:factDecimalsWarning",
_("Numeric fact %(fact)s of context %(contextID)s value %(value)s has an imprecise decimals attribute: %(decimals)s, minimum is %(mindec)s"),
modelObject=f, fact=f.qname, contextID=f.contextID, value=xValue, decimals=f.decimals, mindec=dMin)
except (__HOLE__, ValueError):
pass # should have been reported as a schema error by loader
'''' (not intended by EBA 2.18, paste here is from EFM)
if not f.isNil and getattr(f,"xValid", 0) == 4:
try:
insignificance = insignificantDigits(f.xValue, decimals=f.decimals)
if insignificance: # if not None, returns (truncatedDigits, insiginficantDigits)
modelXbrl.error(("EFM.6.05.37", "GFM.1.02.26"),
_("Fact %(fact)s of context %(contextID)s decimals %(decimals)s value %(value)s has nonzero digits in insignificant portion %(insignificantDigits)s."),
modelObject=f1, fact=f1.qname, contextID=f1.contextID, decimals=f1.decimals,
value=f1.xValue, truncatedDigits=insignificance[0], insignificantDigits=insignificance[1])
except (ValueError,TypeError):
modelXbrl.error(("EBA.2.18"),
_("Fact %(fact)s of context %(contextID)s decimals %(decimals)s value %(value)s causes Value Error exception."),
modelObject=f1, fact=f1.qname, contextID=f1.contextID, decimals=f1.decimals, value=f1.value)
'''
unit = f.unit
if unit is not None:
if isMonetary:
if unit.measures[0]:
_currencyMeasure = unit.measures[0][0]
if val.isEIOPA_2_0_1 and f.context is not None:
if f.context.dimMemberQname(val.qnDimAF) == val.qnCAx1 and val.qnDimOC in f.context.qnameDims:
_ocCurrency = f.context.dimMemberQname(val.qnDimOC).localName
if _currencyMeasure.localName != _ocCurrency:
modelXbrl.error("EIOPA.3.1",
_("There MUST be only one currency but metric %(metric)s reported OC dimension currency %(ocCurrency)s differs from unit currency: %(unitCurrency)s."),
modelObject=f, metric=f.qname, ocCurrency=_ocCurrency, unitCurrency=_currencyMeasure.localName)
else:
val.currenciesUsed[_currencyMeasure] = unit
else:
val.currenciesUsed[_currencyMeasure] = unit
elif not unit.isSingleMeasure or unit.measures[0][0] != XbrlConst.qnXbrliPure:
nonMonetaryNonPureFacts.append(f)
if isEnum:
_eQn = getattr(f,"xValue", None) or qnameEltPfxName(f, f.value)
if _eQn:
prefixUsed(val, _eQn.namespaceURI, _eQn.prefix)
if val.isEIOPA_2_0_1 and f.qname.localName == "ei1930":
val.reportingCurrency = _eQn.localName
elif isString:
if f.xmlLang: # requires disclosureSystem to NOT specify default language
stringFactsWithXmlLang.append(f)
if f.isNil:
nilFacts.append(f)
if val.footnotesRelationshipSet.fromModelObject(f):
modelXbrl.warning("EIOPA.S.19",
_("Fact %(fact)s of context %(contextID)s has footnotes.'"),
modelObject=f, fact=f.qname, contextID=f.contextID)
if nilFacts:
modelXbrl.error(("EBA.2.19", "EIOPA.S.2.19"),
_('Nil facts MUST NOT be present in the instance: %(nilFacts)s.'),
modelObject=nilFacts, nilFacts=", ".join(str(f.qname) for f in nilFacts))
if stringFactsWithXmlLang:
modelXbrl.warning("EIOPA.2.20", # not reported for EBA
_("String facts reporting xml:lang (not saved by T4U, not round-tripped): '%(factsWithLang)s'"),
modelObject=stringFactsWithXmlLang, factsWithLang=", ".join(set(str(f.qname) for f in stringFactsWithXmlLang)))
if nonMonetaryNonPureFacts:
modelXbrl.error(("EBA.3.2","EIOPA.3.2.a"),
_("Non monetary (numeric) facts MUST use the pure unit: '%(langLessFacts)s'"),
modelObject=nonMonetaryNonPureFacts, langLessFacts=", ".join(set(str(f.qname) for f in nonMonetaryNonPureFacts)))
val.utrValidator.validateFacts() # validate facts for UTR at logLevel WARNING
unitHashes = {}
for unit in modelXbrl.units.values():
h = unit.hash
if h in unitHashes and unit.isEqualTo(unitHashes[h]):
modelXbrl.warning("EBA.2.21",
_("Duplicate units SHOULD NOT be reported, units %(unit1)s and %(unit2)s have same measures.'"),
modelObject=(unit, unitHashes[h]), unit1=unit.id, unit2=unitHashes[h].id)
if not getattr(modelXbrl, "isStreamingMode", False):
modelXbrl.error("EIOPA.2.21",
_("Duplicate units MUST NOT be reported, units %(unit1)s and %(unit2)s have same measures.'"),
modelObject=(unit, unitHashes[h]), unit1=unit.id, unit2=unitHashes[h].id)
else:
unitHashes[h] = unit
for _measures in unit.measures:
for _measure in _measures:
prefixUsed(val, _measure.namespaceURI, _measure.prefix)
del unitHashes
cntxHashes = {}
for cntx in modelXbrl.contexts.values():
h = cntx.contextDimAwareHash
if h in cntxHashes and cntx.isEqualTo(cntxHashes[h]):
if not getattr(modelXbrl, "isStreamingMode", False):
modelXbrl.log("WARNING" if val.isEIOPAfullVersion else "ERROR",
"EIOPA.S.2.7.b",
_("Duplicate contexts MUST NOT be reported, contexts %(cntx1)s and %(cntx2)s are equivalent.'"),
modelObject=(cntx, cntxHashes[h]), cntx1=cntx.id, cntx2=cntxHashes[h].id)
else:
cntxHashes[h] = cntx
for _dim in cntx.qnameDims.values():
_dimQn = _dim.dimensionQname
prefixUsed(val, _dimQn.namespaceURI, _dimQn.prefix)
if _dim.isExplicit:
_memQn = _dim.memberQname
else:
_memQn = _dim.typedMember.qname
if _memQn:
prefixUsed(val, _memQn.namespaceURI, _memQn.prefix)
for elt in modelDocument.xmlRootElement.iter():
if isinstance(elt, ModelObject): # skip comments and processing instructions
prefixUsed(val, elt.qname.namespaceURI, elt.qname.prefix)
for attrTag in elt.keys():
if attrTag.startswith("{"):
_prefix, _NS, _localName = XmlUtil.clarkNotationToPrefixNsLocalname(elt, attrTag, isAttribute=True)
if _prefix:
prefixUsed(val, _NS, _prefix)
elif val.isEIOPA_2_0_1:
if elt.tag in ("{http://www.w3.org/2001/XMLSchema}documentation", "{http://www.w3.org/2001/XMLSchema}annotation"):
modelXbrl.error("EIOPA.2.5",
_("xs:documentation element found, all relevant business data MUST only be contained in contexts, units, schemaRef and facts."),
modelObject=modelDocument)
elif isinstance(elt, etree._Comment):
modelXbrl.error("EIOPA.2.5",
_("XML comment found, all relevant business data MUST only be contained in contexts, units, schemaRef and facts: %(comment)s"),
modelObject=modelDocument, comment=elt.text)
|
AttributeError
|
dataset/ETHPy150Open Arelle/Arelle/arelle/plugin/validate/EBA/__init__.py/validateFacts
|
1,379
|
def final(val):
if not (val.validateEBA or val.validateEIOPA):
return
modelXbrl = val.modelXbrl
modelDocument = modelXbrl.modelDocument
_statusMsg = _("validating {0} filing rules").format(val.disclosureSystem.name)
modelXbrl.profileActivity()
modelXbrl.modelManager.showStatus(_statusMsg)
if modelDocument.type == ModelDocument.Type.INSTANCE and (val.validateEBA or val.validateEIOPA):
if not modelDocument.uri.endswith(".xbrl"):
modelXbrl.warning("EBA.1.1",
_('XBRL instance documents SHOULD use the extension ".xbrl" but it is "%(extension)s"'),
modelObject=modelDocument, extension=os.path.splitext(modelDocument.basename)[1])
modelXbrl.error("EIOPA.S.1.1.a",
_('XBRL instance documents MUST use the extension ".xbrl" but it is "%(extension)s"'),
modelObject=modelDocument, extension=os.path.splitext(modelDocument.basename)[1])
if val.isEIOPA_2_0_1: _encodings = ("UTF-8", "utf-8-sig")
else: _encodings = ("utf-8", "UTF-8", "utf-8-sig")
if modelDocument.documentEncoding not in _encodings:
modelXbrl.error(("EBA.1.4", "EIOPA.1.4"),
_('XBRL instance documents MUST use "UTF-8" encoding but is "%(xmlEncoding)s"'),
modelObject=modelDocument, xmlEncoding=modelDocument.documentEncoding)
schemaRefElts = []
schemaRefFileNames = []
for doc, docRef in modelDocument.referencesDocument.items():
if docRef.referenceType == "href":
if docRef.referringModelObject.localName == "schemaRef":
schemaRefElts.append(docRef.referringModelObject)
schemaRefFileNames.append(doc.basename)
if not UrlUtil.isAbsolute(doc.uri):
modelXbrl.error(("EBA.2.2", "EIOPA.S.1.5.a" if val.isEIOPAfullVersion else "EIOPA.S.1.5.b"),
_('The link:schemaRef element in submitted instances MUST resolve to the full published entry point URL: %(url)s.'),
modelObject=docRef.referringModelObject, url=doc.uri,
messageCodes=("EBA.2.2", "EIOPA.S.1.5.a","EIOPA.S.1.5.b"))
elif docRef.referringModelObject.localName == "linkbaseRef":
modelXbrl.error(("EBA.2.3","EIOPA.S.1.5.a"),
_('The link:linkbaseRef element is not allowed: %(fileName)s.'),
modelObject=docRef.referringModelObject, fileName=doc.basename)
_numSchemaRefs = len(XmlUtil.children(modelDocument.xmlRootElement, XbrlConst.link, "schemaRef"))
if _numSchemaRefs > 1:
modelXbrl.error(("EIOPA.S.1.5.a", "EBA.1.5"),
_('XBRL instance documents MUST reference only one entry point schema but %(numEntryPoints)s were found: %(entryPointNames)s'),
modelObject=modelDocument, numEntryPoints=_numSchemaRefs, entryPointNames=', '.join(sorted(schemaRefFileNames)))
### check entry point names appropriate for filing indicator (DPM DB?)
if len(schemaRefElts) != 1:
modelXbrl.error("EBA.2.3",
_('Any reported XBRL instance document MUST contain only one xbrli:xbrl/link:schemaRef node, but %(entryPointCount)s.'),
modelObject=schemaRefElts, entryPointCount=len(schemaRefElts))
# non-streaming EBA checks
if not getattr(modelXbrl, "isStreamingMode", False):
val.qnReportedCurrency = None
if val.isEIOPA_2_0_1 and qnMetReportingCurrency in modelXbrl.factsByQname:
for _multiCurrencyFact in modelXbrl.factsByQname[qnMetReportingCurrency]:
# multi-currency fact
val.qnReportedCurrency = _multiCurrencyFact.xValue
break
validateFacts(val, modelXbrl.facts)
# check sum of fact md5s (otherwise checked in streaming process)
xbrlFactsCheckVersion = None
expectedSumOfFactMd5s = None
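        # An xbrl-facts-check processing instruction supplies a version and an
        # expected sum of per-fact md5 hashes; recompute the sum and compare.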
for pi in modelDocument.xmlRootElement.getchildren():
if isinstance(pi, etree._ProcessingInstruction) and pi.target == "xbrl-facts-check":
_match = re.search("([\\w-]+)=[\"']([^\"']+)[\"']", pi.text)
if _match:
_matchGroups = _match.groups()
if len(_matchGroups) == 2:
if _matchGroups[0] == "version":
xbrlFactsCheckVersion = _matchGroups[1]
elif _matchGroups[0] == "sum-of-fact-md5s":
try:
expectedSumOfFactMd5s = Md5Sum(_matchGroups[1])
except __HOLE__:
modelXbrl.error("EIOPA:xbrlFactsCheckError",
_("Invalid sum-of-md5s %(sumOfMd5)s"),
modelObject=modelXbrl, sumOfMd5=_matchGroups[1])
if xbrlFactsCheckVersion and expectedSumOfFactMd5s:
sumOfFactMd5s = Md5Sum()
for f in modelXbrl.factsInInstance:
sumOfFactMd5s += f.md5sum
if sumOfFactMd5s != expectedSumOfFactMd5s:
modelXbrl.warning("EIOPA:xbrlFactsCheckWarning",
_("XBRL facts sum of md5s expected %(expectedMd5)s not matched to actual sum %(actualMd5Sum)s"),
modelObject=modelXbrl, expectedMd5=expectedSumOfFactMd5s, actualMd5Sum=sumOfFactMd5s)
else:
modelXbrl.info("info",
_("Successful XBRL facts sum of md5s."),
modelObject=modelXbrl)
if any(badError in modelXbrl.errors
for badError in ("EBA.2.1", "EIOPA.2.1", "EIOPA.S.1.5.a/EIOPA.S.1.5.b")):
pass # skip checking filingIndicators if bad errors
elif not val.filingIndicators:
modelXbrl.error(("EBA.1.6", "EIOPA.1.6.a"),
_('Missing filing indicators. Reported XBRL instances MUST include appropriate (positive) filing indicator elements'),
modelObject=modelDocument)
elif all(filed == False for filed in val.filingIndicators.values()):
modelXbrl.error(("EBA.1.6", "EIOPA.1.6.a"),
_('All filing indicators are filed="false". Reported XBRL instances MUST include appropriate (positive) filing indicator elements'),
modelObject=modelDocument)
if val.numFilingIndicatorTuples > 1:
modelXbrl.warning(("EBA.1.6.2", "EIOPA.1.6.2"),
_('Multiple filing indicators tuples when not in streaming mode (info).'),
modelObject=modelXbrl.factsByQname[qnFIndicators])
if len(val.cntxDates) > 1:
modelXbrl.error(("EBA.2.13","EIOPA.2.13"),
_('Contexts must have the same date: %(dates)s.'),
# when streaming values are no longer available, but without streaming they can be logged
modelObject=set(_cntx for _cntxs in val.cntxDates.values() for _cntx in _cntxs),
dates=', '.join(XmlUtil.dateunionValue(_dt, subtractOneDay=True)
for _dt in val.cntxDates.keys()))
if val.unusedCntxIDs:
if val.isEIOPA_2_0_1:
modelXbrl.error("EIOPA.2.7",
_('Unused xbrli:context nodes MUST NOT be present in the instance: %(unusedContextIDs)s.'),
modelObject=[modelXbrl.contexts[unusedCntxID] for unusedCntxID in val.unusedCntxIDs if unusedCntxID in modelXbrl.contexts],
unusedContextIDs=", ".join(sorted(val.unusedCntxIDs)))
else:
modelXbrl.warning(("EBA.2.7", "EIOPA.2.7"),
_('Unused xbrli:context nodes SHOULD NOT be present in the instance: %(unusedContextIDs)s.'),
modelObject=[modelXbrl.contexts[unusedCntxID] for unusedCntxID in val.unusedCntxIDs if unusedCntxID in modelXbrl.contexts],
unusedContextIDs=", ".join(sorted(val.unusedCntxIDs)))
if len(val.cntxEntities) > 1:
modelXbrl.error(("EBA.2.9", "EIOPA.2.9"),
_('All entity identifiers and schemes MUST be the same, %(count)s found: %(entities)s.'),
modelObject=modelDocument, count=len(val.cntxEntities),
entities=", ".join(sorted(str(cntxEntity) for cntxEntity in val.cntxEntities)))
for _scheme, _LEI in val.cntxEntities:
if (_scheme in ("http://standards.iso.org/iso/17442", "http://standard.iso.org/iso/17442", "LEI") or
(not val.isEIOPAfullVersion and _scheme == "PRE-LEI")):
result = LeiUtil.checkLei(_LEI)
if result == LeiUtil.LEI_INVALID_LEXICAL:
modelXbrl.error("EIOPA.S.2.8.c",
_("Context has lexically invalid LEI %(lei)s."),
modelObject=modelDocument, lei=_LEI)
elif result == LeiUtil.LEI_INVALID_CHECKSUM:
modelXbrl.error("EIOPA.S.2.8.c",
_("Context has LEI checksum error in %(lei)s."),
modelObject=modelDocument, lei=_LEI)
if _scheme == "http://standard.iso.org/iso/17442":
modelXbrl.warning("EIOPA.S.2.8.c",
_("Warning, context has entity scheme %(scheme)s should be plural: http://standards.iso.org/iso/17442."),
modelObject=modelDocument, scheme=_scheme)
elif _scheme == "SC":
pass # anything is ok for Specific Code
else:
modelXbrl.error("EIOPA.S.2.8.c",
_("Context has unrecognized entity scheme %(scheme)s."),
modelObject=modelDocument, scheme=_scheme)
if val.unusedUnitIDs:
if val.isEIOPA_2_0_1:
modelXbrl.error("EIOPA.2.22",
_('Unused xbrli:unit nodes MUST NOT be present in the instance: %(unusedUnitIDs)s.'),
modelObject=[modelXbrl.units[unusedUnitID] for unusedUnitID in val.unusedUnitIDs if unusedUnitID in modelXbrl.units],
unusedUnitIDs=", ".join(sorted(val.unusedUnitIDs)))
else:
modelXbrl.warning(("EBA.2.22", "EIOPA.2.22"),
_('Unused xbrli:unit nodes SHOULD NOT be present in the instance: %(unusedUnitIDs)s.'),
modelObject=[modelXbrl.units[unusedUnitID] for unusedUnitID in val.unusedUnitIDs if unusedUnitID in modelXbrl.units],
unusedUnitIDs=", ".join(sorted(val.unusedUnitIDs)))
if len(val.currenciesUsed) > 1:
modelXbrl.error(("EBA.3.1","EIOPA.3.1"),
_("There MUST be only one currency but %(numCurrencies)s were found: %(currencies)s.'"),
modelObject=val.currenciesUsed.values(), numCurrencies=len(val.currenciesUsed), currencies=", ".join(str(c) for c in val.currenciesUsed.keys()))
elif val.isEIOPA_2_0_1 and any(_measure.localName != val.reportingCurrency for _measure in val.currenciesUsed.keys()):
modelXbrl.error("EIOPA.3.1",
_("There MUST be only one currency but reporting currency %(reportingCurrency)s differs from unit currencies: %(currencies)s.'"),
modelObject=val.currenciesUsed.values(), reportingCurrency=val.reportingCurrency, currencies=", ".join(str(c) for c in val.currenciesUsed.keys()))
if val.prefixesUnused:
modelXbrl.warning(("EBA.3.4", "EIOPA.3.4"),
_("There SHOULD be no unused prefixes but these were declared: %(unusedPrefixes)s.'"),
modelObject=modelDocument, unusedPrefixes=', '.join(sorted(val.prefixesUnused)))
for ns, prefixes in val.namespacePrefixesUsed.items():
nsDocs = modelXbrl.namespaceDocs.get(ns)
if nsDocs:
for nsDoc in nsDocs:
nsDocPrefix = XmlUtil.xmlnsprefix(nsDoc.xmlRootElement, ns)
if any(prefix != nsDocPrefix for prefix in prefixes if prefix is not None):
modelXbrl.warning(("EBA.3.5", "EIOPA.3.5"),
_("Prefix for namespace %(namespace)s is %(declaredPrefix)s but these were found %(foundPrefixes)s"),
modelObject=modelDocument, namespace=ns, declaredPrefix=nsDocPrefix, foundPrefixes=', '.join(sorted(prefixes - {None})))
elif ns in CANONICAL_PREFIXES and any(prefix != CANONICAL_PREFIXES[ns] for prefix in prefixes if prefix is not None):
modelXbrl.warning(("EBA.3.5", "EIOPA.3.5"),
_("Prefix for namespace %(namespace)s is %(declaredPrefix)s but these were found %(foundPrefixes)s"),
modelObject=modelDocument, namespace=ns, declaredPrefix=CANONICAL_PREFIXES[ns], foundPrefixes=', '.join(sorted(prefixes - {None})))
modelXbrl.profileActivity(_statusMsg, minTimeToShow=0.0)
modelXbrl.modelManager.showStatus(None)
del val.prefixNamespace, val.namespacePrefix, val.idObjects, val.typedDomainElements
del val.utrValidator, val.firstFact, val.footnotesRelationshipSet
|
ValueError
|
dataset/ETHPy150Open Arelle/Arelle/arelle/plugin/validate/EBA/__init__.py/final
|
1,380
|
def __del__(self):
# This method can be called during interpreter shutdown, which means we
# must do the absolute minimum here. Note that there could be running
# daemon threads that are trying to call other methods on this object.
try:
self.os.close(self._inotify_fd)
except (AttributeError, __HOLE__):
pass
|
TypeError
|
dataset/ETHPy150Open powerline/powerline/powerline/lib/inotify.py/INotify.__del__
|
1,381
|
def handle(self, *args, **options):
try:
jad_path, jar_path = args
except __HOLE__:
raise CommandError('Usage: %s\n%s' % (self.args, self.help))
with open(jad_path, 'r') as f:
jad_file = f.read()
with open(jar_path, 'rb') as f:
jar_file = f.read()
new_jad = sign_jar(jad_file, jar_file)
with open('{}.signed'.format(jad_path), 'w') as f:
f.write(new_jad)
return 'signed jad file and saved copy as [jad].signed'
|
ValueError
|
dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/builds/management/commands/sign_jadjar.py/Command.handle
|
1,382
|
def fromEnvironment(cls):
"""
Get as many of the platform-specific error translation objects as
possible and return an instance of C{cls} created with them.
"""
try:
from ctypes import WinError
except ImportError:
WinError = None
try:
from win32api import FormatMessage
except ImportError:
FormatMessage = None
try:
from socket import errorTab
except __HOLE__:
errorTab = None
return cls(WinError, FormatMessage, errorTab)
|
ImportError
|
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/python/win32.py/_ErrorFormatter.fromEnvironment
|
1,383
|
def test_autofield_field_raises_error_message(self):
f = models.AutoField(primary_key=True)
self.assertRaises(ValidationError, f.clean, 'foo', None)
try:
f.clean('foo', None)
except ValidationError, e:
self.assertEqual(e.messages, [u"'foo' value must be an integer."])
# primary_key must be True. Refs #12467.
self.assertRaises(AssertionError, models.AutoField, 'primary_key', False)
try:
models.AutoField(primary_key=False)
except __HOLE__, e:
self.assertEqual(str(e), "AutoFields must have primary_key=True.")
|
AssertionError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/tests/modeltests/validation/test_error_messages.py/ValidationMessagesTest.test_autofield_field_raises_error_message
|
1,384
|
def test_integer_field_raises_error_message(self):
f = models.IntegerField()
self.assertRaises(ValidationError, f.clean, 'foo', None)
try:
f.clean('foo', None)
except __HOLE__, e:
self.assertEqual(e.messages, [u"'foo' value must be an integer."])
|
ValidationError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/tests/modeltests/validation/test_error_messages.py/ValidationMessagesTest.test_integer_field_raises_error_message
|
1,385
|
def test_boolean_field_raises_error_message(self):
f = models.BooleanField()
self.assertRaises(ValidationError, f.clean, 'foo', None)
try:
f.clean('foo', None)
except __HOLE__, e:
self.assertEqual(e.messages,
[u"'foo' value must be either True or False."])
|
ValidationError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/tests/modeltests/validation/test_error_messages.py/ValidationMessagesTest.test_boolean_field_raises_error_message
|
1,386
|
def test_float_field_raises_error_message(self):
f = models.FloatField()
self.assertRaises(ValidationError, f.clean, 'foo', None)
try:
f.clean('foo', None)
except __HOLE__, e:
self.assertEqual(e.messages, [u"'foo' value must be a float."])
|
ValidationError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/tests/modeltests/validation/test_error_messages.py/ValidationMessagesTest.test_float_field_raises_error_message
|
1,387
|
def test_decimal_field_raises_error_message(self):
f = models.DecimalField()
self.assertRaises(ValidationError, f.clean, 'foo', None)
try:
f.clean('foo', None)
except __HOLE__, e:
self.assertEqual(e.messages,
[u"'foo' value must be a decimal number."])
|
ValidationError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/tests/modeltests/validation/test_error_messages.py/ValidationMessagesTest.test_decimal_field_raises_error_message
|
1,388
|
def test_null_boolean_field_raises_error_message(self):
f = models.NullBooleanField()
self.assertRaises(ValidationError, f.clean, 'foo', None)
try:
f.clean('foo', None)
except __HOLE__, e:
self.assertEqual(e.messages,
[u"'foo' value must be either None, True or False."])
|
ValidationError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/tests/modeltests/validation/test_error_messages.py/ValidationMessagesTest.test_null_boolean_field_raises_error_message
|
1,389
|
def test_date_field_raises_error_message(self):
f = models.DateField()
self.assertRaises(ValidationError, f.clean, 'foo', None)
try:
f.clean('foo', None)
except ValidationError, e:
self.assertEqual(e.messages, [
u"'foo' value has an invalid date format. "
u"It must be in YYYY-MM-DD format."])
self.assertRaises(ValidationError, f.clean, 'aaaa-10-10', None)
try:
f.clean('aaaa-10-10', None)
except __HOLE__, e:
self.assertEqual(e.messages, [
u"'aaaa-10-10' value has an invalid date format. "
u"It must be in YYYY-MM-DD format."])
self.assertRaises(ValidationError, f.clean, '2011-13-10', None)
try:
f.clean('2011-13-10', None)
except ValidationError, e:
self.assertEqual(e.messages, [
u"'2011-13-10' value has the correct format (YYYY-MM-DD) "
u"but it is an invalid date."])
self.assertRaises(ValidationError, f.clean, '2011-10-32', None)
try:
f.clean('2011-10-32', None)
except ValidationError, e:
self.assertEqual(e.messages, [
u"'2011-10-32' value has the correct format (YYYY-MM-DD) "
u"but it is an invalid date."])
|
ValidationError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/tests/modeltests/validation/test_error_messages.py/ValidationMessagesTest.test_date_field_raises_error_message
|
1,390
|
def test_datetime_field_raises_error_message(self):
f = models.DateTimeField()
# Wrong format
self.assertRaises(ValidationError, f.clean, 'foo', None)
try:
f.clean('foo', None)
except ValidationError, e:
self.assertEqual(e.messages, [
u"'foo' value has an invalid format. It must be "
u"in YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."])
# Correct format but invalid date
self.assertRaises(ValidationError, f.clean, '2011-10-32', None)
try:
f.clean('2011-10-32', None)
except __HOLE__, e:
self.assertEqual(e.messages, [
u"'2011-10-32' value has the correct format "
u"(YYYY-MM-DD) but it is an invalid date."])
# Correct format but invalid date/time
self.assertRaises(ValidationError, f.clean, '2011-10-32 10:10', None)
try:
f.clean('2011-10-32 10:10', None)
except ValidationError, e:
self.assertEqual(e.messages, [
u"'2011-10-32 10:10' value has the correct format "
u"(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) "
u"but it is an invalid date/time."])
|
ValidationError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/tests/modeltests/validation/test_error_messages.py/ValidationMessagesTest.test_datetime_field_raises_error_message
|
1,391
|
def test_time_field_raises_error_message(self):
f = models.TimeField()
# Wrong format
self.assertRaises(ValidationError, f.clean, 'foo', None)
try:
f.clean('foo', None)
except __HOLE__, e:
self.assertEqual(e.messages, [
u"'foo' value has an invalid format. It must be in "
u"HH:MM[:ss[.uuuuuu]] format."])
# Correct format but invalid time
self.assertRaises(ValidationError, f.clean, '25:50', None)
try:
f.clean('25:50', None)
except ValidationError, e:
self.assertEqual(e.messages, [
u"'25:50' value has the correct format "
u"(HH:MM[:ss[.uuuuuu]]) but it is an invalid time."])
|
ValidationError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/tests/modeltests/validation/test_error_messages.py/ValidationMessagesTest.test_time_field_raises_error_message
|
1,392
|
@app.route('/secret', methods=['GET'])
def secret():
while 1:
s = base64.urlsafe_b64encode(os.urandom(48))
try:
iter(backend.list(s)).next()
except __HOLE__:
break
return MeteredResponse(response='{0}\n'.format(s),
status=201,
content_type='text/plain')
|
StopIteration
|
dataset/ETHPy150Open devstructure/blueprint/blueprint/io/server/__init__.py/secret
|
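The iter(...).next() call above leans on StopIteration from an empty listing to detect an unused secret; the same test reads more plainly as a loop (a sketch, assuming backend.list returns an iterable of keys stored under the given prefix):

def secret_is_unused(backend, s):
    for _ in backend.list(s):
        return False  # something already lives under this prefix
    return True       # empty listing: the secret is free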
1,393
|
@app.route('/<secret>/<name>', methods=['PUT'])
def put_blueprint(secret, name):
validate_secret(secret)
validate_name(name)
librato.count('blueprint-io-server.bandwidth.in', request.content_length)
statsd.update('blueprint-io-server.bandwidth.in', request.content_length)
validate_content_length()
# Validate the blueprint JSON format. This could stand more rigor
# or, dare I say it, a schema?
try:
for k in request.json.iterkeys():
if k not in ('arch', 'files', 'packages', 'services', 'sources'):
abort(400)
except __HOLE__:
abort(400)
# Remove tarballs referenced by the old blueprint but not the new one.
b = _blueprint(secret, name)
if b is not None and b is not False:
for filename in set(b.sources.itervalues()) - \
set(request.json.get('sources', {}).itervalues()):
backend.delete_tarball(secret, name, filename[0:-4])
# Store the blueprint JSON in S3.
if not backend.put_blueprint(secret, name, request.data):
abort(502)
return MeteredResponse(response='',
status=202,
content_type='text/plain')
|
ValueError
|
dataset/ETHPy150Open devstructure/blueprint/blueprint/io/server/__init__.py/put_blueprint
|
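The inline comment above asks for a schema; a minimal sketch using the jsonschema library (an assumption -- the project does these checks by hand, and the per-key value types are guesses) would mirror the key whitelist and the abort(400):

import jsonschema  # assumed available; not a dependency of the code above

BLUEPRINT_SCHEMA = {
    'type': 'object',
    'additionalProperties': False,
    'properties': {
        'arch': {'type': 'string'},
        'files': {'type': 'object'},
        'packages': {'type': 'object'},
        'services': {'type': 'object'},
        'sources': {'type': 'object'},
    },
}

def validate_blueprint(doc):
    # Reject anything outside the whitelist, as the loop above does.
    try:
        jsonschema.validate(doc, BLUEPRINT_SCHEMA)
    except jsonschema.ValidationError:
        abort(400)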
1,394
|
def get_test_jobs(args):
dbsession = models.get_session()
TJ = models.TestJob
for jobid in args:
try:
jobid = int(jobid)
except __HOLE__:
pass
try:
if type(jobid) is int:
testjob = dbsession.query(TJ).get(jobid)
else:
testjob = dbsession.query(TJ).filter(TJ.name==jobid).one()
except models.NoResultFound:
logging.warn("No TestJob with id %r" % jobid)
continue
else:
yield testjob
|
ValueError
|
dataset/ETHPy150Open kdart/pycopia/QA/pycopia/QA/jobrunner.py/get_test_jobs
|
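A typical call site for the generator above (a sketch; the id and name are illustrative): the int() probe lets callers mix numeric ids and job names freely.

for testjob in get_test_jobs(['42', 'nightly-smoke']):
    print testjob.name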
1,395
|
@pytest.fixture
def redis():
try:
from redis import StrictRedis
from redis.exceptions import ConnectionError
except __HOLE__:
pytest.skip('redis library not installed')
try:
r = StrictRedis()
r.ping()
except ConnectionError:
pytest.skip('could not connect to redis')
r.flushall()
return r
|
ImportError
|
dataset/ETHPy150Open mbr/flask-kvsession/tests/conftest.py/redis
|
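A test consuming the fixture above might look like this (a sketch); pytest injects the freshly flushed client, and the two skip branches keep the suite green on machines without redis:

def test_roundtrip(redis):
    redis.set('key', 'value')
    assert redis.get('key') == b'value'  # redis-py returns the raw bytes/str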
1,396
|
def concat(objs, dim=None, data_vars='all', coords='different',
compat='equals', positions=None, indexers=None, mode=None,
concat_over=None):
"""Concatenate xarray objects along a new or existing dimension.
Parameters
----------
objs : sequence of Dataset and DataArray objects
xarray objects to concatenate together. Each object is expected to
consist of variables and coordinates with matching shapes except for
along the concatenated dimension.
dim : str or DataArray or pandas.Index
Name of the dimension to concatenate along. This can either be a new
dimension name, in which case it is added along axis=0, or an existing
dimension name, in which case the location of the dimension is
unchanged. If dimension is provided as a DataArray or Index, its name
is used as the dimension to concatenate along and the values are added
as a coordinate.
data_vars : {'minimal', 'different', 'all' or list of str}, optional
These data variables will be concatenated together:
* 'minimal': Only data variables in which the dimension already
appears are included.
* 'different': Data variables which are not equal (ignoring
attributes) across all datasets are also concatenated (as well as
all for which dimension already appears). Beware: this option may
load the data payload of data variables into memory if they are not
already loaded.
* 'all': All data variables will be concatenated.
* list of str: The listed data variables will be concatenated, in
addition to the 'minimal' data variables.
If objects are DataArrays, data_vars must be 'all'.
coords : {'minimal', 'different', 'all' or list of str}, optional
These coordinate variables will be concatenated together:
* 'minimal': Only coordinates in which the dimension already appears
are included.
* 'different': Coordinates which are not equal (ignoring attributes)
across all datasets are also concatenated (as well as all for which
dimension already appears). Beware: this option may load the data
payload of coordinate variables into memory if they are not already
loaded.
* 'all': All coordinate variables will be concatenated, except
those corresponding to other dimensions.
* list of str: The listed coordinate variables will be concatenated,
in addition to the 'minimal' coordinates.
compat : {'equals', 'identical'}, optional
String indicating how to compare non-concatenated variables and
dataset global attributes for potential conflicts. 'equals' means
that all variable values and dimensions must be the same;
'identical' means that variable attributes and global attributes
must also be equal.
positions : None or list of integer arrays, optional
List of integer arrays which specifies the integer positions to which
to assign each dataset along the concatenated dimension. If not
supplied, objects are concatenated in the provided order.
indexers, mode, concat_over : deprecated
Returns
-------
concatenated : type of objs
See also
--------
auto_combine
"""
# TODO: add join and ignore_index arguments copied from pandas.concat
# TODO: support concatenating scalar coordinates even if the concatenated
# dimension already exists
from .dataset import Dataset
from .dataarray import DataArray
try:
first_obj, objs = utils.peek_at(objs)
except __HOLE__:
raise ValueError('must supply at least one object to concatenate')
if dim is None:
warnings.warn('the `dim` argument to `concat` will be required '
'in a future version of xarray; for now, setting it to '
"the old default of 'concat_dim'",
FutureWarning, stacklevel=2)
        dim = 'concat_dim'
if indexers is not None: # pragma: nocover
warnings.warn('indexers has been renamed to positions; the alias '
'will be removed in a future version of xarray',
FutureWarning, stacklevel=2)
positions = indexers
if mode is not None:
raise ValueError('`mode` is no longer a valid argument to '
'xarray.concat; it has been split into the `data_vars` '
'and `coords` arguments')
if concat_over is not None:
raise ValueError('`concat_over` is no longer a valid argument to '
'xarray.concat; it has been split into the `data_vars` '
'and `coords` arguments')
if isinstance(first_obj, DataArray):
f = _dataarray_concat
elif isinstance(first_obj, Dataset):
f = _dataset_concat
else:
raise TypeError('can only concatenate xarray Dataset and DataArray '
'objects, got %s' % type(first_obj))
return f(objs, dim, data_vars, coords, compat, positions)
|
StopIteration
|
dataset/ETHPy150Open pydata/xarray/xarray/core/combine.py/concat
|
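A minimal use of concat (a sketch): passing a new dimension name stacks the inputs along axis=0, exactly as the docstring describes.

import xarray as xr

a = xr.DataArray([1, 2], dims='x')
b = xr.DataArray([3, 4], dims='x')
stacked = xr.concat([a, b], dim='run')  # new leading dim 'run'; shape (2, 2)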
1,397
|
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super(ProfileListView, self).get_context_data(**kwargs)
try:
page = int(self.request.GET.get('page', None))
except (__HOLE__, ValueError):
page = self.page
if userena_settings.USERENA_DISABLE_PROFILE_LIST \
and not self.request.user.is_staff:
raise Http404
if not self.extra_context: self.extra_context = dict()
context['page'] = page
context['paginate_by'] = self.paginate_by
context['extra_context'] = self.extra_context
return context
|
TypeError
|
dataset/ETHPy150Open bread-and-pepper/django-userena/userena/views.py/ProfileListView.get_context_data
|
1,398
|
def profile_list(request, page=1, template_name='userena/profile_list.html',
paginate_by=50, extra_context=None, **kwargs): # pragma: no cover
"""
Returns a list of all profiles that are public.
It's possible to disable this by changing ``USERENA_DISABLE_PROFILE_LIST``
to ``True`` in your settings.
:param page:
Integer of the active page used for pagination. Defaults to the first
page.
:param template_name:
String defining the name of the template that is used to render the
list of all users. Defaults to ``userena/profile_list.html``.
:param paginate_by:
Integer defining the amount of displayed profiles per page. Defaults to
50 profiles per page.
:param extra_context:
Dictionary of variables that are passed on to the ``template_name``
template.
**Context**
``profile_list``
A list of profiles.
``is_paginated``
A boolean representing whether the results are paginated.
If the result is paginated, it will also contain the following variables.
``paginator``
An instance of ``django.core.paginator.Paginator``.
``page_obj``
An instance of ``django.core.paginator.Page``.
"""
warnings.warn("views.profile_list is deprecated. Use ProfileListView instead", DeprecationWarning, stacklevel=2)
try:
page = int(request.GET.get('page', None))
except (__HOLE__, ValueError):
page = page
if userena_settings.USERENA_DISABLE_PROFILE_LIST \
and not request.user.is_staff:
raise Http404
profile_model = get_profile_model()
queryset = profile_model.objects.get_visible_profiles(request.user)
if not extra_context: extra_context = dict()
return ProfileListView.as_view(queryset=queryset,
paginate_by=paginate_by,
page=page,
template_name=template_name,
extra_context=extra_context,
**kwargs)(request)
|
TypeError
|
dataset/ETHPy150Open bread-and-pepper/django-userena/userena/views.py/profile_list
|
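Since profile_list now merely delegates, new code should route to the class-based view directly (a sketch in Django 1.4-era URLconf syntax; the pattern and name are illustrative):

from django.conf.urls import patterns, url
from userena.views import ProfileListView

urlpatterns = patterns('',
    url(r'^accounts/$', ProfileListView.as_view(paginate_by=50),
        name='profile_list'),
)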
1,399
|
def _resolve_name(name, package, level):
"""Return the absolute name of the module to be imported."""
if not hasattr(package, 'rindex'):
raise ValueError("'package' not set to a string")
dot = len(package)
for x in xrange(level, 1, -1):
try:
dot = package.rindex('.', 0, dot)
except __HOLE__:
raise ValueError("attempted relative import beyond top-level "
"package")
return "%s.%s" % (package[:dot], name)
|
ValueError
|
dataset/ETHPy150Open slacy/minimongo/minimongo/config.py/_resolve_name
|
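Two worked calls show the level arithmetic (the names are illustrative): level=1 keeps the caller's package as-is, and each extra level strips one component via rindex.

print _resolve_name('config', 'myapp.util', 1)  # -> 'myapp.util.config'
print _resolve_name('config', 'myapp.util', 2)  # -> 'myapp.config'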