| Unnamed: 0 (int64, 0–10k) | function (string, 79–138k chars) | label (string, 20 classes) | info (string, 42–261 chars) |
|---|---|---|---|
400
|
def load(filename):
'''Load a known_hosts file, if not already loaded'''
filename = os.path.expanduser(filename)
with _lock:
if filename not in _loaded_files:
try:
_loaded_files[filename] = KnownHosts(filename)
except __HOLE__ as e:
logging.getLogger('radssh.keys').info('Unable to load known_hosts from %s: %s' % (filename, str(e)))
_loaded_files[filename] = KnownHosts()
_loaded_files[filename]._filename = filename
return _loaded_files[filename]
|
IOError
|
dataset/ETHPy150Open radssh/radssh/radssh/known_hosts.py/load
|
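A minimal sketch of the masked IOError above (illustrative only, not part of the dataset): constructing KnownHosts from a path that does not exist fails at open(), and load() falls back to an empty table.

try:
    f = open('/nonexistent/known_hosts')   # hypothetical missing file
except IOError as e:
    print('Unable to load known_hosts: %s' % e)
    # fall back to an empty KnownHosts(), as the sample does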
401
|
def find_first_key(hostname, known_hosts_files=['~/.ssh/known_hosts'], port=22):
'''
Look for first matching host key in a sequence of known_hosts files
'''
for f in known_hosts_files:
x = load(f)
try:
entry = next(x.matching_keys(hostname, port))
return entry
except __HOLE__:
pass
return None
|
StopIteration
|
dataset/ETHPy150Open radssh/radssh/radssh/known_hosts.py/find_first_key
|
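A minimal sketch of the masked StopIteration above: calling next() on an exhausted iterator raises StopIteration, which find_first_key swallows in order to move on to the next known_hosts file.

matches = iter([])          # no matching keys in this file
try:
    entry = next(matches)
except StopIteration:
    entry = None            # try the next file instead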
402
|
def load(self, filename):
'''
Load and index keys from OpenSSH known_hosts file. In order to
preserve lines, the text content is stored in a list (_lines),
and indexes are used to keep line number(s) per host, as well as
index lists for hashed hosts and wildcard matches, which would
both need to be sequentially scanned if the host is not found
in the primary index lookup.
If this method is called multiple times, the host keys are appended,
not cleared. So multiple calls to `load` will produce a concatenation
of the loaded files, in order.
'''
offset = len(self._lines)
with open(filename, 'r') as f:
for lineno, line in enumerate(f):
self._lines.append(line.rstrip('\n'))
try:
e = HostKeyEntry.from_line(line, lineno)
if e is not None:
# Just construct the host index entries during load
# Identify as hashed entry, negation, wildcard, or regular
# Keep the index by the source lineno (plus offset, if
# loading multiple files), as the matching needs the
# whole line for negation logic, and to pick up the
# optional @marker...
for h in e.hostnames:
if h.startswith('|'):
self._hashed_hosts.append((h, offset + lineno))
elif h.startswith('!'):
# negation - do not index
pass
elif '*' in h or '?' in h:
self._wildcards.append((h, offset + lineno))
else:
self._index[h].append(offset + lineno)
except (UnreadableKey, __HOLE__) as e:
logging.getLogger('radssh.keys').error(
'Skipping unloadable key line (%s:%d): %s' % (filename, lineno + 1, line))
pass
|
TypeError
|
dataset/ETHPy150Open radssh/radssh/radssh/known_hosts.py/KnownHosts.load
|
403
|
@classmethod
def from_line(cls, line, lineno=None, filename=None):
'''
Parses the given line of text to find the name(s) for the host,
the type of key, and the key data.
'''
if not line or not line.strip():
return None
fields = line.strip().split(' ')
if not fields or fields[0].startswith('#'):
return None
if fields[0].startswith('@'):
marker = fields[0]
fields = fields[1:]
else:
marker = None
if len(fields) < 3:
raise UnreadableKey('Invalid known_hosts line', line, lineno)
names, keytype, key = fields[:3]
names = names.split(',')
# Decide what kind of key we're looking at and create an object
# to hold it accordingly.
key = key.encode('ascii')
# SSH-2 Key format consists of 2 (text) fields
# keytype, base64_blob
try:
if keytype == 'ssh-rsa':
key = paramiko.RSAKey(data=base64.b64decode(key))
elif keytype == 'ssh-dss':
key = paramiko.DSSKey(data=base64.b64decode(key))
elif keytype == 'ecdsa-sha2-nistp256':
key = paramiko.ECDSAKey(data=base64.b64decode(key), validate_point=False)
elif len(fields) > 3:
# SSH-1 Key format consists of 3 integer fields
# bits, exponent, modulus (RSA Only)
try:
bits = int(fields[1])
exponent = int(fields[2])
modulus = long(fields[3])
key = paramiko.RSAKey(vals=(exponent, modulus))
except __HOLE__:
raise UnreadableKey('Invalid known_hosts line', line, lineno, filename)
else:
raise UnreadableKey('Invalid known_hosts line', line, lineno, filename)
return cls(names, key, marker, lineno, filename)
except Exception as e:
raise UnreadableKey('Invalid known_hosts line (%s)' % e, line, lineno, filename)
|
ValueError
|
dataset/ETHPy150Open radssh/radssh/radssh/known_hosts.py/HostKeyEntry.from_line
|
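A minimal sketch of the masked ValueError above: non-numeric SSH-1 fields make int() (or long() on Python 2) raise ValueError, which from_line converts into UnreadableKey.

try:
    bits = int('not-a-number')   # hypothetical malformed field
except ValueError:
    pass  # reported as an invalid known_hosts line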
404
|
def statusUpdate(self, driver, update):
logging.info("Task %s is in state %s, data %s",
update.task_id.value, mesos_pb2.TaskState.Name(update.state), str(update.data))
try:
key = self.task_key_map[update.task_id.value]
except __HOLE__:
# The map may not contain an item if the framework re-registered after a failover.
# Discard these tasks.
logging.warn("Unrecognised task key %s" % update.task_id.value)
return
if update.state == mesos_pb2.TASK_FINISHED:
self.result_queue.put((key, State.SUCCESS))
self.task_queue.task_done()
if update.state == mesos_pb2.TASK_LOST or \
update.state == mesos_pb2.TASK_KILLED or \
update.state == mesos_pb2.TASK_FAILED:
self.result_queue.put((key, State.FAILED))
self.task_queue.task_done()
|
KeyError
|
dataset/ETHPy150Open airbnb/airflow/airflow/contrib/executors/mesos_executor.py/AirflowMesosScheduler.statusUpdate
|
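A minimal sketch of the masked KeyError above: a plain dict lookup on a task id that was never registered (for example after a framework failover) raises KeyError, and the update is discarded.

task_key_map = {}
try:
    key = task_key_map['task-42']   # hypothetical unknown task id
except KeyError:
    key = None                      # unrecognised task; discard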
405
|
def __init__(self, monitoring_latency, stats_interval=None,
ip_address=None):
super().__init__(monitoring_latency)
self.__name = None
self.__hardware_address = None
if ip_address is None:
ip_address = NetworkInterface.__get_active_ip_address()
self.__ip_address = ip_address
self.__broadcast_address = None
self.__subnet_mask = None
self.__default_route = None
self.__bytes_sent = 0
self.__bytes_recv = 0
# Get interface name, network mask and broadcast address
if self.__ip_address is not None:
for interface in nif.interfaces():
addresses = nif.ifaddresses(interface)
try:
af_inet = addresses[nif.AF_INET][0]
if af_inet['addr'] != self.__ip_address:
continue
af_link = addresses[nif.AF_LINK][0]
self.__name = NetworkInterface.__check_interface_name(
interface
)
self.__hardware_address = af_link['addr']
self.__broadcast_address = af_inet['broadcast']
self.__subnet_mask = af_inet['netmask']
break
except (__HOLE__, KeyError):
# ignore interfaces, which don't have MAC or IP
continue
# Get gateway address
if self.name is not None:
for gateway_info in nif.gateways()[nif.AF_INET]:
if self.name in gateway_info:
self.__default_route = gateway_info[0]
break
# Prepare to collect statistics
if stats_interval is None:
stats_interval = timedelta(hours=1)
self.__bytes_sent_stats = LimitedTimeTable(stats_interval)
self.__bytes_recv_stats = LimitedTimeTable(stats_interval)
# Read updating values at first time
self._monitoring_action()
pass
# endregion
# region properties
|
IndexError
|
dataset/ETHPy150Open uzumaxy/pyspectator/pyspectator/network.py/NetworkInterface.__init__
|
406
|
def run(xml_file):
gui = GUI(window)
loadxml.fromFile(gui, xml_file)
if '--dump' in sys.argv:
print '-'*75
gui.dump()
print '-'*75
window.push_handlers(gui)
gui.push_handlers(dragndrop.DragHandler('.draggable'))
@gui.select('#press-me')
def on_click(widget, *args):
print 'on_click', widget
return event.EVENT_HANDLED
@gui.select('#enable-other')
def on_click(widget, *args):
w = gui.get('#press-me')
w.setEnabled(not w.isEnabled())
return event.EVENT_HANDLED
@gui.select('button, text-button')
def on_click(widget, *args):
print 'DEBUG', widget, 'PRESSED'
return event.EVENT_UNHANDLED
@gui.select('.show-value')
def on_change(widget, value):
print 'DEBUG', widget, 'VALUE CHANGED', `value`
return event.EVENT_UNHANDLED
@gui.select('frame#menu-test', 'on_click')
def on_menu(w, x, y, button, modifiers, click_count):
if not widgets.PopupMenu.isActivatingClick(button, modifiers):
return event.EVENT_UNHANDLED
gui.get('#test-menu').expose((x, y))
return event.EVENT_HANDLED
@gui.select('.hover')
def on_element_enter(widget, *args):
print 'ENTER ELEMENT', widget.id
return event.EVENT_HANDLED
@gui.select('.hover')
def on_element_leave(widget, *args):
print 'LEAVE ELEMENT', widget.id
return event.EVENT_HANDLED
@gui.select('.drawer-control')
def on_click(widget, *args):
id = widget.id.replace('drawer-control', 'test-drawer')
gui.get('#'+id).toggle_state()
return event.EVENT_HANDLED
@gui.select('#question-dialog-test')
def on_click(widget, *args):
def f(*args):
print 'DIALOG SAYS', args
dialogs.Question(widget.getGUI(), 'Did this appear correctly?',
callback=f).run()
return event.EVENT_HANDLED
@gui.select('#message-dialog-test')
def on_click(widget, *args):
def f(*args):
print 'DIALOG SAYS', args
dialogs.Message(widget.getGUI(), 'Hello, World!', callback=f).run()
return event.EVENT_HANDLED
@gui.select('#music-test')
def on_click(widget, x, y, button, modifiers, click_count):
if not button & mouse.RIGHT:
return event.EVENT_UNHANDLED
def load_music(file=None):
if not file: return
gui.get('#music-test').delete()
m = widgets.Music(gui, file, id='music-test', playing=True)
m.gainFocus()
dialogs.FileOpen(gui, callback=load_music).run()
return event.EVENT_HANDLED
@gui.select('#movie-test')
def on_click(widget, x, y, button, modifiers, click_count):
if not button & mouse.RIGHT:
return event.EVENT_UNHANDLED
def load_movie(file=None):
print 'DIALOG SELECTION:', file
if not file: return
gui.get('#movie-test').delete()
m = widgets.Movie(gui, file, id='movie-test', playing=True)
m.gainFocus()
dialogs.FileOpen(gui, callback=load_movie).run()
return event.EVENT_HANDLED
@gui.select('#movie-test')
def on_text(widget, text):
if text == 'f':
gui.get('#movie-test').video.pause()
anim.Delayed(gui.get('#movie-test').video.play, duration=10)
window.set_fullscreen()
return event.EVENT_HANDLED
@gui.select('.droppable')
def on_drop(widget, x, y, button, modifiers, element):
element.reparent(widget)
widget.bgcolor = (1, 1, 1, 1)
return event.EVENT_HANDLED
@gui.select('.droppable')
def on_drag_enter(widget, x, y, element):
widget.bgcolor = (.8, 1, .8, 1)
return event.EVENT_HANDLED
@gui.select('.droppable')
def on_drag_leave(widget, x, y, element):
widget.bgcolor = (1, 1, 1, 1)
return event.EVENT_HANDLED
try:
sample = gui.get('#xhtml-sample')
except __HOLE__:
sample = None
if sample:
@layout.select('#click-me')
def on_mouse_press(element, x, y, button, modifiers):
print 'CLICK ON', element
return event.EVENT_HANDLED
sample.label.push_handlers(on_mouse_press)
if gui.has('.progress-me'):
class Progress:
progress = 0
direction = 1
def animate(self, dt):
self.progress += dt * self.direction
if self.progress > 5:
self.progress = 5
self.direction = -1
elif self.progress < 0:
self.progress = 0
self.direction = 1
for e in gui.get('.progress-me'):
e.value = self.progress / 5.
animate_progress = Progress().animate
clock.schedule(animate_progress)
my_escape.has_exit = False
while not (window.has_exit or my_escape.has_exit):
clock.tick()
window.dispatch_events()
media.dispatch_events()
glClearColor(.2, .2, .2, 1)
glClear(GL_COLOR_BUFFER_BIT)
gui.draw()
fps.draw()
window.flip()
if '--once' in sys.argv:
window.close()
sys.exit()
if '--dump' in sys.argv:
print '-'*75
gui.dump()
print '-'*75
if gui.has('.progress-me'):
clock.unschedule(animate_progress)
# reset everything
window.pop_handlers()
gui.delete()
window.set_size(800, 600)
return window.has_exit
|
KeyError
|
dataset/ETHPy150Open ardekantur/pyglet/contrib/wydget/run_tests.py/run
|
407
|
def gettz(name):
tzinfo = None
if ZONEINFOFILE:
for cachedname, tzinfo in CACHE:
if cachedname == name:
break
else:
tf = TarFile.open(ZONEINFOFILE)
try:
zonefile = tf.extractfile(name)
except __HOLE__:
tzinfo = None
else:
tzinfo = tzfile(zonefile)
tf.close()
CACHE.insert(0, (name, tzinfo))
del CACHE[CACHESIZE:]
return tzinfo
|
KeyError
|
dataset/ETHPy150Open wesabe/fixofx/3rdparty/dateutil/zoneinfo/__init__.py/gettz
|
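A minimal sketch of the masked KeyError above: TarFile.extractfile() raises KeyError when the member name is not in the archive, so an unknown zone name yields tzinfo = None.

import io, tarfile

buf = io.BytesIO()
tarfile.open(fileobj=buf, mode='w').close()           # empty archive
tf = tarfile.open(fileobj=io.BytesIO(buf.getvalue()))
try:
    zonefile = tf.extractfile('America/Nowhere')      # hypothetical zone name
except KeyError:
    tzinfo = None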
408
|
def ExtractGroup(regex, text, group=1):
"""Extracts a float from a regular expression matched to 'text'.
Args:
regex: string or regexp pattern. Regular expression.
text: string. Text to search.
group: int. Group containing a floating point value. Use '0' for the whole
string.
Returns:
A floating point number matched by 'regex' on 'text'.
Raises:
NoMatchError: when 'regex' does not match 'text'.
IndexError: when 'group' is not present in the match.
"""
match = re.search(regex, text)
if not match:
raise NoMatchError('No match for pattern "{0}" in "{1}"'.format(
regex, text))
try:
return match.group(group)
except __HOLE__:
raise IndexError('No such group {0} in "{1}".'.format(group, regex))
|
IndexError
|
dataset/ETHPy150Open GoogleCloudPlatform/PerfKitBenchmarker/perfkitbenchmarker/regex_util.py/ExtractGroup
|
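A minimal sketch of the masked IndexError above: match.group(n) raises IndexError when the pattern has fewer than n capturing groups, which ExtractGroup re-raises with a friendlier message.

import re

m = re.search(r'(\d+)', 'value: 42')
try:
    m.group(2)              # only one group exists
except IndexError:
    pass                    # "no such group"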
409
|
def convert_header_to_unicode(header):
def _decode(value, encoding):
if isinstance(value, six.text_type):
return value
if not encoding or encoding == 'unknown-8bit':
encoding = DEFAULT_CHARSET
return value.decode(encoding, 'replace')
try:
return ''.join(
[
(
_decode(bytestr, encoding)
) for bytestr, encoding in email.header.decode_header(header)
]
)
except __HOLE__:
logger.exception(
'Errors encountered decoding header %s into encoding %s.',
header,
DEFAULT_CHARSET,
)
return unicode(header, DEFAULT_CHARSET, 'replace')
|
UnicodeDecodeError
|
dataset/ETHPy150Open coddingtonbear/django-mailbox/django_mailbox/utils.py/convert_header_to_unicode
|
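A minimal sketch of the masked UnicodeDecodeError above: strict decoding of bytes that are invalid for the charset raises UnicodeDecodeError, and the fallback re-decodes with 'replace'.

raw = b'\xff\xfe'
try:
    text = raw.decode('utf-8')
except UnicodeDecodeError:
    text = raw.decode('utf-8', 'replace')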
410
|
def get_body_from_message(message, maintype, subtype):
"""
Fetches the body message matching the main/sub content type.
"""
body = six.text_type('')
for part in message.walk():
if part.get_content_maintype() == maintype and \
part.get_content_subtype() == subtype:
charset = part.get_content_charset()
this_part = part.get_payload(decode=True)
if charset:
try:
this_part = this_part.decode(charset, 'replace')
except LookupError:
this_part = this_part.decode('ascii', 'replace')
logger.warning(
'Unknown encoding %s encountered while decoding '
'text payload. Interpreting as ASCII with '
'replacement, but some data may not be '
'represented as the sender intended.',
charset
)
except __HOLE__:
this_part = this_part.decode('ascii', 'replace')
logger.warning(
'Error encountered while decoding text '
'payload from an incorrectly-constructed '
'e-mail; payload was converted to ASCII with '
'replacement, but some data may not be '
'represented as the sender intended.'
)
else:
this_part = this_part.decode('ascii', 'replace')
body += this_part
return body
|
ValueError
|
dataset/ETHPy150Open coddingtonbear/django-mailbox/django_mailbox/utils.py/get_body_from_message
|
411
|
def poll(self):
# get today and tomorrow
now = datetime.datetime.now()
tomorrow = now + datetime.timedelta(days=1)
# get reminder time in datetime format
remtime = datetime.timedelta(minutes=self.remindertime)
# parse khal output for the next seven days
# and get the next event
args = ['khal', 'agenda', '--days', str(self.lookahead)]
cal = subprocess.Popen(args, stdout=subprocess.PIPE)
output = cal.communicate()[0]
output = output.decode()
output = output.split('\n')
caldate = output[0]
try:
if output[0] == 'Today:':
date = str(now.month) + '/' + str(now.day) + '/' + \
str(now.year)
elif output[0] == 'Tomorrow:':
date = str(tomorrow.month) + '/' + str(tomorrow.day) + \
'/' + str(tomorrow.year)
else:
date = output[0]
except IndexError:
return 'No appointments scheduled'
for i in range(1, len(output)):
try:
starttime = dateutil.parser.parse(date + ' ' + output[i][:5],
ignoretz=True)
endtime = dateutil.parser.parse(date + ' ' + output[i][6:11],
ignoretz=True)
except __HOLE__:
date = output[i]
caldate = output[i]
continue
if endtime > now:
data = caldate.replace(':', '') + ' ' + output[i]
break
else:
data = 'No appointments in next ' + \
str(self.lookahead) + ' days'
# get rid of any garbage in appointment added by khal
data = ''.join(filter(lambda x: x in string.printable, data))
# colorize the event if it is within reminder time
if (starttime - remtime <= now) and (endtime > now):
self.foreground = utils.hex(self.reminder_color)
else:
self.foreground = self.default_foreground
return data
|
ValueError
|
dataset/ETHPy150Open qtile/qtile/libqtile/widget/khal_calendar.py/KhalCalendar.poll
|
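A minimal sketch of the masked ValueError above (assuming python-dateutil is installed, as the sample requires): the parser raises ValueError for text that is not a time, which the loop uses to recognise date-header lines in khal's output.

import dateutil.parser

try:
    dateutil.parser.parse('no appointments')
except ValueError:
    pass  # not a time; treat the line as a new date header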
412
|
def _get_key(self, kh, cmd):
try:
return self.keys[kh]
except __HOLE__:
raise pyhsm.exception.YHSM_CommandFailed(
pyhsm.defines.cmd2str(cmd),
pyhsm.defines.YSM_KEY_HANDLE_INVALID)
|
KeyError
|
dataset/ETHPy150Open Yubico/python-pyhsm/pyhsm/soft_hsm.py/SoftYHSM._get_key
|
413
|
def _populate_cache(self):
"""
Populates the result cache with ``ITER_CHUNK_SIZE`` more entries
(until the cursor is exhausted).
"""
if self._result_cache is None:
self._result_cache = []
if self._has_more:
try:
for i in xrange(ITER_CHUNK_SIZE):
self._result_cache.append(self.next())
except __HOLE__:
self._has_more = False
|
StopIteration
|
dataset/ETHPy150Open MongoEngine/mongoengine/mongoengine/queryset/queryset.py/QuerySet._populate_cache
|
414
|
def __repr__(self):
"""Provides the string representation of the QuerySet
.. versionchanged:: 0.6.13 Now doesn't modify the cursor
"""
if self._iter:
return '.. queryset mid-iteration ..'
data = []
for i in xrange(REPR_OUTPUT_SIZE + 1):
try:
data.append(self.next())
except __HOLE__:
break
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
self.rewind()
return repr(data)
|
StopIteration
|
dataset/ETHPy150Open MongoEngine/mongoengine/mongoengine/queryset/queryset.py/QuerySetNoCache.__repr__
|
415
|
def test_or(self):
i = self.s.union(self.otherword)
self.assertEqual(self.s | set(self.otherword), i)
self.assertEqual(self.s | frozenset(self.otherword), i)
try:
self.s | self.otherword
except __HOLE__:
pass
else:
self.fail("s|t did not screen-out general iterables")
|
TypeError
|
dataset/ETHPy150Open ofermend/medicare-demo/socialite/jython/Lib/test/test_set.py/TestJointOps.test_or
|
416
|
def test_and(self):
i = self.s.intersection(self.otherword)
self.assertEqual(self.s & set(self.otherword), i)
self.assertEqual(self.s & frozenset(self.otherword), i)
try:
self.s & self.otherword
except __HOLE__:
pass
else:
self.fail("s&t did not screen-out general iterables")
|
TypeError
|
dataset/ETHPy150Open ofermend/medicare-demo/socialite/jython/Lib/test/test_set.py/TestJointOps.test_and
|
417
|
def test_sub(self):
i = self.s.difference(self.otherword)
self.assertEqual(self.s - set(self.otherword), i)
self.assertEqual(self.s - frozenset(self.otherword), i)
try:
self.s - self.otherword
except __HOLE__:
pass
else:
self.fail("s-t did not screen-out general iterables")
|
TypeError
|
dataset/ETHPy150Open ofermend/medicare-demo/socialite/jython/Lib/test/test_set.py/TestJointOps.test_sub
|
418
|
def test_xor(self):
i = self.s.symmetric_difference(self.otherword)
self.assertEqual(self.s ^ set(self.otherword), i)
self.assertEqual(self.s ^ frozenset(self.otherword), i)
try:
self.s ^ self.otherword
except __HOLE__:
pass
else:
self.fail("s^t did not screen-out general iterables")
|
TypeError
|
dataset/ETHPy150Open ofermend/medicare-demo/socialite/jython/Lib/test/test_set.py/TestJointOps.test_xor
|
419
|
def test_remove_keyerror_unpacking(self):
# bug: www.python.org/sf/1576657
for v1 in ['Q', (1,)]:
try:
self.s.remove(v1)
except __HOLE__, e:
v2 = e.args[0]
self.assertEqual(v1, v2)
else:
self.fail()
|
KeyError
|
dataset/ETHPy150Open ofermend/medicare-demo/socialite/jython/Lib/test/test_set.py/TestSet.test_remove_keyerror_unpacking
|
420
|
def test_changingSizeWhileIterating(self):
s = set([1,2,3])
try:
for i in s:
s.update([4])
except __HOLE__:
pass
else:
self.fail("no exception when changing size during iteration")
#==============================================================================
|
RuntimeError
|
dataset/ETHPy150Open ofermend/medicare-demo/socialite/jython/Lib/test/test_set.py/TestExceptionPropagation.test_changingSizeWhileIterating
|
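A minimal sketch of the masked RuntimeError above: CPython raises RuntimeError ("Set changed size during iteration") when a set is mutated while it is being iterated.

s = set([1, 2, 3])
try:
    for i in s:
        s.add(i + 10)
except RuntimeError:
    pass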
421
|
def test_update_operator(self):
try:
self.set |= self.other
except __HOLE__:
pass
else:
self.fail("expected TypeError")
|
TypeError
|
dataset/ETHPy150Open ofermend/medicare-demo/socialite/jython/Lib/test/test_set.py/TestOnlySetsInBinaryOps.test_update_operator
|
422
|
def test_intersection_update_operator(self):
try:
self.set &= self.other
except __HOLE__:
pass
else:
self.fail("expected TypeError")
|
TypeError
|
dataset/ETHPy150Open ofermend/medicare-demo/socialite/jython/Lib/test/test_set.py/TestOnlySetsInBinaryOps.test_intersection_update_operator
|
423
|
def test_sym_difference_update_operator(self):
try:
self.set ^= self.other
except __HOLE__:
pass
else:
self.fail("expected TypeError")
|
TypeError
|
dataset/ETHPy150Open ofermend/medicare-demo/socialite/jython/Lib/test/test_set.py/TestOnlySetsInBinaryOps.test_sym_difference_update_operator
|
424
|
def test_difference_update_operator(self):
try:
self.set -= self.other
except __HOLE__:
pass
else:
self.fail("expected TypeError")
|
TypeError
|
dataset/ETHPy150Open ofermend/medicare-demo/socialite/jython/Lib/test/test_set.py/TestOnlySetsInBinaryOps.test_difference_update_operator
|
425
|
@contextlib.contextmanager
def uncache(*names):
"""Uncache a module from sys.modules.
A basic sanity check is performed to prevent uncaching modules that either
cannot/shouldn't be uncached.
"""
for name in names:
if name in ('sys', 'marshal', 'imp'):
raise ValueError(
"cannot uncache {0} as it will break _importlib".format(name))
try:
del sys.modules[name]
except __HOLE__:
pass
try:
yield
finally:
for name in names:
try:
del sys.modules[name]
except KeyError:
pass
|
KeyError
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_importlib.py/uncache
|
426
|
def __init__(self):
m = mapnik.Map(256, 256)
try:
mapnik.load_map(m, str(self.mapfile))
except __HOLE__:
pass
m.buffer_size = 128
m.srs = '+init=epsg:3857'
self.map = m
|
RuntimeError
|
dataset/ETHPy150Open bkg/django-spillway/spillway/carto.py/Map.__init__
|
427
|
def layer(self, queryset, stylename=None):
cls = VectorLayer if hasattr(queryset, 'geojson') else RasterLayer
layer = cls(queryset)
stylename = stylename or layer.stylename
try:
style = self.map.find_style(stylename)
except __HOLE__:
self.map.append_style(stylename, layer.style())
layer.styles.append(stylename)
self.map.layers.append(layer._layer)
return layer
|
KeyError
|
dataset/ETHPy150Open bkg/django-spillway/spillway/carto.py/Map.layer
|
428
|
def get_logging_fields(self, model):
"""
Returns a dictionary mapping of the fields that are used for
keeping the actual audit log entries.
"""
rel_name = '_%s_audit_log_entry'%model._meta.object_name.lower()
def entry_instance_to_unicode(log_entry):
try:
result = '%s: %s %s at %s'%(model._meta.object_name,
log_entry.object_state,
log_entry.get_action_type_display().lower(),
log_entry.action_date,
)
except __HOLE__:
result = '%s %s at %s'%(model._meta.object_name,
log_entry.get_action_type_display().lower(),
log_entry.action_date
)
return result
action_user_field = LastUserField(related_name = rel_name, editable = False)
#check if the manager has been attached to auth user model
if [model._meta.app_label, model.__name__] == getattr(settings, 'AUTH_USER_MODEL', 'auth.User').split("."):
action_user_field = LastUserField(related_name = rel_name, editable = False, to = 'self')
return {
'action_id' : models.AutoField(primary_key = True),
'action_date' : models.DateTimeField(default = datetime_now, editable = False, blank=False),
'action_user' : action_user_field,
'action_type' : models.CharField(max_length = 1, editable = False, choices = (
('I', _('Created')),
('U', _('Changed')),
('D', _('Deleted')),
)),
'object_state' : LogEntryObjectDescriptor(model),
'__unicode__' : entry_instance_to_unicode,
}
|
AttributeError
|
dataset/ETHPy150Open Atomidata/django-audit-log/audit_log/models/managers.py/AuditLog.get_logging_fields
|
429
|
def validate():
"""
Validates the wercker.json file by doing the following:
* Check whether there is a git repository in the current directory or up.
* Check whether there is a wercker.json file in that root.
* Check whether the size of that file is greater that zero.
* Check whether the wercker.json file contains valid json.
Currently this command doesn't validate the wercker.json file against
a schema. But you can expect this in the future.
"""
term = get_term()
git_root_path = find_git_root(os.curdir)
if not git_root_path:
puts(term.red("Error: ") + "Could not find a git repository")
return
wercker_json_path = os.path.join(git_root_path, "wercker.json")
if os.path.exists(wercker_json_path) is False:
puts(term.yellow("Warning: ") + " Could not find a wercker.json file")
return
if os.path.getsize(wercker_json_path) == 0:
puts(term.red("Error: ") + "wercker.json is found, but empty")
return
try:
with open(wercker_json_path) as f:
try:
json.load(f)
puts(term.green("wercker.json is found and valid!"))
except __HOLE__ as e:
puts(term.red("Error: ") + "wercker.json is not valid json: " +
e.message)
except IOError as e:
puts(term.red("Error: ") + "Error while reading wercker.json file: " +
e.message)
|
ValueError
|
dataset/ETHPy150Open wercker/wercker-cli/werckercli/commands/validate.py/validate
|
430
|
def test_passes_through_unhandled_errors(self):
try:
with self.breaker:
raise RuntimeError("error")
except __HOLE__:
self.assertEquals(len(self.breaker.errors), 0)
else:
self.assertTrue(False, "exception not raised")
|
RuntimeError
|
dataset/ETHPy150Open edgeware/python-circuit/circuit/test/test_breaker.py/CircuitBreakerTestCase.test_passes_through_unhandled_errors
|
431
|
def test_catches_handled_errors(self):
try:
with self.breaker:
raise IOError("error")
except __HOLE__:
self.assertEquals(len(self.breaker.errors), 1)
else:
self.assertTrue(False, "exception not raised")
|
IOError
|
dataset/ETHPy150Open edgeware/python-circuit/circuit/test/test_breaker.py/CircuitBreakerTestCase.test_catches_handled_errors
|
432
|
@view_config(context=Root, request_method='POST', subpath=(), renderer='json')
@view_config(context=SimpleResource, request_method='POST', subpath=(),
renderer='json')
@argify
def upload(request, content, name=None, version=None):
""" Handle update commands """
action = request.param(':action', 'file_upload')
# Direct uploads from the web UI go here, and don't have a name/version
if name is None or version is None:
name, version = parse_filename(content.filename)
else:
name = normalize_name(name)
if action == 'file_upload':
if not request.access.has_permission(name, 'write'):
return request.forbid()
try:
return request.db.upload(content.filename, content.file, name=name,
version=version)
except __HOLE__ as e:
return HTTPBadRequest(*e.args)
else:
return HTTPBadRequest("Unknown action '%s'" % action)
|
ValueError
|
dataset/ETHPy150Open mathcamp/pypicloud/pypicloud/views/simple.py/upload
|
433
|
@property
def message(self):
try:
return self.args[0]
except __HOLE__:
return ""
|
IndexError
|
dataset/ETHPy150Open kdart/pycopia/WWW/pycopia/WWW/framework.py/HTTPError.message
|
434
|
def __delitem__(self, header):
try:
del self.headers[header]
except __HOLE__:
pass
|
IndexError
|
dataset/ETHPy150Open kdart/pycopia/WWW/pycopia/WWW/framework.py/HttpResponse.__delitem__
|
435
|
def close(self):
try:
self._container.close()
except __HOLE__:
pass
|
AttributeError
|
dataset/ETHPy150Open kdart/pycopia/WWW/pycopia/WWW/framework.py/HttpResponse.close
|
436
|
def __getitem__(self, key):
for d in (self.POST, self.GET):
try:
return d[key]
except __HOLE__:
pass
raise KeyError("%s not found in either POST or GET" % key)
|
KeyError
|
dataset/ETHPy150Open kdart/pycopia/WWW/pycopia/WWW/framework.py/HTTPRequest.__getitem__
|
437
|
def _get_get(self):
try:
return self._get
except __HOLE__:
# The WSGI spec says 'QUERY_STRING' may be absent.
self._get = urlparse.queryparse(self.environ.get(b'QUERY_STRING', b''))
return self._get
|
AttributeError
|
dataset/ETHPy150Open kdart/pycopia/WWW/pycopia/WWW/framework.py/HTTPRequest._get_get
|
438
|
def _get_post(self):
try:
return self._post
except __HOLE__:
self._parse_post_content()
return self._post
|
AttributeError
|
dataset/ETHPy150Open kdart/pycopia/WWW/pycopia/WWW/framework.py/HTTPRequest._get_post
|
439
|
def _get_cookies(self):
try:
return self._cookies
except __HOLE__:
self._cookies = cookies = {}
for cookie in httputils.parse_cookie(self.environ.get(b'HTTP_COOKIE', b'')):
cookies[cookie.name] = cookie.value
return cookies
|
AttributeError
|
dataset/ETHPy150Open kdart/pycopia/WWW/pycopia/WWW/framework.py/HTTPRequest._get_cookies
|
440
|
def _get_files(self):
try:
return self._files
except __HOLE__:
self._parse_post_content()
return self._files
|
AttributeError
|
dataset/ETHPy150Open kdart/pycopia/WWW/pycopia/WWW/framework.py/HTTPRequest._get_files
|
441
|
def _get_raw_post_data(self):
try:
content_length = int(self.environ.get(b"CONTENT_LENGTH"))
except __HOLE__: # if CONTENT_LENGTH was empty string or not an integer
raise HttpErrorLengthRequired("A Content-Length header is required.")
return self.environ[b'wsgi.input'].read(content_length)
|
ValueError
|
dataset/ETHPy150Open kdart/pycopia/WWW/pycopia/WWW/framework.py/HTTPRequest._get_raw_post_data
|
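A minimal sketch of the masked ValueError above: int() raises ValueError for an empty or non-numeric CONTENT_LENGTH value, which the handler translates into 411 Length Required.

try:
    content_length = int('')        # empty CONTENT_LENGTH header
except ValueError:
    content_length = None           # raise HttpErrorLengthRequired instead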
442
|
def _get_headers(self):
try:
return self._headers
except __HOLE__:
self._headers = hdrs = httputils.Headers()
for k, v in self.environ.iteritems():
if k.startswith(b"HTTP"):
hdrs.append(httputils.make_header(k[5:].replace(b"_", b"-").lower(), v))
return self._headers
|
AttributeError
|
dataset/ETHPy150Open kdart/pycopia/WWW/pycopia/WWW/framework.py/HTTPRequest._get_headers
|
443
|
def unregister(self, method):
if isinstance(method, basestring):
method = get_method(method)
try:
m = self._reverse[method]
except __HOLE__:
return # not registered anyway
else:
del self._reverse[method]
i = 0
for urlmap in self._patterns:
if urlmap._method is m:
break
else:
i += 1
del self._patterns[i]
|
KeyError
|
dataset/ETHPy150Open kdart/pycopia/WWW/pycopia/WWW/framework.py/URLResolver.unregister
|
444
|
def get_url(self, method, **kwargs):
"""Reverse mapping. Answers the question: How do I reach the
callable object mapped to in the LOCATIONMAP?
"""
if isinstance(method, basestring):
if "." in method:
method = get_method(method)
else:
try:
urlmap = self._aliases[method]
except __HOLE__:
raise InvalidPath("Alias not registered")
return urlmap.get_url(**kwargs)
try:
urlmap = self._reverse[method]
except KeyError:
raise InvalidPath("Method %r not registered." % (method,))
return self._urlbase + urlmap.get_url(**kwargs)
|
KeyError
|
dataset/ETHPy150Open kdart/pycopia/WWW/pycopia/WWW/framework.py/URLResolver.get_url
|
445
|
def get_alias(self, name, **kwargs):
try:
urlmap = self._aliases[name]
except __HOLE__:
raise InvalidPath("Alias not registered")
return urlmap.get_url(**kwargs)
|
KeyError
|
dataset/ETHPy150Open kdart/pycopia/WWW/pycopia/WWW/framework.py/URLResolver.get_alias
|
446
|
def __call__(self, request, **kwargs):
meth = self._methods.get(request.method, self._invalid)
try:
return meth(request, **kwargs)
except __HOLE__:
return HttpResponseNotAllowed(self._implemented)
|
NotImplementedError
|
dataset/ETHPy150Open kdart/pycopia/WWW/pycopia/WWW/framework.py/RequestHandler.__call__
|
447
|
def get(self, request, function):
try:
handler = self._mapping[function]
except __HOLE__:
request.log_error("No JSON handler for %r.\n" % function)
return JSON404()
kwargs = JSONQuery(request)
try:
return JSONResponse(handler(request, **kwargs))
except:
ex, val, tb = sys.exc_info()
tblist = traceback.extract_tb(tb)
del tb
request.log_error("JSON handler error: %s: %s\n" % (ex, val))
return JSONServerError(ex, val, tblist)
|
KeyError
|
dataset/ETHPy150Open kdart/pycopia/WWW/pycopia/WWW/framework.py/JSONRequestHandler.get
|
448
|
def get_large_icon(self, name):
try:
namepair = self.config.ICONMAP["large"][name]
except __HOLE__:
namepair = self.config.ICONMAP["large"]["default"]
return self._doc.nodemaker(b"Img", {"src": self.resolver.get_url("images", name=namepair[1]),
"alt":name, "width":"24", "height":"24"})
|
KeyError
|
dataset/ETHPy150Open kdart/pycopia/WWW/pycopia/WWW/framework.py/ResponseDocument.get_large_icon
|
449
|
def get_medium_icon(self, name):
try:
filename = self.config.ICONMAP["medium"][name]
except __HOLE__:
filename = self.config.ICONMAP["medium"]["default"]
return self._doc.nodemaker(b"Img", {"src": self.resolver.get_url("images", name=filename),
"alt":name, "width":"16", "height":"16"})
|
KeyError
|
dataset/ETHPy150Open kdart/pycopia/WWW/pycopia/WWW/framework.py/ResponseDocument.get_medium_icon
|
450
|
def get_small_icon(self, name):
try:
filename = self.config.ICONMAP["small"][name]
except __HOLE__:
filename = self.config.ICONMAP["small"]["default"]
return self._doc.nodemaker(b"Img", {"src": self.resolver.get_url("images", name=filename),
"alt":name, "width":"10", "height":"10"})
|
KeyError
|
dataset/ETHPy150Open kdart/pycopia/WWW/pycopia/WWW/framework.py/ResponseDocument.get_small_icon
|
451
|
def _get_module(name):
try:
return sys.modules[name]
except __HOLE__:
pass
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
|
KeyError
|
dataset/ETHPy150Open kdart/pycopia/WWW/pycopia/WWW/framework.py/_get_module
|
452
|
def handle(self, requests):
logging.debug("[zmq] <~ self%s" % ''.join([format_method(*req) for req in requests]))
# loop request chain
module = self.server.module
result = module
parsed = module.name
for method, args, kwargs in requests:
# parse request
try:
if method == '__dir':
result = dir(result, *args, **kwargs)
elif method == '__len':
result = len(result, *args, **kwargs)
elif method == '__set':
result = setattr(result, *args, **kwargs)
elif method == '__del':
result = delattr(result, *args, **kwargs)
else:
try: result = getattr(result, method)
except AttributeError:
parsed += '.' + method
raise
else:
parsed += format_method(method, args, kwargs)
result = result(*args, **kwargs)
except __HOLE__:
msg = 'AttributeError: \'%s\'' % parsed
logging.error(msg)
module.alert(msg)
raise ReqError(parsed)
except PyscaleError as ex:
msg = ''.join(traceback.format_exception_only(type(ex), ex)).strip()
logging.error(msg)
module.alert(msg)
raise ReqError(parsed)
except Exception as ex:
msg = traceback.format_exc()
logging.exception(msg)
module.error(msg)
raise ReqError(parsed)
return result
|
AttributeError
|
dataset/ETHPy150Open alexcepoi/pyscale/pyscale/zmq/rpc.py/RpcWorker.handle
|
453
|
def do_get_member_for(parser, token):
try:
# Splitting by None == splitting by spaces.
tag_name, arg = token.contents.split(None, 1)
except __HOLE__:
raise template.TemplateSyntaxError, "%r tag requires arguments" % token.contents.split()[0]
m = re.search(r'(\w+) as (\w+)', arg)
if not m:
raise template.TemplateSyntaxError, "%r tag had invalid arguments" % tag_name
object_var_name, return_var_name = m.groups()
return GetMemberFor(object_var_name, return_var_name)
|
ValueError
|
dataset/ETHPy150Open ofri/Open-Knesset/mks/templatetags/mks_tags.py/do_get_member_for
|
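A minimal sketch of the masked ValueError above: unpacking the result of split(None, 1) raises ValueError when the tag was used without any arguments after its name.

try:
    tag_name, arg = 'get_member_for'.split(None, 1)
except ValueError:
    pass  # "tag requires arguments"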
454
|
@staticmethod
def get_board(id_):
try:
return DeviceService.get_boards()[id_]
except __HOLE__:
raise BoardUnknownId(id_)
|
KeyError
|
dataset/ETHPy150Open smartanthill/smartanthill1_0/smartanthill/device/service.py/DeviceService.get_board
|
455
|
def test_is_forest():
# generate chain
chain = np.c_[np.arange(1, 10), np.arange(9)]
assert_true(is_forest(chain, len(chain) + 1))
assert_true(is_forest(chain))
# generate circle
circle = np.vstack([chain, [9, 0]])
assert_false(is_forest(circle))
assert_false(is_forest(circle, len(chain) + 1))
# union of two disjoint chains
two_chains = np.vstack([chain, chain + 10])
assert_true(is_forest(two_chains, 20))
# union of chain and circle
disco_graph = np.vstack([chain, circle + 10])
assert_false(is_forest(disco_graph))
# generate random fully connected graph
graph = np.random.uniform(size=(10, 10))
edges = np.c_[graph.nonzero()]
assert_false(is_forest(edges))
try:
from scipy.sparse.csgraph import minimum_spanning_tree
tree = minimum_spanning_tree(sparse.csr_matrix(graph))
tree_edges = np.c_[tree.nonzero()]
assert_true(is_forest(tree_edges, 10))
assert_true(is_forest(tree_edges))
except __HOLE__:
pass
|
ImportError
|
dataset/ETHPy150Open pystruct/pystruct/pystruct/tests/test_inference/test_maxprod.py/test_is_forest
|
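A minimal sketch of the masked ImportError above: importing an optional dependency inside try/except ImportError lets the scipy-backed assertions run only when scipy is available.

try:
    from scipy.sparse.csgraph import minimum_spanning_tree
except ImportError:
    minimum_spanning_tree = None    # skip the scipy-dependent checks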
456
|
def _format_peer_cidrs(ipsec_site_connection):
try:
return '\n'.join([jsonutils.dumps(cidrs) for cidrs in
ipsec_site_connection['peer_cidrs']])
except (TypeError, __HOLE__):
return ''
|
KeyError
|
dataset/ETHPy150Open openstack/python-neutronclient/neutronclient/neutron/v2_0/vpn/ipsec_site_connection.py/_format_peer_cidrs
|
457
|
def _read_incdec(self, reader):
response_line = reader.read_line()
try:
return MemcacheResult.OK, int(response_line)
except __HOLE__:
return MemcacheResult.get(response_line), None
|
ValueError
|
dataset/ETHPy150Open hrosenhorn/gevent-memcache/lib/geventmemcache/protocol.py/MemcacheTextProtocol._read_incdec
|
458
|
def readfileintodict(filename, d):
''' Read key=value pairs from a file, into a dict.
Skip comments; strip newline characters and spacing.
'''
with open(filename, 'r') as f:
lines = f.readlines()
for l in lines:
if l[:1] == '#':
continue
try:
key, value = l.split('=', 1)
d[key] = value.strip()
except __HOLE__:
pass
|
ValueError
|
dataset/ETHPy150Open coreemu/core/daemon/core/misc/utils.py/readfileintodict
|
459
|
def get_object(self):
try:
return self._object
except __HOLE__:
transfer_id = self.kwargs['transfer_id']
try:
self._object = cinder.transfer_get(self.request, transfer_id)
return self._object
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve volume transfer.'))
|
AttributeError
|
dataset/ETHPy150Open openstack/horizon/openstack_dashboard/dashboards/project/volumes/volumes/views.py/ShowTransferView.get_object
|
460
|
def start(self):
"""Start the app for the stop subcommand."""
try:
pid = self.get_pid_from_file()
except PIDFileError:
self.log.critical(
'Could not read pid file, cluster is probably not running.'
)
# Here I exit with a unusual exit status that other processes
# can watch for to learn how I existed.
self.remove_pid_file()
self.exit(ALREADY_STOPPED)
if not self.check_pid(pid):
self.log.critical(
'Cluster [pid=%r] is not running.' % pid
)
self.remove_pid_file()
# Here I exit with a unusual exit status that other processes
# can watch for to learn how I existed.
self.exit(ALREADY_STOPPED)
elif os.name=='posix':
sig = self.signal
self.log.info(
"Stopping cluster [pid=%r] with [signal=%r]" % (pid, sig)
)
try:
os.kill(pid, sig)
except __HOLE__:
self.log.error("Stopping cluster failed, assuming already dead.",
exc_info=True)
self.remove_pid_file()
elif os.name=='nt':
try:
# kill the whole tree
p = check_call(['taskkill', '-pid', str(pid), '-t', '-f'], stdout=PIPE,stderr=PIPE)
except (CalledProcessError, OSError):
self.log.error("Stopping cluster failed, assuming already dead.",
exc_info=True)
self.remove_pid_file()
|
OSError
|
dataset/ETHPy150Open ipython/ipython-py3k/IPython/parallel/apps/ipclusterapp.py/IPClusterStop.start
|
461
|
def build_launcher(self, clsname):
"""import and instantiate a Launcher based on importstring"""
if '.' not in clsname:
# not a module, presume it's the raw name in apps.launcher
clsname = 'IPython.parallel.apps.launcher.'+clsname
# print repr(clsname)
try:
klass = import_item(clsname)
except (__HOLE__, KeyError):
self.log.fatal("Could not import launcher class: %r"%clsname)
self.exit(1)
launcher = klass(
work_dir='.', config=self.config, log=self.log
)
return launcher
|
ImportError
|
dataset/ETHPy150Open ipython/ipython-py3k/IPython/parallel/apps/ipclusterapp.py/IPClusterEngines.build_launcher
|
462
|
def start(self):
"""Start the app for the engines subcommand."""
self.log.info("IPython cluster: started")
# First see if the cluster is already running
# Now log and daemonize
self.log.info(
'Starting engines with [daemon=%r]' % self.daemonize
)
# TODO: Get daemonize working on Windows or as a Windows Server.
if self.daemonize:
if os.name=='posix':
daemonize()
dc = ioloop.DelayedCallback(self.start_engines, 0, self.loop)
dc.start()
# Now write the new pid file AFTER our new forked pid is active.
# self.write_pid_file()
try:
self.loop.start()
except __HOLE__:
pass
except zmq.ZMQError as e:
if e.errno == errno.EINTR:
pass
else:
raise
|
KeyboardInterrupt
|
dataset/ETHPy150Open ipython/ipython-py3k/IPython/parallel/apps/ipclusterapp.py/IPClusterEngines.start
|
463
|
def start(self):
"""Start the app for the start subcommand."""
# First see if the cluster is already running
try:
pid = self.get_pid_from_file()
except PIDFileError:
pass
else:
if self.check_pid(pid):
self.log.critical(
'Cluster is already running with [pid=%s]. '
'use "ipcluster stop" to stop the cluster.' % pid
)
# Here I exit with a unusual exit status that other processes
# can watch for to learn how I existed.
self.exit(ALREADY_STARTED)
else:
self.remove_pid_file()
# Now log and daemonize
self.log.info(
'Starting ipcluster with [daemon=%r]' % self.daemonize
)
# TODO: Get daemonize working on Windows or as a Windows Server.
if self.daemonize:
if os.name=='posix':
daemonize()
dc = ioloop.DelayedCallback(self.start_controller, 0, self.loop)
dc.start()
dc = ioloop.DelayedCallback(self.start_engines, 1000*self.delay, self.loop)
dc.start()
# Now write the new pid file AFTER our new forked pid is active.
self.write_pid_file()
try:
self.loop.start()
except __HOLE__:
pass
except zmq.ZMQError as e:
if e.errno == errno.EINTR:
pass
else:
raise
finally:
self.remove_pid_file()
|
KeyboardInterrupt
|
dataset/ETHPy150Open ipython/ipython-py3k/IPython/parallel/apps/ipclusterapp.py/IPClusterStart.start
|
464
|
def parse_lookup(self, lookup):
try:
section, name = lookup.split(
preferences_settings.SECTION_KEY_SEPARATOR)
except __HOLE__:
name = lookup
section = None
return section, name
|
ValueError
|
dataset/ETHPy150Open EliotBerriot/django-dynamic-preferences/dynamic_preferences/managers.py/PreferencesManager.parse_lookup
|
465
|
def load_from_db(self):
"""Return a dictionary of preferences by section directly from DB"""
a = {}
db_prefs = {p.preference.identifier(): p for p in self.queryset}
for preference in self.registry.preferences():
try:
db_pref = db_prefs[preference.identifier()]
except __HOLE__:
db_pref = self.create_db_pref(
section=preference.section.name, name=preference.name, value=preference.default)
self.to_cache(db_pref)
a[preference.identifier()] = self.from_cache(
preference.section.name, preference.name)
return a
|
KeyError
|
dataset/ETHPy150Open EliotBerriot/django-dynamic-preferences/dynamic_preferences/managers.py/PreferencesManager.load_from_db
|
466
|
def get(self, *args, **kwargs):
angular_modules = []
js_files = []
for app_config in apps.get_app_configs():
# Add the angular app, if the module has one.
if getattr(app_config,
'angular_{}_module'.format(kwargs.get('openslides_app')),
False):
angular_modules.append('OpenSlidesApp.{app_name}.{app}'.format(
app=kwargs.get('openslides_app'),
app_name=app_config.label))
# Add all js files that the module needs
try:
app_js_files = app_config.js_files
except __HOLE__:
# The app needs no js-files
pass
else:
js_files += [
'{static}{path}'.format(
static=settings.STATIC_URL,
path=path)
for path in app_js_files]
# Use javascript loadScript function from
# http://balpha.de/2011/10/jquery-script-insertion-and-its-consequences-for-debugging/
return HttpResponse(
"""
var loadScript = function (path) {
var result = $.Deferred(),
script = document.createElement("script");
script.async = "async";
script.type = "text/javascript";
script.src = path;
script.onload = script.onreadystatechange = function(_, isAbort) {
if (!script.readyState || /loaded|complete/.test(script.readyState)) {
if (isAbort)
result.reject();
else
result.resolve();
}
};
script.onerror = function () { result.reject(); };
$("head")[0].appendChild(script);
return result.promise();
};
""" +
"""
angular.module('OpenSlidesApp.{app}', {angular_modules});
var deferres = [];
{js_files}.forEach( function(js_file) {{ deferres.push(loadScript(js_file)); }} );
$.when.apply(this,deferres).done(function() {{
angular.bootstrap(document,['OpenSlidesApp.{app}']);
}} );
"""
.format(
app=kwargs.get('openslides_app'),
angular_modules=angular_modules,
js_files=js_files))
# Viewsets for the REST API
|
AttributeError
|
dataset/ETHPy150Open OpenSlides/OpenSlides/openslides/core/views.py/AppsJsView.get
|
467
|
@detail_route(methods=['post'])
def update_elements(self, request, pk):
"""
REST API operation to update projector elements. It expects a POST
request to /rest/core/projector/<pk>/update_elements/ with a
dictonary to update the projector config. This must be a dictionary
with UUIDs as keys and projector element dictionaries as values.
Example:
{
"191c0878cdc04abfbd64f3177a21891a": {
"name": "core/countdown",
"stable": true,
"status": "running",
"countdown_time": 1374321600.0,
"visable": true,
"default": 42
}
}
"""
if not isinstance(request.data, dict):
raise ValidationError({'detail': 'Data must be a dictionary.'})
error = {'detail': 'Data must be a dictionary with UUIDs as keys and dictionaries as values.'}
for key, value in request.data.items():
try:
uuid.UUID(hex=str(key))
except __HOLE__:
raise ValidationError(error)
if not isinstance(value, dict):
raise ValidationError(error)
projector_instance = self.get_object()
projector_config = projector_instance.config
for key, value in request.data.items():
if key not in projector_config:
raise ValidationError({'detail': 'Invalid projector element. Wrong UUID.'})
projector_config[key].update(request.data[key])
serializer = self.get_serializer(projector_instance, data={'config': projector_config}, partial=False)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data)
|
ValueError
|
dataset/ETHPy150Open OpenSlides/OpenSlides/openslides/core/views.py/ProjectorViewSet.update_elements
|
468
|
@detail_route(methods=['post'])
def deactivate_elements(self, request, pk):
"""
REST API operation to deactivate projector elements. It expects a
POST request to /rest/core/projector/<pk>/deactivate_elements/ with
a list of hex UUIDs. These are the projector_elements in the config
that should be deleted.
"""
if not isinstance(request.data, list):
raise ValidationError({'detail': 'Data must be a list of hex UUIDs.'})
for item in request.data:
try:
uuid.UUID(hex=str(item))
except __HOLE__:
raise ValidationError({'detail': 'Data must be a list of hex UUIDs.'})
projector_instance = self.get_object()
projector_config = projector_instance.config
for key in request.data:
try:
del projector_config[key]
except KeyError:
raise ValidationError({'detail': 'Invalid UUID.'})
serializer = self.get_serializer(projector_instance, data={'config': projector_config}, partial=False)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data)
|
ValueError
|
dataset/ETHPy150Open OpenSlides/OpenSlides/openslides/core/views.py/ProjectorViewSet.deactivate_elements
|
469
|
@staticmethod
def try_login(username, password):
conn = get_ldap_connection(configuration.get("ldap", "bind_user"), configuration.get("ldap", "bind_password"))
search_filter = "(&({0})({1}={2}))".format(
configuration.get("ldap", "user_filter"),
configuration.get("ldap", "user_name_attr"),
username
)
search_scopes = {
"LEVEL": LEVEL,
"SUBTREE": SUBTREE,
"BASE": BASE
}
search_scope = LEVEL
if configuration.has_option("ldap", "search_scope"):
search_scope = SUBTREE if configuration.get("ldap", "search_scope") == "SUBTREE" else LEVEL
# todo: BASE or ONELEVEL?
res = conn.search(configuration.get("ldap", "basedn"), search_filter, search_scope=search_scope)
# todo: use list or result?
if not res:
LOG.info("Cannot find user %s", username)
raise AuthenticationError("Invalid username or password")
entry = conn.response[0]
conn.unbind()
if not 'dn' in entry:
# The search fitler for the user did not return any values, so an
# invalid user was used for credentials.
raise AuthenticationError("Invalid username or password")
try:
conn = get_ldap_connection(entry['dn'], password)
except __HOLE__ as e:
LOG.error("""
Unable to parse LDAP structure. If you're using Active Directory and not specifying an OU, you must set search_scope=SUBTREE in airflow.cfg.
%s
""" % traceback.format_exc())
raise LdapException("Could not parse LDAP structure. Try setting search_scope in airflow.cfg, or check logs")
if not conn:
LOG.info("Password incorrect for user %s", username)
raise AuthenticationError("Invalid username or password")
|
KeyError
|
dataset/ETHPy150Open airbnb/airflow/airflow/contrib/auth/backends/ldap_auth.py/LdapUser.try_login
|
470
|
def parse_datetime(d):
"""
Parse a datetime as formatted in one of the following formats:
date: %Y-%m-%d'
datetime: '%Y-%m-%d %H:%M:%S'
datetime with microseconds: '%Y-%m-%d %H:%M:%S.%f'
Can also handle a datetime.date or datetime.datetime object,
(or anything that has year, month and day attributes)
and converts it to datetime.datetime
"""
if hasattr(d, "year") and hasattr(d, "month") and hasattr(d, "day"):
return datetime.datetime(d.year, d.month, d.day)
try:
return datetime.datetime.strptime(
d, AnalysisClientBase.DATETIME_MSEC_FMT)
except ValueError: pass
try:
return datetime.datetime.strptime(d, AnalysisClientBase.DATETIME_FMT)
except ValueError: pass
try:
return datetime.datetime.strptime(d, AnalysisClientBase.DATE_FMT)
except __HOLE__:
raise ValueError("Date '%s' does not match format '%s'" % (
d, "%Y-%m-%d[ %H:%M:%S[.%f]]'"))
|
ValueError
|
dataset/ETHPy150Open StackStorm/st2contrib/packs/lastline/actions/lib/analysis_apiclient.py/parse_datetime
|
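A minimal sketch of the masked ValueError above: strptime raises ValueError when the text does not match the format string, so parse_datetime can fall through a chain of candidate formats.

import datetime

try:
    datetime.datetime.strptime('2016-04-01 12:00', '%Y-%m-%d')
except ValueError:
    pass  # fall through to the next candidate format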
471
|
def get_completed(self, after, before):
"""
Return scores of tasks completed in the specified time range.
This takes care of using the analysis API's pagination
to make sure it gets all tasks.
:param after: datetime.datetime
:param before: datetime.datetime
:yield: sequence of `CompletedTask`
:raise: InvalidAnalysisAPIResponse if response
does not have the format we expect
"""
try:
while True:
result = self.__analysis_client.get_completed(
after=after,
before=before,
include_score=True)
data = result["data"]
tasks = data["tasks"]
if not tasks:
break
for task_uuid, score in tasks.iteritems():
yield CompletedTask(task_uuid=task_uuid,
score=score)
more = int(data["more_results_available"])
if not more:
break
last_ts = parse_datetime(data["before"])
if last_ts >= before:
break
after = last_ts
except (KeyError, ValueError, __HOLE__, AttributeError):
# attributeError needed in case iteritems is missing (not a dict)
# let's give it the trace of the original exception, so we know
# what the specific problem is!
trace = sys.exc_info()[2]
raise InvalidAnalysisAPIResponse("Unable to parse response to get_completed()"), None, trace
|
TypeError
|
dataset/ETHPy150Open StackStorm/st2contrib/packs/lastline/actions/lib/analysis_apiclient.py/TaskCompletion.get_completed
|
472
|
def init_shell(banner):
"""Set up the iPython shell."""
try:
#this import can fail, that's why it's in a try block!
#pylint: disable=E0611
#pylint: disable=F0401
from IPython.frontend.terminal.embed import InteractiveShellEmbed #@UnresolvedImport
#pylint: enable=E0611
#pylint: enable=F0401
shell = InteractiveShellEmbed(banner1=banner)
except __HOLE__: # iPython < 0.11
# iPython <0.11 does have a Shell member
shell = IPython.Shell.IPShellEmbed() #pylint: disable=E1101
shell.set_banner(banner)
return shell
|
ImportError
|
dataset/ETHPy150Open StackStorm/st2contrib/packs/lastline/actions/lib/analysis_apiclient.py/init_shell
|
473
|
def get_style(style):
from markdown_deux.conf import settings
try:
return settings.MARKDOWN_DEUX_STYLES[style]
except __HOLE__:
return settings.MARKDOWN_DEUX_STYLES.get("default",
settings.MARKDOWN_DEUX_DEFAULT_STYLE)
|
KeyError
|
dataset/ETHPy150Open trentm/django-markdown-deux/lib/markdown_deux/__init__.py/get_style
|
474
|
def test_no_style_xml():
try:
import openpyxl
except __HOLE__:
raise nose.SkipTest('openpyxl not installed')
data = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] # data is a 2D array
filename = get_output_path("no_style.xlsx")
sheetname = "test"
wb = Workbook()
wb.new_sheet(sheetname, data=data)
wb.save(filename)
wbr = openpyxl.reader.excel.load_workbook(filename=filename,use_iterators=True)
mySheet = wbr.get_sheet_by_name(sheetname)
|
ImportError
|
dataset/ETHPy150Open kz26/PyExcelerate/pyexcelerate/tests/test_Style.py/test_no_style_xml
|
475
|
def call(self, path, query=None, method='GET', data=None,
files=None, get_all_pages=False, complete_response=False,
**kwargs):
"""Make a REST call to the Zendesk web service.
Parameters:
path - Path portion of the Zendesk REST endpoint URL.
query - Query parameters in dict form.
method - HTTP method to use in making the request.
data - POST data or multi-part form data to include.
files - Requests style dict of files for multi-part file uploads.
get_all_pages - Make multiple requests and follow next_page.
complete_response - Return raw request results.
"""
# Support specifying a mime-type other than application/json
mime_type = kwargs.pop('mime_type', 'application/json')
for key in kwargs.keys():
value = kwargs[key]
if hasattr(value, '__iter__') and not isinstance(value, str):
kwargs[key] = ','.join(map(str, value))
if query:
if kwargs:
kwargs.update(query)
else:
kwargs = query
url = self.zdesk_url + path
if files:
# Sending multipart file. data contains parameters.
json = None
self.headers.pop('Content-Type', None)
elif (mime_type == 'application/json' and
(method == 'POST' or method == 'PUT')):
# Sending JSON data.
json = data
data = {}
self.headers.pop('Content-Type', None)
elif (mime_type != 'application/json' and
(method == 'POST' or method == 'PUT')):
# Uploading an attachment, probably.
# Specifying the MIME type is required.
json = None
self.headers['Content-Type'] = mime_type
else:
# Probably a GET or DELETE. Not sending JSON or files.
json = None
self.headers.pop('Content-Type', None)
results = []
all_requests_complete = False
while not all_requests_complete:
# Make an http request
response = self.client.request(method,
url,
params=kwargs,
json=json,
data=data,
headers=self.headers,
files=files,
**self.client_args)
# If the response status is not in the 200 range then assume an
# error and raise proper exception
if response.status_code < 200 or response.status_code > 299:
if response.status_code == 401:
raise AuthenticationError(
response.content, response.status_code, response)
elif response.status_code == 429:
# FYI: Check the Retry-After header for how
# many seconds to sleep
raise RateLimitError(
response.content, response.status_code, response)
else:
raise ZendeskError(
response.content, response.status_code, response)
if response.content.strip():
content = response.json()
# set url to the next page if that was returned in the response
url = content.get('next_page', None)
else:
content = response.content
url = None
if complete_response:
results.append({
'response': response,
'content': content,
'status': response.status_code
})
else:
# Deserialize json content if content exists.
# In some cases Zendesk returns ' ' strings.
# Also return false non strings (0, [], (), {})
if response.headers.get('location'):
# Zendesk's response is sometimes the url of a newly
# created user/ticket/group/etc and they pass this through
# 'location'. Otherwise, the body of 'content'
# has our response.
results.append(response.headers.get('location'))
elif content:
results.append(content)
else:
results.append(responses[response.status_code])
# if there is a next_page, and we are getting pages, then continue
# making requests
all_requests_complete = not (get_all_pages and url)
if get_all_pages and complete_response:
# Return the list of results from all calls made.
# This way even if only one page was present the caller will
# always receive back an iterable value, since multiple pages
# were requested/expected. This also provides the information for
# every call, and saves us from having to try to combine all of
# that ourselves in a sensible way.
return results
if len(results) == 1:
# regardless as to whether all pages were requested, there was
# only one call and set of results, so just send it back.
return results[0]
# Now we need to try to combine or reduce the results:
hashable = True
try:
if len(set(results)) == 1:
# all responses were the same, so return just the first one.
# may have a list of locations or response statuses
return results[0]
except TypeError:
# probably we have a list of content dictionaries.
hashable = False
if hashable:
# we have a list of simple objects like strings, but they are not
# all the same so send them all back.
return results
# may have a sequence of response contents
# (dicts, possibly lists in the future as that is valid json also)
combined_dict_results = {}
combined_list_results = []
for result in results:
if isinstance(result, list):
# the result of this call returned a list.
# extend the combined list with these results.
combined_list_results.extend(result)
elif isinstance(result, dict):
# the result of this call returned a dict. the dict probably
# has both simple attributes (strings) and complex attributes
# (lists). if the attribute is a list, we will extend the
# combined attribute, otherwise we will just take the last
# attribute value from the last call.
# the end result is a response that looks like one giant call,
# to e.g. list tickets, but was actually made by multiple API
# calls.
for k in result.keys():
v = result[k]
if isinstance(v, list):
try:
combined_dict_results[k].extend(v)
except __HOLE__:
combined_dict_results[k] = v
else:
combined_dict_results[k] = v
else:
# returned result is not a dict or a list. don't know how to
# deal with this, so just send everything back.
return results
if combined_list_results and combined_dict_results:
# there was a mix of list and dict results from the sequence
# of calls. this case seems very odd to me if it ever happens.
# at any rate, send everything back uncombined
return results
if combined_dict_results:
return combined_dict_results
if combined_list_results:
return combined_list_results
# I don't expect to make it here, but I suppose it could happen if,
# perhaps, a sequence of empty dicts were returned or some such.
# Send everything back.
return results
|
KeyError
|
dataset/ETHPy150Open fprimex/zdesk/zdesk/zdesk.py/Zendesk.call
|
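
The merge loop at the end of Zendesk.call above grows list-valued keys across pages by attempting extend and falling back to plain assignment on the first KeyError. A minimal standalone sketch of that idiom, using hypothetical page dicts:

def merge_pages(pages):
    """Combine paginated response dicts, concatenating list-valued keys."""
    combined = {}
    for page in pages:
        for k, v in page.items():
            if isinstance(v, list):
                try:
                    combined[k].extend(v)  # key already seen on an earlier page
                except KeyError:
                    combined[k] = v  # first page carrying this key
            else:
                combined[k] = v  # scalar: the last page wins
    return combined

# Hypothetical paginated responses:
pages = [{'tickets': [1, 2], 'count': 4}, {'tickets': [3, 4], 'count': 4}]
assert merge_pages(pages) == {'tickets': [1, 2, 3, 4], 'count': 4}
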
476
|
def parse_soup(content):
try:
soup = BeautifulSoup(content, convertEntities=BeautifulSoup.HTML_ENTITIES)
return soup
    except __HOLE__ as e:
logger.error("%d: %s" % (e.code, e.msg))
return
|
HTTPError
|
dataset/ETHPy150Open numb3r3/crawler-python/crawler/core/base.py/parse_soup
|
477
|
def get_response(url, proxies=None):
try:
if proxies:
if url.startswith('http:') and 'http' in proxies:
prox = proxies['http']
if prox.startswith('socks'):
session.proxies = proxies
r = session.get(url)
else: # http proxy
r = requests.get(url, proxies = proxies)
elif url.startswith('https:') and 'https' in proxies:
prox = proxies['https']
if prox.startswith('socks'):
session.proxies = proxies
r = session.get(url)
else:
r = requests.get(url, proxies = proxies)
            else: # other types of requests, e.g., ftp
r = requests.get(url, proxies = proxies)
else: # without proxy
r = requests.get(url)
except __HOLE__:
logger.error('Url is invalid: %s' % url)
return
except requests.exceptions.ConnectionError:
logger.error("Error connecting to %s" % url)
return
if r.status_code != 200:
logger.error('Status code is %d on %s' % (r.status_code, url))
return
return r.content
|
ValueError
|
dataset/ETHPy150Open numb3r3/crawler-python/crawler/core/base.py/get_response
|
478
|
def _get_office(self, raw_result):
office_query = {
'state': STATE,
'name': self._clean_office(raw_result.office)
}
        if office_query['name'] == 'President':
office_query['state'] = 'US'
if office_query['name'] in self.district_offices:
#if raw_result.district:
office_query['district'] = raw_result.district or ''
key = Office.make_key(**office_query)
try:
return self._office_cache[key]
except __HOLE__:
try:
office = Office.objects.get(**office_query)
assert key == office.key
self._office_cache[key] = office
return office
except Office.DoesNotExist:
logger.error("\tNo office matching query {}".format(office_query))
raise
|
KeyError
|
dataset/ETHPy150Open openelections/openelections-core/openelex/us/wa/transform/__init__.py/BaseTransform._get_office
|
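
_get_office and the neighboring lookup methods in this transform all share one EAFP idiom: probe an in-memory dict first and fall back to the datastore only on KeyError. A generic sketch of the idiom (expensive_lookup is a hypothetical stand-in for the Office/Party/Candidate queries):

_cache = {}

def cached(key, expensive_lookup):
    """EAFP memoization: try the cache first, compute and store on a miss."""
    try:
        return _cache[key]
    except KeyError:
        value = expensive_lookup(key)
        _cache[key] = value
        return value

# Hypothetical usage: the second call is served from the cache.
print(cached("wa-governor", lambda k: k.upper()))
print(cached("wa-governor", lambda k: k.upper()))
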
479
|
def get_party(self, raw_result, attr='party'):
party = getattr(raw_result, attr)
if not party:
return None
clean_abbrev = self._clean_party(party)
if not clean_abbrev:
return None
try:
return self._party_cache[clean_abbrev]
except __HOLE__:
try:
party = Party.objects.get(abbrev=clean_abbrev)
self._party_cache[clean_abbrev] = party
return party
except Party.DoesNotExist:
logger.error("No party with abbreviation {}".format(clean_abbrev))
raise
|
KeyError
|
dataset/ETHPy150Open openelections/openelections-core/openelex/us/wa/transform/__init__.py/BaseTransform.get_party
|
480
|
def _clean_party(self, party):
try:
return self.PARTY_MAP[party]
except __HOLE__:
return None
|
KeyError
|
dataset/ETHPy150Open openelections/openelections-core/openelex/us/wa/transform/__init__.py/BaseTransform._clean_party
|
481
|
def get_candidate_fields(self, raw_result):
year = raw_result.end_date.year
fields = self._get_fields(raw_result, candidate_fields)
try:
name = HumanName(raw_result.full_name)
except __HOLE__:
name = HumanName("{} {}".format(raw_result.given_name, raw_result.family_name))
fields['given_name'] = name.first
fields['family_name'] = name.last
if not fields['full_name']:
fields['full_name'] = "{} {}".format(name.first, name.last)
try:
fields['additional_name'] = name.middle
fields['suffix'] = name.suffix
        except Exception as e:
logger.error(e)
return fields
|
TypeError
|
dataset/ETHPy150Open openelections/openelections-core/openelex/us/wa/transform/__init__.py/BaseTransform.get_candidate_fields
|
482
|
def get_contest(self, raw_result):
"""
Returns the Contest model instance for a given RawResult.
Caches the result in memory to reduce the number of calls to the
datastore.
"""
key = "%s-%s" % (raw_result.election_id, raw_result.contest_slug)
try:
#print self._contest_cache[key]
return self._contest_cache[key]
except KeyError:
#raise
fields = self.get_contest_fields(raw_result)
#print fields
#quit(fields['source'])
fields.pop('source')
try:
#contest = Contest.objects.get(**fields)
try:
contest = Contest.objects.filter(**fields)[0]
except __HOLE__:
contest = Contest.objects.get(**fields)
#print contest
#quit("uuuuuuuuuuuu")
except Exception:
print fields
print "\n"
raise
self._contest_cache[key] = contest
return contest
|
IndexError
|
dataset/ETHPy150Open openelections/openelections-core/openelex/us/wa/transform/__init__.py/BaseTransform.get_contest
|
483
|
def get_candidate(self, raw_result, extra={}):
"""
Get the Candidate model for a RawResult
Keyword arguments:
* extra - Dictionary of extra query parameters that will
be used to select the candidate.
"""
key = (raw_result.election_id, raw_result.contest_slug,
raw_result.candidate_slug)
try:
return self._candidate_cache[key]
except __HOLE__:
fields = self.get_candidate_fields(raw_result)
fields.update(extra)
del fields['source']
try:
candidate = Candidate.objects.get(**fields)
except Candidate.DoesNotExist:
print fields
raise
self._candidate_cache[key] = candidate
return candidate
|
KeyError
|
dataset/ETHPy150Open openelections/openelections-core/openelex/us/wa/transform/__init__.py/CreateResultsTransform.get_candidate
|
484
|
def getstat(path):
""" returns the stat information of a file"""
statinfo=None
try:
statinfo=os.lstat(path)
except (IOError, __HOLE__) as e: # FileNotFoundError only since python 3.3
if args.debug:
sys.stderr.write(str(e))
except:
raise
return statinfo
|
OSError
|
dataset/ETHPy150Open FredHutch/swift-commander/swift_commander/swsymlinks.py/getstat
|
485
|
def get_klasses():
modules = [rest_views, generics, serializers]
if viewsets is not None:
modules.append(viewsets)
klasses = {}
for module in modules:
for attr_str in dir(module):
is_subclass = False
attr = getattr(module, attr_str)
try:
is_subclass = (issubclass(attr, View) or
issubclass(attr, BaseSerializer))
except __HOLE__:
pass
if not attr_str.startswith('_') and is_subclass:
add_to_klasses_if_its_restframework(klasses, attr)
for klass in attr.mro():
add_to_klasses_if_its_restframework(klasses, klass)
return klasses
|
TypeError
|
dataset/ETHPy150Open vintasoftware/cdrf.co/rest_framework_ccbv/inspector.py/get_klasses
|
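
The TypeError swallowed in get_klasses above comes from handing issubclass a module attribute that is not a class; a short demonstration of that behavior:

# issubclass raises TypeError when its first argument is not a class,
# which is why the attribute scan above guards the call.
for attr in (int, "not a class", 3.0):
    try:
        print(attr, issubclass(attr, object))
    except TypeError:
        print(attr, "-> not a class, skipped")
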
486
|
def kappa(y_true, y_pred, weights=None, allow_off_by_one=False):
"""
    Calculates the kappa inter-rater agreement between the gold standard
and the predicted ratings. Potential values range from -1 (representing
complete disagreement) to 1 (representing complete agreement). A kappa
value of 0 is expected if all agreement is due to chance.
In the course of calculating kappa, all items in `y_true` and `y_pred` will
first be converted to floats and then rounded to integers.
It is assumed that y_true and y_pred contain the complete range of possible
ratings.
This function contains a combination of code from yorchopolis's kappa-stats
and Ben Hamner's Metrics projects on Github.
:param y_true: The true/actual/gold labels for the data.
:type y_true: array-like of float
:param y_pred: The predicted/observed labels for the data.
:type y_pred: array-like of float
:param weights: Specifies the weight matrix for the calculation.
Options are:
- None = unweighted-kappa
- 'quadratic' = quadratic-weighted kappa
- 'linear' = linear-weighted kappa
- two-dimensional numpy array = a custom matrix of
weights. Each weight corresponds to the
:math:`w_{ij}` values in the wikipedia description
of how to calculate weighted Cohen's kappa.
:type weights: str or numpy array
:param allow_off_by_one: If true, ratings that are off by one are counted as
equal, and all other differences are reduced by
one. For example, 1 and 2 will be considered to be
equal, whereas 1 and 3 will have a difference of 1
                             when building the weights matrix.
:type allow_off_by_one: bool
"""
logger = logging.getLogger(__name__)
# Ensure that the lists are both the same length
assert(len(y_true) == len(y_pred))
# This rather crazy looking typecast is intended to work as follows:
# If an input is an int, the operations will have no effect.
# If it is a float, it will be rounded and then converted to an int
# because the ml_metrics package requires ints.
# If it is a str like "1", then it will be converted to a (rounded) int.
# If it is a str that can't be typecast, then the user is
# given a hopefully useful error message.
# Note: numpy and python 3.3 use bankers' rounding.
try:
y_true = [int(np.round(float(y))) for y in y_true]
y_pred = [int(np.round(float(y))) for y in y_pred]
except __HOLE__ as e:
logger.error("For kappa, the labels should be integers or strings "
"that can be converted to ints (E.g., '4.0' or '3').")
raise e
# Figure out normalized expected values
min_rating = min(min(y_true), min(y_pred))
max_rating = max(max(y_true), max(y_pred))
# shift the values so that the lowest value is 0
# (to support scales that include negative values)
y_true = [y - min_rating for y in y_true]
y_pred = [y - min_rating for y in y_pred]
# Build the observed/confusion matrix
num_ratings = max_rating - min_rating + 1
observed = confusion_matrix(y_true, y_pred,
labels=list(range(num_ratings)))
num_scored_items = float(len(y_true))
    # Build weight array if we weren't passed one
if isinstance(weights, string_types):
wt_scheme = weights
weights = None
else:
wt_scheme = ''
if weights is None:
weights = np.empty((num_ratings, num_ratings))
for i in range(num_ratings):
for j in range(num_ratings):
diff = abs(i - j)
if allow_off_by_one and diff:
diff -= 1
if wt_scheme == 'linear':
weights[i, j] = diff
elif wt_scheme == 'quadratic':
weights[i, j] = diff ** 2
elif not wt_scheme: # unweighted
weights[i, j] = bool(diff)
else:
raise ValueError('Invalid weight scheme specified for '
'kappa: {}'.format(wt_scheme))
hist_true = np.bincount(y_true, minlength=num_ratings)
hist_true = hist_true[: num_ratings] / num_scored_items
hist_pred = np.bincount(y_pred, minlength=num_ratings)
hist_pred = hist_pred[: num_ratings] / num_scored_items
expected = np.outer(hist_true, hist_pred)
# Normalize observed array
observed = observed / num_scored_items
# If all weights are zero, that means no disagreements matter.
k = 1.0
if np.count_nonzero(weights):
k -= (sum(sum(weights * observed)) / sum(sum(weights * expected)))
return k
|
ValueError
|
dataset/ETHPy150Open EducationalTestingService/skll/skll/metrics.py/kappa
|
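
For intuition on the weighting schemes in kappa above, this sketch isolates the weight-matrix construction from the function (allow_off_by_one omitted for brevity) and prints the matrices for a 3-point scale:

import numpy as np

def build_weights(num_ratings, scheme=''):
    """Mirror of kappa's weights: '' = unweighted (0/1),
    'linear' = |i - j|, 'quadratic' = (i - j) ** 2."""
    w = np.empty((num_ratings, num_ratings))
    for i in range(num_ratings):
        for j in range(num_ratings):
            diff = abs(i - j)
            if scheme == 'linear':
                w[i, j] = diff
            elif scheme == 'quadratic':
                w[i, j] = diff ** 2
            else:
                w[i, j] = bool(diff)
    return w

print(build_weights(3, 'linear'))     # [[0,1,2],[1,0,1],[2,1,0]]
print(build_weights(3, 'quadratic'))  # [[0,1,4],[1,0,1],[4,1,0]]
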
487
|
def makeAnnotatorDistance( infile,
outfile,
builder,
workspace,
workspace_label="direction",
annotations = None ):
'''check statistical association between intervals and
transcription start sites.
'''
to_cluster = True
target_path = os.path.join( os.path.abspath( PARAMS["exportdir"] ),
"annotator_distance",
outfile )
if os.path.exists( target_path): shutil.rmtree( target_path)
try:
os.makedirs( target_path )
except __HOLE__:
pass
options = []
if annotations:
options.append( "--filename-annotations=%s" % annotations )
options = " ".join( options )
statement = '''
python %(scriptsdir)s/annotator_distance.py \
--workspace=%(workspace)s \
--segments=%(infile)s \
--segments-format=bed \
--counter=%(annodist_counter)s \
--workspace-label=%(workspace_label)s \
--sampler=permutation \
--transform-counts=cumulative \
--logscale=x \
--remove-overhangs \
--analysis=proximity \
--num-samples=%(annodist_iterations)i \
--num-bins=%(annodist_bins)i \
--hardcopy=%(target_path)s/%%s.png \
--output-filename-pattern=%(target_path)s/%%s.table \
--workspace-builder=%(builder)s \
--resolution=%(annodist_resolution_intergenic)s \
--plot
%(options)s < /dev/null > %(outfile)s'''
P.run()
|
OSError
|
dataset/ETHPy150Open CGATOxford/cgat/obsolete/pipeline_vitaminD_annotator.py/makeAnnotatorDistance
|
488
|
def main():
parser = argparse.ArgumentParser(description="Fixes the content-type of assets on S3")
parser.add_argument("--access-key", "-a", type=str, required=True, help="The AWS access key")
parser.add_argument("--secret-key", "-s", type=str, required=True, help="The AWS secret key")
parser.add_argument("--bucket", "-b", type=str, required=True, help="The S3 bucket to check")
parser.add_argument("--prefixes", "-p", type=str, default=[""], required=False, nargs="*", help="File path prefixes to check")
parser.add_argument("--workers", "-w", type=int, default=4, required=False, help="The number of workers")
parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output")
parser.add_argument("--dryrun", "-d", action="store_true", default=False, required=False,help="Add this for a dry run (don't change any file)")
args = parser.parse_args()
queue = multiprocessing.Queue()
processes = []
bucket = get_bucket(args.access_key, args.secret_key, args.bucket)
# Start the workers
for _ in xrange(args.workers):
p = multiprocessing.Process(target=check_headers, args=(bucket, queue, args.verbose, args.dryrun))
p.start()
processes.append(p)
# Add the items to the queue
for key in find_matching_files(bucket, args.prefixes):
queue.put(key.name)
    # Add Nones to the end of the queue; they act as a signal for the
    # processes to finish
for _ in xrange(args.workers):
queue.put(None)
for p in processes:
# Wait for the processes to finish
try:
p.join()
except __HOLE__:
pass
|
KeyboardInterrupt
|
dataset/ETHPy150Open dailymuse/s3-content-type-fixer/s3_content_type_fixer.py/main
|
489
|
def __getitem__(self, name):
try:
return self._class_info[name]
except __HOLE__:
return self._function_info[name]
|
KeyError
|
dataset/ETHPy150Open kdart/pycopia/QA/pycopia/getdocs.py/SuiteInfoBase.__getitem__
|
490
|
def is_row_deleted(self, row_getter):
def predicate(driver):
if self._is_element_present(*self._empty_table_locator):
return True
with self.waits_disabled():
return not self._is_element_displayed(row_getter())
try:
self._wait_until(predicate)
except exceptions.TimeoutException:
return False
except __HOLE__:
return True
return True
|
IndexError
|
dataset/ETHPy150Open openstack/horizon/openstack_dashboard/test/integration_tests/regions/tables.py/TableRegion.is_row_deleted
|
491
|
def project(self, project, **kwargs):
"""Wraps `Rundeck API /project/[NAME] <http://rundeck.org/docs/api/index.html#getting-project-info>`_
:Parameters:
project : str
name of Project
:Keywords:
create : bool
Create the project if it is not found (requires API version >11)
[default: True for API version >11 else False]
:return: A :class:`~.rundeck.connection.RundeckResponse``
:rtype: :class:`~.rundeck.connection.RundeckResponse`
"""
# check if project creation is desired and apply defaults
create = kwargs.pop('create', None)
if create is None:
if self.connection.api_version >= 11:
create = True
else:
create = False
elif create == True:
self.requires_version(11)
rd_url = 'project/{0}'.format(urlquote(project))
# seed project var (seems cleaner than alternatives)
project = None
try:
project = self._exec(GET, rd_url, **kwargs)
except __HOLE__ as exc:
if create:
project = self._exec(POST, rd_url, **kwargs)
else:
raise
return project
|
HTTPError
|
dataset/ETHPy150Open marklap/rundeckrun/rundeck/api.py/RundeckApiTolerant.project
|
492
|
def test_instance(self):
'''
Test creating and deleting instance on Joyent
'''
try:
self.assertIn(
INSTANCE_NAME,
[i.strip() for i in self.run_cloud('-p joyent-test {0}'.format(INSTANCE_NAME))]
)
except AssertionError:
self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME))
raise
# delete the instance
try:
self.assertIn(
INSTANCE_NAME + ':',
[i.strip() for i in self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME))]
)
except __HOLE__:
raise
|
AssertionError
|
dataset/ETHPy150Open saltstack/salt/tests/integration/cloud/providers/joyent.py/JoyentTest.test_instance
|
493
|
def assertContainsSameWords(self, string1, string2):
try:
for w1, w2 in zip_longest(string1.split(), string2.split(), fillvalue=''):
self.assertEqual(w1, w2)
except __HOLE__:
raise AssertionError("%r does not contain the same words as %r" % (string1,
string2))
|
AssertionError
|
dataset/ETHPy150Open fusionbox/django-widgy/tests/utilstests/tests.py/HtmlToText.assertContainsSameWords
|
494
|
def credentials_are_valid(self, user_settings, client):
if user_settings:
client = client or GitHubClient(external_account=user_settings.external_accounts[0])
try:
client.user()
except (GitHubError, __HOLE__):
return False
return True
|
IndexError
|
dataset/ETHPy150Open CenterForOpenScience/osf.io/website/addons/github/serializer.py/GitHubSerializer.credentials_are_valid
|
495
|
def get_lan_ip():
ip = socket.gethostbyname(socket.gethostname())
if ip.startswith("127.") and os.name != "nt":
interfaces = [
"eth0",
"eth1",
"eth2",
"wlan0",
"wlan1",
"wifi0",
"ath0",
"ath1",
"ppp0",
]
for ifname in interfaces:
try:
ip = get_interface_ip(ifname)
break
except __HOLE__:
pass
return ip
|
IOError
|
dataset/ETHPy150Open xumiao/pymonk/monk/utils/utils.py/get_lan_ip
|
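
get_interface_ip, called in the loop above, is not shown in this row; on Linux it is commonly implemented with the SIOCGIFADDR ioctl, which raises IOError for interfaces that do not exist — the exception the loop swallows. A sketch under that assumption, in the Python 2 spelling used throughout this corpus:

import fcntl
import socket
import struct

def get_interface_ip(ifname):
    """Return the IPv4 address bound to ifname via the SIOCGIFADDR ioctl."""
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    return socket.inet_ntoa(fcntl.ioctl(
        s.fileno(),
        0x8915,  # SIOCGIFADDR
        struct.pack('256s', ifname[:15])
    )[20:24])
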
496
|
def get_base_schema(self):
'''
Retrieves base schema, taking into account dynamic typing
'''
schema = self.admin.model
if schema._meta.typed_field:
field = schema._meta.fields[schema._meta.typed_field]
if self.request.GET.get(schema._meta.typed_field, False):
key = self.request.GET[schema._meta.typed_field]
try:
schema = field.schemas[key]
except __HOLE__:
#TODO emit a warning
pass
else:
if self.temporary_document_id():
obj = self.get_temporary_store()
else:
obj = self.object
if obj:
try:
schema = field.schemas[obj[schema._meta.typed_field]]
except KeyError:
#TODO emit a warning
pass
#KeyErrors cause the base schema to return, this should cause needs_typed_selection to return true
return schema
|
KeyError
|
dataset/ETHPy150Open zbyte64/django-dockit/dockit/admin/views.py/DocumentProxyView.get_base_schema
|
497
|
def _unpickle_method(func_name, obj, cls):
"""
Author: Steven Bethard
http://bytes.com/topic/python/answers/552476-why-cant-you-pickle-instancemethods
"""
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except __HOLE__:
pass
else:
break
return func.__get__(obj, cls)
|
KeyError
|
dataset/ETHPy150Open neuropoly/spinalcordtoolbox/scripts/sct_straighten_spinalcord.py/_unpickle_method
|
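
_unpickle_method above is one half of the Bethard recipe for pickling bound methods; the companion pickler, not shown in this row, is conventionally written as follows (Python 2 spelling with copy_reg, matching the corpus; Python 3 would use copyreg and __func__/__self__):

import copy_reg
import types

def _pickle_method(method):
    """Reduce a bound method to (name, instance, class) for pickling."""
    func_name = method.im_func.__name__
    obj = method.im_self
    cls = method.im_class
    return _unpickle_method, (func_name, obj, cls)

copy_reg.pickle(types.MethodType, _pickle_method, _unpickle_method)
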
498
|
def is_number(s):
"""Check if input is float."""
try:
float(s)
return True
except __HOLE__:
return False
|
TypeError
|
dataset/ETHPy150Open neuropoly/spinalcordtoolbox/scripts/sct_straighten_spinalcord.py/is_number
|
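
float() raises TypeError for non-numeric types such as None (this row's label) but ValueError for malformed strings, so a hardened variant of is_number often catches both:

def is_number_strict(s):
    """Return True if s can be converted to a float, False otherwise."""
    try:
        float(s)
        return True
    except (TypeError, ValueError):
        return False

assert is_number_strict("3.14") and is_number_strict(2)
assert not is_number_strict(None) and not is_number_strict("abc")
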
499
|
def worker_landmarks_curved(self, arguments_worker):
"""Define landmarks along the centerline. Here, landmarks are densely defined along the centerline, and every
gapxy, a cross of landmarks is created
"""
try:
iz = arguments_worker[0]
iz_curved, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv, x_centerline_fit, y_centerline_fit, z_centerline = arguments_worker[1]
temp_results = []
# loop across z_centerline (0-->zmax)
if z_centerline[iz] in iz_curved:
# at a junction (gapxy): set coordinates for landmarks at the center of the cross
coord = Coordinate([0, 0, 0, 0])
coord.x, coord.y, coord.z = x_centerline_fit[iz], y_centerline_fit[iz], z_centerline[iz]
deriv = Coordinate([0, 0, 0, 0])
deriv.x, deriv.y, deriv.z = x_centerline_deriv[iz], y_centerline_deriv[iz], z_centerline_deriv[iz]
temp_results.append(coord)
# compute cross
cross_coordinates = compute_cross_centerline(coord, deriv, self.gapxy)
for coord in cross_coordinates:
temp_results.append(coord)
else:
# not a junction: do not create the cross.
temp_results.append(Coordinate([x_centerline_fit[iz], y_centerline_fit[iz], z_centerline[iz], 0], mode='continuous'))
return iz, temp_results
except __HOLE__:
return
except Exception as e:
print 'Error on line {}'.format(sys.exc_info()[-1].tb_lineno)
raise e
|
KeyboardInterrupt
|
dataset/ETHPy150Open neuropoly/spinalcordtoolbox/scripts/sct_straighten_spinalcord.py/SpinalCordStraightener.worker_landmarks_curved
|