| Unnamed: 0 (int64, 0–10k) | function (string, 79–138k chars) | label (string, 20 classes) | info (string, 42–261 chars) |
|---|---|---|---|
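Each row below is rendered flattened: the index, then the `function` cell (code in which the exception type of one `except` clause is masked with the `__HOLE__` token), then the `label` cell (the masked exception class), then the `info` cell (the source path). A minimal sketch for inspecting such a dump, assuming it is available as a pandas-readable CSV (the filename here is hypothetical):

```python
import pandas as pd

# Hypothetical filename; the actual source file of this dump is not given here.
df = pd.read_csv("ethpy150_exception_holes.csv")

row = df.iloc[0]
print(row["label"])     # e.g. "KeyError"
print(row["info"])      # e.g. "dataset/ETHPy150Open getlogbook/logbook/..."
print(row["function"])  # code containing "except __HOLE__:" to be predicted
```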
1,900
|
def lookup_level(level):
"""Return the integer representation of a logging level."""
if isinstance(level, integer_types):
return level
try:
return _reverse_level_names[level]
except __HOLE__:
raise LookupError('unknown level name %s' % level)
|
KeyError
|
dataset/ETHPy150Open getlogbook/logbook/logbook/base.py/lookup_level
|
1,901
|
def get_level_name(level):
"""Return the textual representation of logging level 'level'."""
try:
return _level_names[level]
except __HOLE__:
raise LookupError('unknown level')
|
KeyError
|
dataset/ETHPy150Open getlogbook/logbook/logbook/base.py/get_level_name
|
1,902
|
@cached_property
def message(self):
"""The formatted message."""
if not (self.args or self.kwargs):
return self.msg
try:
try:
return self._format_message(self.msg, *self.args,
**self.kwargs)
except UnicodeDecodeError:
# Assume a unicode message but mixed-up args
msg = self.msg.encode('utf-8', 'replace')
return self._format_message(msg, *self.args, **self.kwargs)
except (UnicodeEncodeError, __HOLE__):
# we catch AttributeError since if msg is bytes,
# it won't have the 'format' method
if (sys.exc_info()[0] is AttributeError
and (PY2 or not isinstance(self.msg, bytes))):
# this is not the case we thought it is...
raise
# Assume encoded message with unicode args.
# The assumption of utf8 as input encoding is just a guess,
# but this codepath is unlikely (if the message is a constant
# string in the caller's source file)
msg = self.msg.decode('utf-8', 'replace')
return self._format_message(msg, *self.args, **self.kwargs)
except Exception:
# this obviously will not give a proper error message if the
# information was not pulled and the log record no longer has
# access to the frame. But there is not much we can do about
# that.
e = sys.exc_info()[1]
errormsg = ('Could not format message with provided '
'arguments: {err}\n msg={msg!r}\n '
'args={args!r} \n kwargs={kwargs!r}.\n'
'Happened in file {file}, line {lineno}').format(
err=e, msg=self.msg, args=self.args,
kwargs=self.kwargs, file=self.filename,
lineno=self.lineno
)
if PY2:
errormsg = errormsg.encode('utf-8')
raise TypeError(errormsg)
|
AttributeError
|
dataset/ETHPy150Open getlogbook/logbook/logbook/base.py/LogRecord.message
|
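The `LogRecord.message` row above layers two fallbacks: retry with an encoded message on `UnicodeDecodeError`, then retry with a decoded message on `UnicodeEncodeError`/`AttributeError`. A minimal standalone sketch of the decode-and-retry half (illustrative names, not logbook's API):

```python
def format_message(msg, *args, **kwargs):
    """Format msg, retrying with a decoded copy if it arrived as bytes."""
    try:
        return msg.format(*args, **kwargs)
    except AttributeError:
        # bytes has no .format(); assume UTF-8, decode with replacement,
        # and retry -- the same best-guess fallback as the row above.
        return msg.decode("utf-8", "replace").format(*args, **kwargs)

print(format_message("level={0}", "WARNING"))   # level=WARNING
print(format_message(b"level={0}", "WARNING"))  # level=WARNING
```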
1,903
|
def enable(self):
"""Convenience method to enable this logger.
:raises AttributeError: The disabled property is read-only, typically
because it was overridden in a subclass.
.. versionadded:: 1.0
"""
try:
self.disabled = False
except __HOLE__:
raise AttributeError('The disabled property is read-only.')
|
AttributeError
|
dataset/ETHPy150Open getlogbook/logbook/logbook/base.py/LoggerMixin.enable
|
1,904
|
def disable(self):
"""Convenience method to disable this logger.
:raises AttributeError: The disabled property is read-only, typically
because it was overridden in a subclass.
.. versionadded:: 1.0
"""
try:
self.disabled = True
except __HOLE__:
raise AttributeError('The disabled property is read-only.')
|
AttributeError
|
dataset/ETHPy150Open getlogbook/logbook/logbook/base.py/LoggerMixin.disable
|
1,905
|
def main(argv):
global _PROG, _VERBOSE
_PROG = argv[0]
try:
opts, args = getopt.getopt(
argv[1:],
'hvu:g:m:', (
'help',
'verbose',
'user=',
'group=',
'mode=', ))
except getopt.GetoptError, e:
barf('invalid flag: -{0}{1}'.format('-' if 1 < len(e.opt) else '',
e.opt))
try:
user, group, mode = None, None, None
for flag, opt in opts:
if '-v' == flag or '--verbose' == flag:
_VERBOSE = True
elif '-u' == flag or '--user' == flag:
user = opt
elif '-g' == flag or '--group' == flag:
group = opt
elif '-m' == flag or '--mode' == flag:
try:
mode = int(opt, 8)
except __HOLE__:
barf('invalid mode: {}'.format(opt))
elif '-h' == flag or '--help' == flag:
usage(1)
except ( fsq.FSQEnvError, fsq.FSQCoerceError, ):
barf('invalid argument for flag: {0}'.format(flag))
try:
for arg in args:
fsq.down(queue=arg, user=user, group=group, mode=mode)
chirp('{0}: down'.format(arg))
except fsq.FSQCoerceError, e:
barf('cannot coerce queue; charset={0}'.format(_CHARSET))
except fsq.FSQError, e:
shout(e.strerror.encode(_CHARSET))
|
ValueError
|
dataset/ETHPy150Open axialmarket/fsq/libexec/fsq/down.py/main
|
1,906
|
def validate_value_type(value_type, value):
try:
return _VALIDATOR_MAP[value_type](value)
except (__HOLE__, TypeError) as err:
raise ValidationError(err)
### attribute base type validation and conversion
|
ValueError
|
dataset/ETHPy150Open kdart/pycopia/storage/pycopia/db/types.py/validate_value_type
|
1,907
|
def date_trunc_sql(self, lookup_type, field_name):
fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
try:
i = fields.index(lookup_type) + 1
except __HOLE__:
sql = field_name
else:
format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
return sql
|
ValueError
|
dataset/ETHPy150Open CollabQ/CollabQ/vendor/django/db/backends/mysql/base.py/DatabaseOperations.date_trunc_sql
|
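The `date_trunc_sql` row above keeps the `DATE_FORMAT` pieces up to the requested field and pads the remainder with constants (percents are doubled so they survive Django's later parameter interpolation). The string assembly in isolation:

```python
fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
fmt = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s')
fmt_def = ('0000-', '01', '-01', ' 00:', '00', ':00')

i = fields.index('month') + 1
format_str = ''.join(fmt[:i] + fmt_def[i:])
print("CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % ('created', format_str))
# CAST(DATE_FORMAT(created, '%%Y-%%m-01 00:00:00') AS DATETIME)
```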
1,908
|
def main():
resampler = apiai.Resampler(source_samplerate=RATE)
vad = apiai.VAD()
ai = apiai.ApiAI(CLIENT_ACCESS_TOKEN)
request = ai.voice_request()
request.lang = 'en' # optional, default value equal 'en'
def callback(in_data, frame_count, time_info, status):
frames, data = resampler.resample(in_data, frame_count)
state = vad.processFrame(frames)
request.send(data)
if (state == 1):
return in_data, pyaudio.paContinue
else:
return in_data, pyaudio.paComplete
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
output=False,
frames_per_buffer=CHUNK,
stream_callback=callback)
stream.start_stream()
print ("Say! Press enter for stop audio recording.")
try:
L = []
thread.start_new_thread(input_thread, (L,))
while stream.is_active() and len(L) == 0:
time.sleep(0.1)
except Exception as e:
raise e
except __HOLE__:
pass
stream.stop_stream()
stream.close()
p.terminate()
print ("Wait for response...")
response = request.getresponse()
print (response.read())
|
KeyboardInterrupt
|
dataset/ETHPy150Open api-ai/api-ai-python/examples/pyaudio_example.py/main
|
1,909
|
def import_tags(sess, store):
for tag in sess.query(Tag).all():
try:
repos_name = tag.repository.name
tag_name = tag.name
repos_namespace = tag.repository.user.username
image_id = tag.revision.id
path = store.tag_path(repos_namespace, repos_name, tag_name)
if store.exists(path):
continue
dest = store.put_content(path, image_id)
print('{0} -> {1}'.format(dest, image_id))
except __HOLE__ as e:
print('# Warning: {0}'.format(e))
|
AttributeError
|
dataset/ETHPy150Open docker/docker-registry/scripts/import_old_tags.py/import_tags
|
1,910
|
def addRecentFile( application, fileName ) :
if isinstance( application, Gaffer.Application ) :
applicationRoot = application.root()
else :
applicationRoot = application
try :
applicationRoot.__recentFiles
except __HOLE__ :
applicationRoot.__recentFiles = []
if fileName in applicationRoot.__recentFiles :
applicationRoot.__recentFiles.remove( fileName )
applicationRoot.__recentFiles.insert( 0, fileName )
del applicationRoot.__recentFiles[6:]
f = file( os.path.join( applicationRoot.preferencesLocation(), "recentFiles.py" ), "w" )
f.write( "# This file was automatically generated by Gaffer.\n" )
f.write( "# Do not edit this file - it will be overwritten.\n\n" )
f.write( "import GafferUI\n" )
for fileName in reversed( applicationRoot.__recentFiles ) :
f.write( "GafferUI.FileMenu.addRecentFile( application, \"%s\" )\n" % fileName )
## A function suitable as the command for a File/Save menu item. It must be invoked from a menu which
# has a ScriptWindow in its ancestry.
|
AttributeError
|
dataset/ETHPy150Open ImageEngine/gaffer/python/GafferUI/FileMenu.py/addRecentFile
|
1,911
|
def clean(self, value):
super(SIEMSOField, self).clean(value)
if value in EMPTY_VALUES:
return u''
value = value.strip()
m = self.emso_regex.match(value)
if m is None:
raise ValidationError(self.default_error_messages['invalid'])
# Validate EMSO
s = 0
int_values = [int(i) for i in value]
for a, b in zip(int_values, range(7, 1, -1) * 2):
s += a * b
chk = s % 11
if chk == 0:
K = 0
else:
K = 11 - chk
if K == 10 or int_values[-1] != K:
raise ValidationError(self.default_error_messages['checksum'])
# Extract extra info in the identification number
day, month, year, nationality, gender, chksum = [int(i) for i in m.groups()]
if year < 890:
year += 2000
else:
year += 1000
# validate birthday
try:
birthday = datetime.date(year, month, day)
except __HOLE__:
raise ValidationError(self.error_messages['date'])
if datetime.date.today() < birthday:
raise ValidationError(self.error_messages['date'])
self.info = {
'gender': gender < 500 and 'male' or 'female',
'birthdate': birthday,
'nationality': nationality,
}
return value
|
ValueError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/django/contrib/localflavor/si/forms.py/SIEMSOField.clean
|
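The EMSO checksum in the row above weights the first twelve digits with the repeating sequence 7, 6, 5, 4, 3, 2 and derives a check digit modulo 11 (a result of 10 marks the number invalid). The arithmetic in isolation, with a made-up digit string:

```python
def emso_check_digit(first_twelve):
    """Return check digit K for a 12-digit prefix, per the row above."""
    weights = list(range(7, 1, -1)) * 2  # [7, 6, 5, 4, 3, 2, 7, 6, 5, 4, 3, 2]
    s = sum(int(d) * w for d, w in zip(first_twelve, weights))
    chk = s % 11
    return 0 if chk == 0 else 11 - chk  # 10 would mean "no valid EMSO"

# Made-up digits, purely to show the arithmetic; not a real EMSO.
print(emso_check_digit("010190500012"))  # 9
```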
1,912
|
@classmethod
def from_config(cls, cloud_config, region):
try:
backend = cloud_config['BACKEND']
except __HOLE__:
raise LookupError("No backend specified!")
try:
backend_cls = backends[backend]
except KeyError:
raise LookupError(
"Unknown backend `{}`. Please choose one of {}".format(
backend, backends.keys()))
return backend_cls(cloud_config, region)
|
KeyError
|
dataset/ETHPy150Open onefinestay/gonzo/gonzo/clouds/compute.py/Cloud.from_config
|
1,913
|
def get_next_az(self, environment, server_type):
available_azs = self.list_availability_zones()
try:
newest_instance_az = self.list_instances_by_type(
environment,
server_type)[-1].extra['gonzo_az']
except __HOLE__:
return available_azs[0]
if len(available_azs) == 1:
return available_azs[0]
else:
for index, availabie_az in enumerate(available_azs):
if availabie_az.name == newest_instance_az:
if (index + 1) == len(available_azs):
return available_azs[0]
else:
return available_azs[index + 1]
|
IndexError
|
dataset/ETHPy150Open onefinestay/gonzo/gonzo/clouds/compute.py/Cloud.get_next_az
|
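`get_next_az` above implements a wrap-around walk over the availability-zone list, returning the zone after the most recently used one. The core index logic, isolated (zone names are illustrative):

```python
def next_az(available, newest):
    """Return the entry after `newest`, wrapping to the front."""
    if newest not in available or len(available) == 1:
        return available[0]
    return available[(available.index(newest) + 1) % len(available)]

zones = ["eu-west-1a", "eu-west-1b", "eu-west-1c"]
print(next_az(zones, "eu-west-1c"))  # eu-west-1a (wraps around)
```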
1,914
|
def querycrawler(self, path=None, curdepth=0, maxdepth=1):
self.log.debug('Crawler is visiting %s' % path)
localcrawlpaths = list()
if curdepth > maxdepth:
self.log.info('maximum depth %s reached' % maxdepth)
return
r = self.request(path=path)
if r is None:
return
response, responsebody = r
try:
soup = BeautifulSoup(responsebody)
except:
self.log.warn('could not parse the response body')
return
tags = soup('a')
for tag in tags:
try:
href = tag["href"]
if href is not None:
tmpu = urlparse(href)
if (tmpu[1] != '') and (self.target != tmpu[1]):
# not on the same domain name .. ignore
self.log.debug('Ignoring link because it is not on the same site %s' % href)
continue
if tmpu[0] not in ['http', 'https', '']:
self.log.debug('Ignoring link because it is not an http uri %s' % href)
continue
path = tmpu[2]
if not path.startswith('/'):
path = '/' + path
if len(tmpu[4]) > 0:
# found a query .. thats all we need
location = urlunparse(('', '', path, tmpu[3], tmpu[4], ''))
self.log.info('Found query %s' % location)
return href
if path not in self.crawlpaths:
href = unquote(path)
self.log.debug('adding %s for crawling' % href)
self.crawlpaths.append(href)
localcrawlpaths.append(href)
except __HOLE__:
pass
for nextpath in localcrawlpaths:
r = self.querycrawler(path=nextpath, curdepth=curdepth + 1, maxdepth=maxdepth)
if r:
return r
|
KeyError
|
dataset/ETHPy150Open sandrogauci/wafw00f/wafw00f/lib/evillib.py/waftoolsengine.querycrawler
|
1,915
|
def _parse_proxy(self, proxy):
parts = urlparse(proxy)
if not parts.scheme or not parts.netloc:
raise Exception("Invalid proxy specified, scheme required")
netloc = parts.netloc.split(":")
if len(netloc) != 2:
raise Exception("Proxy port unspecified")
try:
if parts.scheme == "socks5":
if socks is None:
raise Exception("socks5 proxy requires PySocks")
return Socks5Proxy(netloc[0], int(netloc[1]))
elif parts.scheme == "http":
return HttpProxy(netloc[0], int(netloc[1]))
else:
raise Exception("Unsupported proxy scheme")
except __HOLE__:
raise Exception("Invalid port number")
|
ValueError
|
dataset/ETHPy150Open sandrogauci/wafw00f/wafw00f/lib/evillib.py/waftoolsengine._parse_proxy
|
1,916
|
def open_and_lock(self, timeout, delay):
"""Open the file and lock it.
Args:
timeout: float, How long to try to lock for.
delay: float, How long to wait between retries
Raises:
AlreadyLockedException: if the lock is already acquired.
IOError: if the open fails.
CredentialsFileSymbolicLinkError: if the file is a symbolic
link.
"""
if self._locked:
raise AlreadyLockedException('File %s is already locked' %
self._filename)
start_time = time.time()
validate_file(self._filename)
try:
self._fh = open(self._filename, self._mode)
except __HOLE__ as e:
# If we can't access with _mode, try _fallback_mode and
# don't lock.
if e.errno in (errno.EPERM, errno.EACCES):
self._fh = open(self._filename, self._fallback_mode)
return
# We opened in _mode, try to lock the file.
while True:
try:
fcntl.lockf(self._fh.fileno(), fcntl.LOCK_EX)
self._locked = True
return
except IOError as e:
# If not retrying, then just pass on the error.
if timeout == 0:
raise
if e.errno != errno.EACCES:
raise
# We could not acquire the lock. Try again.
if (time.time() - start_time) >= timeout:
logger.warn('Could not lock %s in %s seconds',
self._filename, timeout)
if self._fh:
self._fh.close()
self._fh = open(self._filename, self._fallback_mode)
return
time.sleep(delay)
|
IOError
|
dataset/ETHPy150Open google/oauth2client/oauth2client/contrib/_fcntl_opener.py/_FcntlOpener.open_and_lock
|
1,917
|
def _open(self):
# read the OLE directory and see if this is a likely
# to be a Microsoft Image Composer file
try:
self.ole = OleFileIO(self.fp)
except __HOLE__:
raise SyntaxError, "not an MIC file; invalid OLE file"
# find ACI subfiles with Image members (maybe not the
# best way to identify MIC files, but what the... ;-)
self.images = []
for file in self.ole.listdir():
if file[1:] and file[0][-4:] == ".ACI" and file[1] == "Image":
self.images.append(file)
# if we didn't find any images, this is probably not
# an MIC file.
if not self.images:
raise SyntaxError, "not an MIC file; no image entries"
self.__fp = self.fp
self.frame = 0
if len(self.images) > 1:
self.category = Image.CONTAINER
self.seek(0)
|
IOError
|
dataset/ETHPy150Open kleientertainment/ds_mod_tools/pkg/win32/Python27/Lib/site-packages/PIL/MicImagePlugin.py/MicImageFile._open
|
1,918
|
def seek(self, frame):
try:
filename = self.images[frame]
except __HOLE__:
raise EOFError, "no such frame"
self.fp = self.ole.openstream(filename)
TiffImagePlugin.TiffImageFile._open(self)
self.frame = frame
|
IndexError
|
dataset/ETHPy150Open kleientertainment/ds_mod_tools/pkg/win32/Python27/Lib/site-packages/PIL/MicImagePlugin.py/MicImageFile.seek
|
1,919
|
def __getattr__(self, key):
if key in self.__pushed_up:
owner = self.__pushed_up[key]
value = getattr(getattr(self, owner), key)
setattr(self, key, value)
return value
elif key in self.__all__:
module = __import__(
self.__name__ + '.' + key, None, None, [self.__name__])
setattr(self, key, module)
return module
else:
try:
return ModuleType.__getattribute__(self, key)
except __HOLE__:
raise AttributeError(
'module %r has no attribute %r' % (self.__name__, key))
|
AttributeError
|
dataset/ETHPy150Open jek/flatland/flatland/util/deferred.py/deferred_module.__getattr__
|
1,920
|
def get_price(self, currency=None, orderitem=None):
"""
This method is part of the public, required API of products. It returns
either a price instance or raises a ``DoesNotExist`` exception.
If you need more complex pricing schemes, override this method with
your own implementation.
"""
if currency is None:
currency = (
orderitem.currency if orderitem else
plata.shop_instance().default_currency())
try:
# Let's hope that ordering=[-id] from the base price definition
# makes any sense here :-)
return self.prices.filter(currency=currency)[0]
except __HOLE__:
raise self.prices.model.DoesNotExist
|
IndexError
|
dataset/ETHPy150Open matthiask/plata/plata/product/models.py/ProductBase.get_price
|
1,921
|
def testAFakeZlib(self):
#
# This could cause a stack overflow before: importing zlib.py
# from a compressed archive would cause zlib to be imported
# which would find zlib.py in the archive, which would... etc.
#
# This test *must* be executed first: it must be the first one
# to trigger zipimport to import zlib (zipimport caches the
# zlib.decompress function object, after which the problem being
# tested here wouldn't be a problem anymore...
# (Hence the 'A' in the test method name: to make it the first
# item in a list sorted by name, like unittest.makeSuite() does.)
#
# This test fails on platforms on which the zlib module is
# statically linked, but the problem it tests for can't
# occur in that case (builtin modules are always found first),
# so we'll simply skip it then. Bug #765456.
#
if "zlib" in sys.builtin_module_names:
return
if "zlib" in sys.modules:
del sys.modules["zlib"]
files = {"zlib.py": (NOW, test_src)}
try:
self.doTest(".py", files, "zlib")
except __HOLE__:
if self.compression != ZIP_DEFLATED:
self.fail("expected test to not raise ImportError")
else:
if self.compression != ZIP_STORED:
self.fail("expected test to raise ImportError")
|
ImportError
|
dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_zipimport.py/UncompressedZipImportTestCase.testAFakeZlib
|
1,922
|
def testBadMagic2(self):
# make pyc magic word invalid, causing an ImportError
badmagic_pyc = bytearray(test_pyc)
badmagic_pyc[0] ^= 0x04 # flip an arbitrary bit
files = {TESTMOD + pyc_ext: (NOW, badmagic_pyc)}
try:
self.doTest(".py", files, TESTMOD)
except __HOLE__:
pass
else:
self.fail("expected ImportError; import from bad pyc")
|
ImportError
|
dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_zipimport.py/UncompressedZipImportTestCase.testBadMagic2
|
1,923
|
def reRepl(m):
ansiTypes = {
'clear': ansiFormatter.cmdReset(),
'hashtag': ansiFormatter.cmdBold(),
'profile': ansiFormatter.cmdUnderline(),
}
s = None
try:
mkey = m.lastgroup
if m.group(mkey):
s = '%s%s%s' % (ansiTypes[mkey], m.group(mkey), ansiTypes['clear'])
except __HOLE__:
pass
return s
|
IndexError
|
dataset/ETHPy150Open sixohsix/twitter/twitter/cmdline.py/reRepl
|
1,924
|
def __call__(self, twitter, options):
action = actions.get(options['action'], NoSuchAction)()
try:
doAction = lambda: action(twitter, options)
if options['refresh'] and isinstance(action, StatusAction):
while True:
doAction()
sys.stdout.flush()
time.sleep(options['refresh_rate'])
else:
doAction()
except __HOLE__:
print('\n[Keyboard Interrupt]', file=sys.stderr)
pass
|
KeyboardInterrupt
|
dataset/ETHPy150Open sixohsix/twitter/twitter/cmdline.py/Action.__call__
|
1,925
|
def __call__(self, twitter, options):
prompt = self.render_prompt(options.get('prompt', 'twitter> '))
while True:
options['action'] = ""
try:
args = input(prompt).split()
parse_args(args, options)
if not options['action']:
continue
elif options['action'] == 'exit':
raise SystemExit(0)
elif options['action'] == 'shell':
print('Sorry Xzibit does not work here!', file=sys.stderr)
continue
elif options['action'] == 'help':
print('''\ntwitter> `action`\n
The Shell accepts all the command line actions along with:
exit Leave the twitter shell (^D may also be used)
Full CMD Line help is appended below for your convenience.''',
file=sys.stderr)
Action()(twitter, options)
options['action'] = ''
except NoSuchActionError as e:
print(e, file=sys.stderr)
except __HOLE__:
print('\n[Keyboard Interrupt]', file=sys.stderr)
except EOFError:
print(file=sys.stderr)
leaving = self.ask(subject='Leave')
if not leaving:
print('Excellent!', file=sys.stderr)
else:
raise SystemExit(0)
|
KeyboardInterrupt
|
dataset/ETHPy150Open sixohsix/twitter/twitter/cmdline.py/TwitterShell.__call__
|
1,926
|
def _key(self, rawresult):
"""
Returns a string that uniquely identifies a raw result from a particular
source.
"""
bits = [rawresult.contest_slug, rawresult.candidate_slug,
slugify(rawresult.jurisdiction)]
if rawresult.district:
bits.append(rawresult.district)
try:
bits.append(rawresult.reporting_district)
except __HOLE__:
pass
return '-'.join(bits)
|
AttributeError
|
dataset/ETHPy150Open openelections/openelections-core/openelex/us/fl/load.py/LoadResults._key
|
1,927
|
@background.setter
def background(self, value):
self._color = value
self.refresh()
# propagate changes to every clone
if self.editor:
for clone in self.editor.clones:
try:
clone.modes.get(self.__class__).background = value
except __HOLE__:
# this should never happen since we're working with clones
pass
|
KeyError
|
dataset/ETHPy150Open OpenCobolIDE/OpenCobolIDE/open_cobol_ide/extlibs/pyqode/core/modes/caret_line_highlight.py/CaretLineHighlighterMode.background
|
1,928
|
def test_illegal_mode():
try:
builder.QRCodeBuilder('test', 1, mode='murks', error='M')
raise Exception('Expected an error for illegal mode')
except __HOLE__ as ex:
ok_('murks' in str(ex))
|
ValueError
|
dataset/ETHPy150Open mnooner256/pyqrcode/tests/test_builder.py/test_illegal_mode
|
1,929
|
def test_illegal_error():
try:
builder.QRCodeBuilder('123', version=40, mode='numeric', error='R')
raise Exception('Expected an error for illegal error')
except __HOLE__ as ex:
ok_('R' in str(ex))
|
ValueError
|
dataset/ETHPy150Open mnooner256/pyqrcode/tests/test_builder.py/test_illegal_error
|
1,930
|
def test_illegal_version():
try:
builder.QRCodeBuilder('123', version=41, mode='numeric', error='M')
raise Exception('Expected an error for illegal version')
except __HOLE__ as ex:
ok_('41' in str(ex))
|
ValueError
|
dataset/ETHPy150Open mnooner256/pyqrcode/tests/test_builder.py/test_illegal_version
|
1,931
|
def get_urls(self):
urlpatterns = super(RedisServerAdmin, self).get_urls()
try:
from django.conf.urls import patterns, url
except __HOLE__:
from django.conf.urls.defaults import patterns, url
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
return update_wrapper(wrapper, view)
return patterns(
'',
url(r'^(\d+)/inspect/$',
wrap(self.inspect_view),
name='redisboard_redisserver_inspect'),
) + urlpatterns
|
ImportError
|
dataset/ETHPy150Open ionelmc/django-redisboard/src/redisboard/admin.py/RedisServerAdmin.get_urls
|
1,932
|
def update_ipaddress(self):
'''
Updates the scheduler so it knows its own ip address
'''
# assign local ip in case of exception
self.old_ip = self.my_ip
self.my_ip = '127.0.0.1'
try:
obj = urllib2.urlopen(settings.get('PUBLIC_IP_URL',
'http://ip.42.pl/raw'))
results = self.ip_regex.findall(obj.read())
if len(results) > 0:
self.my_ip = results[0]
else:
raise IOError("Could not get valid IP Address")
obj.close()
self.logger.debug("Current public ip: {ip}".format(ip=self.my_ip))
except __HOLE__:
self.logger.error("Could not reach out to get public ip")
pass
if self.old_ip != self.my_ip:
self.logger.info("Changed Public IP: {old} -> {new}".format(
old=self.old_ip, new=self.my_ip))
|
IOError
|
dataset/ETHPy150Open istresearch/scrapy-cluster/crawler/crawling/distributed_scheduler.py/DistributedScheduler.update_ipaddress
|
1,933
|
def next_request(self):
'''
Logic to handle getting a new url request, from a bunch of
different queues
'''
t = time.time()
# update the redis queues every so often
if t - self.update_time > self.update_interval:
self.update_time = t
self.create_queues()
# update the ip address every so often
if t - self.update_ip_time > self.ip_update_interval:
self.update_ip_time = t
self.update_ipaddress()
self.report_self()
item = self.find_item()
if item:
self.logger.debug("Found url to crawl {url}" \
.format(url=item['url']))
try:
req = Request(item['url'])
except __HOLE__:
# need absolute url
# need better url validation here
req = Request('http://' + item['url'])
if 'meta' in item:
item = item['meta']
# defaults not in schema
if 'curdepth' not in item:
item['curdepth'] = 0
if "retry_times" not in item:
item['retry_times'] = 0
for key in item.keys():
req.meta[key] = item[key]
# extra check to add items to request
if 'useragent' in item and item['useragent'] is not None:
req.headers['User-Agent'] = item['useragent']
if 'cookie' in item and item['cookie'] is not None:
if isinstance(item['cookie'], dict):
req.cookies = item['cookie']
elif isinstance(item['cookie'], basestring):
req.cookies = self.parse_cookie(item['cookie'])
return req
return None
|
ValueError
|
dataset/ETHPy150Open istresearch/scrapy-cluster/crawler/crawling/distributed_scheduler.py/DistributedScheduler.next_request
|
1,934
|
def __write_data(self):
while len(self.write_buffer) > 0:
try:
buffer = self.write_buffer[0]
self.socket.sendall(buffer)
except __HOLE__ as e:
self.cleanup()
return
self.write_buffer.pop(0)
self.net_write = None
|
IOError
|
dataset/ETHPy150Open adamb70/CSGO-Market-Float-Finder/pysteamkit/steam3/connection.py/TCPConnection.__write_data
|
1,935
|
def __read_data(self):
while self.socket:
try:
data = self.socket.recv(4096)
except __HOLE__ as e:
self.cleanup()
return
if len(data) == 0:
self.cleanup()
return
self.data_received(data)
|
IOError
|
dataset/ETHPy150Open adamb70/CSGO-Market-Float-Finder/pysteamkit/steam3/connection.py/TCPConnection.__read_data
|
1,936
|
def __call__(self, bundles=None, output=None, directory=None, no_cache=None,
manifest=None, production=None):
"""Build assets.
``bundles``
A list of bundle names. If given, only this list of bundles
should be built.
``output``
List of (bundle, filename) 2-tuples. If given, only these
bundles will be built, using the custom output filenames.
Cannot be used with ``bundles``.
``directory``
Custom output directory to use for the bundles. The original
basenames defined in the bundle ``output`` attribute will be
used. If the ``output`` of the bundles are pointing to different
directories, they will be offset by their common prefix.
Cannot be used with ``output``.
``no_cache``
If set, a cache (if one is configured) will not be used.
``manifest``
If set, the given manifest instance will be used, instead of
any that might have been configured in the Environment. The value
passed will be resolved through ``get_manifest()``. If this fails,
a file-based manifest will be used using the given value as the
filename.
``production``
If set to ``True``, then :attr:`Environment.debug`` will forcibly
be disabled (set to ``False``) during the build.
"""
# Validate arguments
if bundles and output:
raise CommandError(
'When specifying explicit output filenames you must '
'do so for all bundles you want to build.')
if directory and output:
raise CommandError('A custom output directory cannot be '
'combined with explicit output filenames '
'for individual bundles.')
if production:
# TODO: Reset again (refactor commands to be classes)
self.environment.debug = False
# TODO: Oh how nice it would be to use the future options stack.
if manifest is not None:
try:
manifest = get_manifest(manifest, env=self.environment)
except __HOLE__:
manifest = get_manifest(
# abspath() is important, or this will be considered
# relative to Environment.directory.
"file:%s" % os.path.abspath(manifest),
env=self.environment)
self.environment.manifest = manifest
# Use output as a dict.
if output:
output = dict(output)
# Validate bundle names
bundle_names = bundles if bundles else (output.keys() if output else [])
for name in bundle_names:
if not name in self.environment:
raise CommandError(
'I do not know a bundle name named "%s".' % name)
# Make a list of bundles to build, and the filename to write to.
if bundle_names:
# TODO: It's not ok to use an internal property here.
bundles = [(n,b) for n, b in self.environment._named_bundles.items()
if n in bundle_names]
else:
# Includes unnamed bundles as well.
bundles = [(None, b) for b in self.environment]
# Determine common prefix for use with ``directory`` option.
if directory:
prefix = os.path.commonprefix(
[os.path.normpath(b.resolve_output())
for _, b in bundles if b.output])
# dirname() gives the right value for a single file.
prefix = os.path.dirname(prefix)
to_build = []
for name, bundle in bundles:
# TODO: We really should support this. This error here
# is just in place of a less understandable error that would
# otherwise occur.
if bundle.is_container and directory:
raise CommandError(
'A custom output directory cannot currently be '
'used with container bundles.')
# Determine which filename to use, if not the default.
overwrite_filename = None
if output:
overwrite_filename = output[name]
elif directory:
offset = os.path.normpath(
bundle.resolve_output())[len(prefix)+1:]
overwrite_filename = os.path.join(directory, offset)
to_build.append((bundle, overwrite_filename, name,))
# Build.
built = []
for bundle, overwrite_filename, name in to_build:
if name:
# A name is not necessarily available if the bundle was
# registered without one.
self.log.info("Building bundle: %s (to %s)" % (
name, overwrite_filename or bundle.output))
else:
self.log.info("Building bundle: %s" % bundle.output)
try:
if not overwrite_filename:
with bundle.bind(self.environment):
bundle.build(force=True, disable_cache=no_cache)
else:
# TODO: Rethink how we deal with container bundles here.
# As it currently stands, we write all child bundles
# to the target output, merged (which is also why we
# create and force writing to a StringIO instead of just
# using the ``Hunk`` objects that build() would return
# anyway.
output = StringIO()
with bundle.bind(self.environment):
bundle.build(force=True, output=output,
disable_cache=no_cache)
if directory:
# Only auto-create directories in this mode.
output_dir = os.path.dirname(overwrite_filename)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
MemoryHunk(output.getvalue()).save(overwrite_filename)
built.append(bundle)
except BuildError as e:
self.log.error("Failed, error was: %s" % e)
if len(built):
self.event_handlers['post_build']()
if len(built) != len(to_build):
return 2
|
ValueError
|
dataset/ETHPy150Open miracle2k/webassets/src/webassets/script.py/BuildCommand.__call__
|
1,937
|
def __call__(self, loop=None):
"""Watch assets for changes.
``loop``
A callback, taking no arguments, to be called once every loop
iteration. Can be useful to integrate the command with other code.
If not specified, the loop will call ``time.sleep()``.
"""
# TODO: This should probably also restart when the code changes.
mtimes = {}
try:
# Before starting to watch for changes, also recognize changes
# made while we did not run, and apply those immediately.
for bundle in self.environment:
print('Bringing up to date: %s' % bundle.output)
bundle.build(force=False)
self.log.info("Watching %d bundles for changes..." %
len(self.environment))
while True:
changed_bundles = self.check_for_changes(mtimes)
built = []
for bundle in changed_bundles:
print("Building bundle: %s ..." % bundle.output, end=' ')
sys.stdout.flush()
try:
bundle.build(force=True)
built.append(bundle)
except BuildError as e:
print("")
print("Failed: %s" % e)
else:
print("done")
if len(built):
self.event_handlers['post_build']()
do_end = loop() if loop else time.sleep(0.1)
if do_end:
break
except __HOLE__:
pass
|
KeyboardInterrupt
|
dataset/ETHPy150Open miracle2k/webassets/src/webassets/script.py/WatchCommand.__call__
|
1,938
|
def invoke(self, command, args):
"""Invoke ``command``, or throw a CommandError.
This is essentially a simple validation mechanism. Feel free
to call the individual command methods manually.
"""
try:
function = self.commands[command]
except __HOLE__ as e:
raise CommandError('unknown command: %s' % e)
else:
return function(**args)
# List of commands installed
|
KeyError
|
dataset/ETHPy150Open miracle2k/webassets/src/webassets/script.py/CommandLineEnvironment.invoke
|
1,939
|
def __init__(self, env=None, log=None, prog=None, no_global_options=False):
try:
import argparse
except __HOLE__:
raise RuntimeError(
'The webassets command line now requires the '
'"argparse" library on Python versions <= 2.6.')
else:
self.argparse = argparse
self.env = env
self.log = log
self._construct_parser(prog, no_global_options)
|
ImportError
|
dataset/ETHPy150Open miracle2k/webassets/src/webassets/script.py/GenericArgparseImplementation.__init__
|
1,940
|
def run_with_argv(self, argv):
try:
ns = self.parser.parse_args(argv)
except __HOLE__ as e:
# We do not want the main() function to exit the program.
# See run() instead.
return e.args[0]
return self.run_with_ns(ns)
|
SystemExit
|
dataset/ETHPy150Open miracle2k/webassets/src/webassets/script.py/GenericArgparseImplementation.run_with_argv
|
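`run_with_argv` above relies on argparse signalling bad arguments via `SystemExit`; catching it turns process termination into a return code. A minimal standalone reproduction (not webassets' API):

```python
import argparse

def run(argv):
    parser = argparse.ArgumentParser(prog="demo")
    parser.add_argument("--level", required=True)
    try:
        return parser.parse_args(argv)
    except SystemExit as e:
        # argparse calls sys.exit(2) on bad arguments; return the code
        # instead of letting it terminate the process.
        return e.args[0]

print(run(["--level", "info"]))  # Namespace(level='info')
print(run([]))                   # 2 (after argparse prints its usage text)
```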
1,941
|
def __call__(self, sid, *args, **kwargs):
"""
Return a token.
:return:
"""
try:
_sinfo = kwargs['sinfo']
except KeyError:
exp = self.do_exp(**kwargs)
_tid = kwargs['target_id']
else:
if 'lifetime' in kwargs:
_sinfo['lifetime'] = kwargs['lifetime']
exp = self.do_exp(**_sinfo)
_tid = _sinfo['client_id']
if 'scope' not in kwargs:
_scope = None
try:
_scope = _sinfo['scope']
except KeyError:
ar = json.loads(_sinfo['authzreq'])
try:
_scope = ar['scope']
except KeyError:
pass
if _scope:
kwargs['scope'] = ' '.join(_scope)
if self.usage == 'authorization_grant':
try:
kwargs['sub'] = _sinfo['sub']
except __HOLE__:
pass
del kwargs['sinfo']
if 'aud' in kwargs:
if _tid not in kwargs['aud']:
kwargs['aud'].append(_tid)
else:
kwargs['aud'] = [_tid]
if self.usage == 'client_authentication':
try:
kwargs['sub'] = _tid
except KeyError:
pass
else:
if 'azp' not in kwargs:
kwargs['azp'] = _tid
for param in ['lifetime', 'grant_type', 'response_type', 'target_id']:
try:
del kwargs[param]
except KeyError:
pass
try:
kwargs['kid'] = self.extra_claims['kid']
except:
pass
_jti = '{}-{}'.format(self.type, uuid.uuid4().hex)
_jwt = self.pack(jti=_jti, exp=exp, **kwargs)
self.db[_jti] = sid
return _jwt
|
KeyError
|
dataset/ETHPy150Open rohe/pyoidc/src/oic/extension/token.py/JWTToken.__call__
|
1,942
|
def do_exp(self, **kwargs):
try:
lifetime = kwargs['lifetime']
except KeyError:
try:
rt = ' '.join(kwargs['response_type'])
except KeyError:
rt = ' '.join(kwargs['grant_type'])
try:
lifetime = self.lt_pattern[rt]
except __HOLE__:
lifetime = self.lt_pattern['']
return utc_time_sans_frac() + lifetime
|
KeyError
|
dataset/ETHPy150Open rohe/pyoidc/src/oic/extension/token.py/JWTToken.do_exp
|
1,943
|
def invalidate(self, token):
info = self.unpack(token)
try:
del self.db[info['jti']]
except __HOLE__:
return False
return True
|
KeyError
|
dataset/ETHPy150Open rohe/pyoidc/src/oic/extension/token.py/JWTToken.invalidate
|
1,944
|
def handle_noargs(self, **options):
db = options.get('database')
connection = connections[db]
verbosity = int(options.get('verbosity'))
interactive = options.get('interactive')
self.style = no_style()
# Import the 'management' module within each installed app, to register
# dispatcher events.
for app_name in settings.INSTALLED_APPS:
try:
import_module('.management', app_name)
except __HOLE__:
pass
sql_list = sql_flush(self.style, connection, only_django=True)
if interactive:
confirm = raw_input("""You have requested a flush of the database.
This will IRREVERSIBLY DESTROY all data currently in the %r database,
and return each table to the state it was in after syncdb.
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """ % connection.settings_dict['NAME'])
else:
confirm = 'yes'
if confirm == 'yes':
try:
cursor = connection.cursor()
for sql in sql_list:
cursor.execute(sql)
except Exception, e:
transaction.rollback_unless_managed(using=db)
raise CommandError("""Database %s couldn't be flushed. Possible reasons:
* The database isn't running or isn't configured correctly.
* At least one of the expected database tables doesn't exist.
* The SQL was invalid.
Hint: Look at the output of 'django-admin.py sqlflush'. That's the SQL this command wasn't able to run.
The full error: %s""" % (connection.settings_dict['NAME'], e))
transaction.commit_unless_managed(using=db)
# Emit the post sync signal. This allows individual
# applications to respond as if the database had been
# sync'd from scratch.
all_models = []
for app in models.get_apps():
all_models.extend([
m for m in models.get_models(app, include_auto_created=True)
if router.allow_syncdb(db, m)
])
emit_post_sync_signal(set(all_models), verbosity, interactive, db)
# Reinstall the initial_data fixture.
kwargs = options.copy()
kwargs['database'] = db
call_command('loaddata', 'initial_data', **kwargs)
else:
print "Flush cancelled."
|
ImportError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/django/core/management/commands/flush.py/Command.handle_noargs
|
1,945
|
def _v1_auth(self, token_url):
creds = self.creds
headers = {}
headers['X-Auth-User'] = creds['username']
headers['X-Auth-Key'] = creds['password']
tenant = creds.get('tenant')
if tenant:
headers['X-Auth-Tenant'] = tenant
resp, resp_body = self._do_request(token_url, 'GET', headers=headers)
def _management_url(self, resp):
for url_header in ('x-heat-management-url',
'x-server-management-url',
'x-heat'):
try:
return resp[url_header]
except __HOLE__ as e:
not_found = e
raise not_found
if resp.status in (200, 204):
try:
self.management_url = _management_url(self, resp)
self.auth_token = resp['x-auth-token']
except KeyError:
raise exception.AuthorizationFailure()
elif resp.status == 305:
raise exception.AuthorizationRedirect(resp['location'])
elif resp.status == 400:
raise exception.AuthBadRequest(url=token_url)
elif resp.status == 401:
raise exception.NotAuthorized()
elif resp.status == 404:
raise exception.AuthUrlNotFound(url=token_url)
else:
status = resp.status
raise Exception(_('Unexpected response: %(status)s')
% {'status': resp.status})
|
KeyError
|
dataset/ETHPy150Open openstack-dev/heat-cfnclient/heat_cfnclient/common/auth.py/KeystoneStrategy._v1_auth
|
1,946
|
def _v2_auth(self, token_url):
def get_endpoint(service_catalog):
"""
Select an endpoint from the service catalog
We search the full service catalog for services
matching both type and region. If the client
supplied no region then any endpoint for the service
is considered a match. There must be one -- and
only one -- successful match in the catalog,
otherwise we will raise an exception.
"""
region = self.creds.get('region')
service_type_matches = lambda s: s.get('type') == self.service_type
region_matches = lambda e: region is None or e['region'] == region
endpoints = [ep for s in service_catalog if service_type_matches(s)
for ep in s['endpoints'] if region_matches(ep)]
if len(endpoints) > 1:
raise exception.RegionAmbiguity(region=region)
elif not endpoints:
raise exception.NoServiceEndpoint()
else:
# FIXME(sirp): for now just use the public url.
return endpoints[0]['publicURL']
creds = self.creds
creds = {
"auth": {
"tenantName": creds['tenant'],
"passwordCredentials": {
"username": creds['username'],
"password": creds['password']}}}
headers = {}
headers['Content-Type'] = 'application/json'
req_body = json.dumps(creds)
resp, resp_body = self._do_request(
token_url, 'POST', headers=headers, body=req_body)
if resp.status == 200:
resp_auth = json.loads(resp_body)['access']
self.management_url = get_endpoint(resp_auth['serviceCatalog'])
self.auth_token = resp_auth['token']['id']
elif resp.status == 305:
raise exception.RedirectException(resp['location'])
elif resp.status == 400:
raise exception.AuthBadRequest(url=token_url)
elif resp.status == 401:
raise exception.NotAuthorized()
elif resp.status == 404:
raise exception.AuthUrlNotFound(url=token_url)
else:
try:
body = json.loads(resp_body)
msg = body['error']['message']
except (__HOLE__, KeyError):
msg = resp_body
raise exception.KeystoneError(resp.status, msg)
|
ValueError
|
dataset/ETHPy150Open openstack-dev/heat-cfnclient/heat_cfnclient/common/auth.py/KeystoneStrategy._v2_auth
|
1,947
|
def call(self, files):
"""
This function pulls a ``FileInfo`` or ``TaskInfo`` object from
a list ``files``. Each object is then deemed if it will be a
multipart operation and add the necessary attributes if so. Each
object is then wrapped with a ``BasicTask`` object which is
essentially a thread of execution for a thread to follow. These
tasks are then submitted to the main executor.
"""
try:
self.executor.start()
total_files, total_parts = self._enqueue_tasks(files)
self.executor.print_thread.set_total_files(total_files)
self.executor.print_thread.set_total_parts(total_parts)
self.executor.initiate_shutdown()
self._finalize_shutdown()
except Exception as e:
LOGGER.debug('Exception caught during task execution: %s',
str(e), exc_info=True)
self.result_queue.put(PrintTask(message=str(e), error=True))
self.executor.initiate_shutdown(
priority=self.executor.IMMEDIATE_PRIORITY)
self._finalize_shutdown()
except __HOLE__:
self.result_queue.put(PrintTask(message=("Cleaning up. "
"Please wait..."),
error=True))
self.executor.initiate_shutdown(
priority=self.executor.IMMEDIATE_PRIORITY)
self._finalize_shutdown()
return CommandResult(self.executor.num_tasks_failed,
self.executor.num_tasks_warned)
|
KeyboardInterrupt
|
dataset/ETHPy150Open aws/aws-cli/awscli/customizations/s3/s3handler.py/S3Handler.call
|
1,948
|
@property
def raw_sensor_value(self):
"""
Returns the raw sensor value
:returns: the raw value read from the sensor
:rtype: int
:raises NoSensorFoundError: if the sensor could not be found
:raises SensorNotReadyError: if the sensor is not ready yet
"""
try:
with open(self.sensorpath, "r") as f:
data = f.readlines()
except __HOLE__:
raise NoSensorFoundError(self.type, self.id)
if data[0].strip()[-3:] != "YES":
raise SensorNotReadyError()
return float(data[1].split("=")[1])
|
IOError
|
dataset/ETHPy150Open timofurrer/w1thermsensor/w1thermsensor/core.py/W1ThermSensor.raw_sensor_value
|
1,949
|
@classmethod
def _get_unit_factor(cls, unit):
"""
Returns the unit factor depending on the unit constant
:param int unit: the unit of the factor requested
:returns: a function to convert the raw sensor value to the given unit
:rtype: lambda function
:raises UnsupportedUnitError: if the unit is not supported
"""
try:
if isinstance(unit, str):
unit = cls.UNIT_FACTOR_NAMES[unit]
return cls.UNIT_FACTORS[unit]
except __HOLE__:
raise UnsupportedUnitError()
|
KeyError
|
dataset/ETHPy150Open timofurrer/w1thermsensor/w1thermsensor/core.py/W1ThermSensor._get_unit_factor
|
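`_get_unit_factor` above dispatches through two dict lookups: a name-to-constant map, then a constant-to-callable map. A stripped-down sketch of the same pattern (constants and factors illustrative, loosely modeled on millidegree-Celsius input):

```python
DEGREES_C, DEGREES_F = 0x01, 0x02
UNIT_FACTOR_NAMES = {"celsius": DEGREES_C, "fahrenheit": DEGREES_F}
UNIT_FACTORS = {
    DEGREES_C: lambda raw: raw / 1000.0,             # raw millidegrees C
    DEGREES_F: lambda raw: raw / 1000.0 * 1.8 + 32,  # convert to Fahrenheit
}

def get_unit_factor(unit):
    try:
        if isinstance(unit, str):
            unit = UNIT_FACTOR_NAMES[unit]
        return UNIT_FACTORS[unit]
    except KeyError:
        raise ValueError("unsupported unit: %r" % (unit,))

print(get_unit_factor("celsius")(23500))  # 23.5
```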
1,950
|
def data(item=None, show_doc=False):
"""loads a datasaet (from in-modules datasets) in a dataframe data structure.
Args:
item (str) : name of the dataset to load.
show_doc (bool) : to show the dataset's documentation.
Examples:
>>> iris = data('iris')
>>> data('titanic', show_doc=True)
: returns the dataset's documentation.
>>> data()
: like help(), returns a dataframe [Item, Title]
for a list of the available datasets.
"""
if item:
try:
if show_doc:
__print_item_docs(item)
return
df = __read_csv(item)
return df
except __HOLE__:
find_similar(item)
else:
return __datasets_desc()
|
KeyError
|
dataset/ETHPy150Open iamaziz/PyDataset/pydataset/__init__.py/data
|
1,951
|
def connected(self, client):
"""Call this method when a client connected."""
self.clients.add(client)
self._log_connected(client)
self._start_watching(client)
self.send_msg(client, WELCOME, (self.pickle_protocol, __version__),
pickle_protocol=0)
profiler = self.profiler
while True:
try:
profiler = profiler.profiler
except __HOLE__:
break
self.send_msg(client, PROFILER, type(profiler))
if self._latest_result_data is not None:
try:
self._send(client, self._latest_result_data)
except socket.error as exc:
if exc.errno in (EBADF, EPIPE):
self.disconnected(client)
return
raise
if len(self.clients) == 1:
self._start_profiling()
|
AttributeError
|
dataset/ETHPy150Open what-studio/profiling/profiling/remote/__init__.py/ProfilingServer.connected
|
1,952
|
def possibly_quarantine(self, exc_type, exc_value, exc_traceback):
"""
Checks the exception info to see if it indicates a quarantine situation
(malformed or corrupted database). If not, the original exception will
be reraised. If so, the database will be quarantined and a new
sqlite3.DatabaseError will be raised indicating the action taken.
"""
if 'database disk image is malformed' in str(exc_value):
exc_hint = 'malformed'
elif 'file is encrypted or is not a database' in str(exc_value):
exc_hint = 'corrupted'
elif 'disk I/O error' in str(exc_value):
exc_hint = 'disk error while accessing'
else:
six.reraise(exc_type, exc_value, exc_traceback)
prefix_path = os.path.dirname(self.db_dir)
partition_path = os.path.dirname(prefix_path)
dbs_path = os.path.dirname(partition_path)
device_path = os.path.dirname(dbs_path)
quar_path = os.path.join(device_path, 'quarantined',
self.db_type + 's',
os.path.basename(self.db_dir))
try:
renamer(self.db_dir, quar_path, fsync=False)
except __HOLE__ as e:
if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):
raise
quar_path = "%s-%s" % (quar_path, uuid4().hex)
renamer(self.db_dir, quar_path, fsync=False)
detail = _('Quarantined %(db_dir)s to %(quar_path)s due to '
'%(exc_hint)s database') % {'db_dir': self.db_dir,
'quar_path': quar_path,
'exc_hint': exc_hint}
self.logger.error(detail)
raise sqlite3.DatabaseError(detail)
|
OSError
|
dataset/ETHPy150Open openstack/swift/swift/common/db.py/DatabaseBroker.possibly_quarantine
|
1,953
|
def put_record(self, record):
if self.db_file == ':memory:':
self.merge_items([record])
return
if not os.path.exists(self.db_file):
raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
with lock_parent_directory(self.pending_file, self.pending_timeout):
pending_size = 0
try:
pending_size = os.path.getsize(self.pending_file)
except __HOLE__ as err:
if err.errno != errno.ENOENT:
raise
if pending_size > PENDING_CAP:
self._commit_puts([record])
else:
with open(self.pending_file, 'a+b') as fp:
# Colons aren't used in base64 encoding; so they are our
# delimiter
fp.write(':')
fp.write(pickle.dumps(
self.make_tuple_for_pickle(record),
protocol=PICKLE_PROTOCOL).encode('base64'))
fp.flush()
|
OSError
|
dataset/ETHPy150Open openstack/swift/swift/common/db.py/DatabaseBroker.put_record
|
1,954
|
def _commit_puts(self, item_list=None):
"""
Scan for .pending files and commit the found records by feeding them
to merge_items(). Assume that lock_parent_directory has already been
called.
:param item_list: A list of items to commit in addition to .pending
"""
if self.db_file == ':memory:' or not os.path.exists(self.pending_file):
return
if item_list is None:
item_list = []
self._preallocate()
if not os.path.getsize(self.pending_file):
if item_list:
self.merge_items(item_list)
return
with open(self.pending_file, 'r+b') as fp:
for entry in fp.read().split(':'):
if entry:
try:
self._commit_puts_load(item_list, entry)
except Exception:
self.logger.exception(
_('Invalid pending entry %(file)s: %(entry)s'),
{'file': self.pending_file, 'entry': entry})
if item_list:
self.merge_items(item_list)
try:
os.ftruncate(fp.fileno(), 0)
except __HOLE__ as err:
if err.errno != errno.ENOENT:
raise
|
OSError
|
dataset/ETHPy150Open openstack/swift/swift/common/db.py/DatabaseBroker._commit_puts
|
1,955
|
def download_file(url, location, name_of_file, correct_hash):
"""Attempts to download a file from the internet to the specified location.
If the file is not found, or the retrieval of the file failed, the
exception will be caught and the system will exit.
Args:
url: A string of the URL to be retrieved.
location: A string of the path the file will be downloaded to.
name_of_file: A string with the name of the file, for printing purposes.
correct_hash: A string with the expected SHA-1 hash of the file
Returns:
String: The location of the file, if the download is successful. Will
return an empty string upon failure.
"""
try:
location, _ = urllib.urlretrieve(url, location)
assert get_file_hash(location) == correct_hash
print "\t" + name_of_file + " successfully downloaded."
return location
except __HOLE__:
sys.stderr.write("\t" + name_of_file + " failed to download.\n")
return ""
except AssertionError:
sys.stderr.write("\tIncorrect file downloaded. Please download " +
name_of_file + " manually.\n")
return ""
|
IOError
|
dataset/ETHPy150Open google/fplutil/setuputil/util.py/download_file
|
1,956
|
def wait_for_installation(program, command=False, search=False, basedir=""):
"""Once installation has started, poll until completion.
Once an asynchronous installation has started, wait for executable to exist.
Poll every second, until executable is found or user presses ctrl-c.
Args:
program: A string representing the name of the program that is being
installed.
command: True if the program name needs to be run in order to test
installation. False if it the executable can be searched for.
search: True if the executable will not be on the path, and must be searched
for, starting from the base directory given.
basedir: If search is true, start from this directory when looking for the
program. If search is false, basedir will be ignored.
Returns:
Boolean: Whether or not the the package finished installing
"""
print("Waiting for installation to complete.\nAlternately, press Ctrl-C to "
"quit, and rerun this script after installation has completed.")
try:
while command:
try:
subprocess.check_output(program, shell=True, stderr=subprocess.PIPE)
return True
except subprocess.CalledProcessError:
time.sleep(DELAY)
while search and not find_file(basedir, program):
time.sleep(DELAY)
while not search and not find_executable(program):
time.sleep(DELAY)
except __HOLE__:
sys.stderr.write("Setup exited before completion.")
return False
return True
|
KeyboardInterrupt
|
dataset/ETHPy150Open google/fplutil/setuputil/util.py/wait_for_installation
|
1,957
|
def test_dtypes():
batch_size = 2
dtype_is_none_msg = ("self.dtype is None, so you must provide a "
"non-None dtype argument to this method.")
all_scalar_dtypes = tuple(t.dtype
for t in theano.scalar.all_types)
def underspecifies_dtypes(from_space, to_dtype):
"""
Returns True iff the from_space and to_dtype are both None. If
from_space is a CompositeSpace, this recurses into its tree of
subspaces.
"""
if isinstance(from_space, CompositeSpace):
if not isinstance(to_dtype, tuple):
return any(underspecifies_dtypes(s, to_dtype)
for s in from_space.components)
else:
return any(underspecifies_dtypes(s, d)
for s, d
in safe_zip(from_space.components, to_dtype))
else:
assert not isinstance(to_dtype, tuple), ("Tree structure "
"mismatch between "
"from_space and "
"to_dtype.")
return from_space.dtype is None and to_dtype is None
def get_expected_batch_dtype(from_space, to_dtype):
"""
Returns the expected dtype of a batch returned from
from_space.f(batch, to_dtype), where f is one of the three batch
creation methods (get_origin_batch, make_theano_batch, and
make_shared_batch)
"""
if to_dtype == 'floatX':
to_dtype = theano.config.floatX
if isinstance(from_space, CompositeSpace):
if not isinstance(to_dtype, tuple):
to_dtype = (to_dtype, ) * len(from_space.components)
return tuple(get_expected_batch_dtype(subspace, subtype)
for subspace, subtype
in safe_zip(from_space.components, to_dtype))
else:
assert not (from_space.dtype is None and to_dtype is None)
return from_space.dtype if to_dtype is None else to_dtype
def get_batch_dtype(batch):
"""
Returns the dtype of a batch, as a string, or nested tuple of strings.
For simple batches such as ndarray, this returns str(batch.dtype).
For the None batches "used" by NullSpace, this returns a special string
"NullSpace dtype".
For composite batches, this returns (nested) tuples of dtypes.
"""
if isinstance(batch, tuple):
return tuple(get_batch_dtype(b) for b in batch)
elif batch is None:
return "NullSpace dtype"
else:
return batch.dtype
def test_get_origin_batch(from_space, to_type):
# Expect failure if neither we nor the from_space specifies a dtype
if underspecifies_dtypes(from_space, to_type):
try:
from_space.get_origin_batch(batch_size, dtype=to_type)
except TypeError as ex:
assert dtype_is_none_msg in str(ex)
except Exception as unexpected_ex:
print("Expected an exception of type TypeError with message "
"%s, got a %s instead with message %s." %
(dtype_is_none_msg,
type(unexpected_ex),
str(unexpected_ex)))
raise unexpected_ex
finally:
return
batch = from_space.get_origin_batch(batch_size, dtype=to_type)
assert get_batch_dtype(batch) == get_expected_batch_dtype(from_space,
to_type)
def test_make_shared_batch(from_space, to_type):
if underspecifies_dtypes(from_space, to_type):
try:
from_space.make_shared_batch(batch_size, dtype=to_type)
except __HOLE__ as ex:
assert dtype_is_none_msg in str(ex)
except Exception as unexpected_ex:
print("Expected an exception of type TypeError with message "
"%s, got a %s instead with message %s." %
(dtype_is_none_msg,
type(unexpected_ex),
str(unexpected_ex)))
raise unexpected_ex
finally:
return
batch = from_space.make_shared_batch(batch_size=batch_size,
name='batch',
dtype=to_type)
assert (get_batch_dtype(batch) ==
get_expected_batch_dtype(from_space, to_type)), \
("\nget_batch_dtype(batch): %s\n"
"get_expected_batch_dtype(from_space, to_type): %s" %
(get_batch_dtype(batch),
get_expected_batch_dtype(from_space, to_type)))
def test_make_theano_batch(from_space, to_type):
kwargs = {'name': 'batch',
'dtype': to_type}
# Sparse VectorSpaces throw an exception if batch_size is specified.
if not (isinstance(from_space, VectorSpace) and from_space.sparse):
kwargs['batch_size'] = batch_size
if underspecifies_dtypes(from_space, to_type):
try:
from_space.make_theano_batch(**kwargs)
except TypeError as ex:
assert dtype_is_none_msg in str(ex)
except Exception as unexpected_ex:
print("Expected an exception of type TypeError with message "
"%s, got a %s instead with message %s." %
(dtype_is_none_msg,
type(unexpected_ex),
str(unexpected_ex)))
raise unexpected_ex
finally:
return
batch = from_space.make_theano_batch(**kwargs)
assert get_batch_dtype(batch) == get_expected_batch_dtype(from_space,
to_type)
def test_format(from_space, to_space, using_numeric_batch):
"""
Unit test for a call to from_space.np_format_as(batch, to_space)
"""
# Type-checks the arguments
for space, name in zip((from_space, to_space),
("from_space", "to_space")):
if not isinstance(space,
(VectorSpace, Conv2DSpace, CompositeSpace)):
raise TypeError("This test only supports spaces of type "
"VectorSpace, Conv2DSpace, and "
"CompositeSpace, not %s's type %s" %
(name, type(space)))
def get_batch(space, using_numeric_batch):
"""
Uses space.get_origin_batch() to return a numeric batch,
or space.get_theano_batch() to return a symbolic
Uses a fallback dtype if the space itself doesn't have one.
"""
def specifies_all_dtypes(space):
"""
Returns True iff space has a completely specified dtype.
"""
if isinstance(space, CompositeSpace):
return all(specifies_all_dtypes(subspace)
for subspace in space.components)
else:
return space.dtype is not None
def replace_none_dtypes(dtype, fallback_dtype):
"""
Returns dtype, with any Nones replaced by fallback_dtype.
"""
if isinstance(dtype, tuple):
return tuple(replace_none_dtypes(d, fallback_dtype)
for d in dtype)
else:
return fallback_dtype if dtype is None else dtype
kwargs = {"batch_size": batch_size}
# Use this when space doesn't specify a dtype
fallback_dtype = theano.config.floatX
if not specifies_all_dtypes(space):
kwargs["dtype"] = replace_none_dtypes(space.dtype,
fallback_dtype)
if using_numeric_batch:
return space.get_origin_batch(**kwargs)
else:
# Sparse VectorSpaces throw an exception if batch_size is
# specified
if isinstance(space, VectorSpace) and space.sparse:
del kwargs["batch_size"]
kwargs["name"] = "space-generated batch"
return space.make_theano_batch(**kwargs)
def get_expected_warning(from_space, from_batch, to_space):
# composite -> composite
if isinstance(from_space, CompositeSpace) and \
isinstance(to_space, CompositeSpace):
for fs, fb, ts in safe_zip(from_space.components,
from_batch,
to_space.components):
warning, message = get_expected_warning(fs, fb, ts)
if warning is not None:
return warning, message
return None, None
# composite -> simple
if isinstance(from_space, CompositeSpace):
for fs, fb in safe_zip(from_space.components, from_batch):
warning, message = get_expected_warning(fs, fb, to_space)
if warning is not None:
return warning, message
return None, None
# simple -> composite
if isinstance(to_space, CompositeSpace):
if isinstance(from_space, VectorSpace) and \
isinstance(from_batch, theano.sparse.SparseVariable):
assert from_space.sparse
return (UserWarning,
'Formatting from a sparse VectorSpace to a '
'CompositeSpace is currently (2 Jan 2014) a '
'non-differentiable action. This is because it '
'calls slicing operations on a sparse batch '
'(e.g. "my_matrix[r:R, c:C]", which Theano does '
'not yet have a gradient operator for. If '
'autodifferentiation is reporting an error, '
'this may be why.')
for ts in to_space.components:
warning, message = get_expected_warning(from_space,
from_batch,
ts)
if warning is not None:
return warning, message
return None, None
# simple -> simple
return None, None
def get_expected_error(from_space, from_batch, to_space):
"""
        Returns an (error_type, message) pair describing the error expected
        when calling from_space._format_as(is_numeric, batch, to_space).
        Returns (None, None) if no error should be expected.
"""
def contains_different_dtypes(space):
"""
Returns true if space contains different dtypes. None is
considered distinct from all actual dtypes.
"""
assert isinstance(space, CompositeSpace)
def get_shared_dtype_if_any(space):
"""
Returns space's dtype. If space is composite, returns the
dtype used by all of its subcomponents. Returns False if
the subcomponents use different dtypes.
"""
if isinstance(space, CompositeSpace):
dtypes = tuple(get_shared_dtype_if_any(c)
for c in space.components)
assert(len(dtypes) > 0)
if any(d != dtypes[0] for d in dtypes[1:]):
return False
return dtypes[0] # could be False, but that's fine
else:
return space.dtype
return get_shared_dtype_if_any(space) is False
assert (isinstance(from_space, CompositeSpace) ==
isinstance(from_batch, tuple))
# composite -> composite
if isinstance(from_space, CompositeSpace) and \
isinstance(to_space, CompositeSpace):
for fs, fb, ts in safe_zip(from_space.components,
from_batch,
to_space.components):
error, message = get_expected_error(fs, fb, ts)
if error is not None:
return error, message
return None, None
# composite -> simple
if isinstance(from_space, CompositeSpace):
if isinstance(to_space, Conv2DSpace):
return (NotImplementedError,
"CompositeSpace does not know how to format as "
"Conv2DSpace")
for fs, fb in safe_zip(from_space.components, from_batch):
error, message = get_expected_error(fs, fb, to_space)
if error is not None:
return error, message
if isinstance(to_space, VectorSpace) and \
contains_different_dtypes(from_space) and \
to_space.dtype is None:
return (TypeError,
"Tried to format components with differing dtypes "
"into a VectorSpace with no dtype of its own. "
"dtypes: ")
return None, None
# simple -> composite
if isinstance(to_space, CompositeSpace):
if isinstance(from_space, VectorSpace) and \
isinstance(from_batch, theano.sparse.SparseVariable):
assert from_space.sparse
return (UserWarning,
'Formatting from a sparse VectorSpace to a '
'CompositeSpace is currently (2 Jan 2014) a '
'non-differentiable action. This is because it '
'calls slicing operations on a sparse batch '
'(e.g. "my_matrix[r:R, c:C]", which Theano does '
'not yet have a gradient operator for. If '
'autodifferentiation is reporting an error, '
'this may be why.')
if isinstance(from_space, Conv2DSpace):
return (NotImplementedError,
"Conv2DSpace does not know how to format as "
"CompositeSpace")
for ts in to_space.components:
error, message = get_expected_error(from_space,
from_batch,
ts)
if error is not None:
return error, message
return None, None
#
# simple -> simple
#
def is_sparse(space):
return isinstance(space, VectorSpace) and space.sparse
def is_complex(arg):
"""
Returns whether a space or a batch has a complex dtype.
"""
return (arg.dtype is not None and
str(arg.dtype).startswith('complex'))
if isinstance(from_batch, tuple):
return (TypeError,
"This space only supports simple dtypes, but received "
"a composite batch.")
if is_complex(from_batch) and not is_complex(from_space):
return (TypeError,
"This space has a non-complex dtype (%s), and "
"thus cannot support complex batches of type %s." %
(from_space.dtype, from_batch.dtype))
if from_space.dtype is not None and \
from_space.dtype != from_batch.dtype:
return (TypeError,
"This space is for dtype %s, but recieved a "
"batch of dtype %s." %
(from_space.dtype, from_batch.dtype))
if is_sparse(from_space) and isinstance(to_space, Conv2DSpace):
return (TypeError,
"Formatting a SparseVariable to a Conv2DSpace "
"is not supported, since neither scipy nor "
"Theano has sparse tensors with more than 2 "
"dimensions. We need 4 dimensions to "
"represent a Conv2DSpace batch")
if is_complex(from_space) and not is_complex(to_space):
if is_symbolic_batch(from_batch):
return (TypeError,
"Casting from complex to real is ambiguous")
else:
return (np.ComplexWarning,
"Casting complex values to real discards the "
"imaginary part")
return None, None
def get_expected_formatted_dtype(from_batch, to_space):
"""
        Returns the expected dtype of the batch returned from a call to
        from_space._format_as(batch, to_space). If the returned batch is a
        nested tuple, the expected dtype will also be a nested tuple.
"""
def get_single_dtype(batch):
"""
Returns the dtype shared by all leaf nodes of the nested batch.
If the nested batch contains differing dtypes, this throws an
AssertionError. None counts as a different dtype than non-None.
"""
if isinstance(batch, tuple):
assert len(batch) > 0
child_dtypes = tuple(get_single_dtype(b) for b in batch)
if any(c != child_dtypes[0] for c in child_dtypes[1:]):
return False
return child_dtypes[0] # may be False, but that's correct.
else:
return batch.dtype
# composite -> composite
if isinstance(from_batch, tuple) and \
isinstance(to_space, CompositeSpace):
return tuple(get_expected_formatted_dtype(b, s)
for b, s in safe_zip(from_batch,
to_space.components))
# composite -> simple
elif isinstance(from_batch, tuple):
if to_space.dtype is not None:
return to_space.dtype
else:
result = get_batch_dtype(from_batch)
if result is False:
raise TypeError("From_batch doesn't have a single "
"dtype: %s" %
str(get_batch_dtype(from_batch)))
return result
# simple -> composite
elif isinstance(to_space, CompositeSpace):
return tuple(get_expected_formatted_dtype(from_batch, s)
for s in to_space.components)
# simple -> simple with no dtype
elif to_space.dtype is None:
assert from_batch.dtype is not None
return str(from_batch.dtype)
# simple -> simple with a dtype
else:
return to_space.dtype
from_batch = get_batch(from_space, using_numeric_batch)
expected_error, expected_error_msg = get_expected_error(from_space,
from_batch,
to_space)
# For some reason, the "with assert_raises(expected_error) as context:"
    # idiom isn't catching all the expected errors. Use this instead:
if expected_error is not None:
try:
# temporarily upgrades warnings to exceptions within this block
with warnings.catch_warnings():
warnings.simplefilter("error")
from_space._format_as(using_numeric_batch,
from_batch,
to_space)
except expected_error as ex:
assert str(ex).find(expected_error_msg) >= 0
except Exception as unknown_ex:
print("Expected exception of type %s, got %s." %
(expected_error.__name__, type(unknown_ex)))
raise unknown_ex
finally:
return
to_batch = from_space._format_as(using_numeric_batch,
from_batch,
to_space)
expected_dtypes = get_expected_formatted_dtype(from_batch, to_space)
actual_dtypes = get_batch_dtype(to_batch)
assert expected_dtypes == actual_dtypes, \
("\nexpected_dtypes: %s,\n"
"actual_dtypes: %s \n"
"from_space: %s\n"
"from_batch's dtype: %s\n"
"from_batch is theano?: %s\n"
"to_space: %s" % (expected_dtypes,
actual_dtypes,
from_space,
get_batch_dtype(from_batch),
is_symbolic_batch(from_batch),
to_space))
#
#
# End of test_format() function.
def test_dtype_getter(space):
"""
Tests the getter method of space's dtype property.
"""
def assert_composite_dtype_eq(space, dtype):
"""
Asserts that dtype is a nested tuple with exactly the same tree
structure as space, and that the dtypes of space's components and
their corresponding elements in <dtype> are equal.
"""
assert (isinstance(space, CompositeSpace) ==
isinstance(dtype, tuple))
if isinstance(space, CompositeSpace):
for s, d in safe_zip(space.components, dtype):
assert_composite_dtype_eq(s, d)
else:
assert space.dtype == dtype
if isinstance(space, SimplyTypedSpace):
assert space.dtype == space._dtype
elif isinstance(space, NullSpace):
assert space.dtype == "NullSpace's dtype"
elif isinstance(space, CompositeSpace):
assert_composite_dtype_eq(space, space.dtype)
def test_dtype_setter(space, dtype):
"""
Tests the setter method of space's dtype property.
"""
def get_expected_error(space, dtype):
"""
If calling space.dtype = dtype is expected to throw an exception,
this returns (exception_class, exception_message).
If no exception is to be expected, this returns (None, None).
"""
if isinstance(space, CompositeSpace):
if isinstance(dtype, tuple):
if len(space.components) != len(dtype):
return ValueError, "Argument 0 has length "
for s, d in safe_zip(space.components, dtype):
error, message = get_expected_error(s, d)
if error is not None:
return error, message
else:
for s in space.components:
error, message = get_expected_error(s, dtype)
if error is not None:
return error, message
return None, None
if isinstance(space, SimplyTypedSpace):
if not any((dtype is None,
dtype == 'floatX',
dtype in all_scalar_dtypes)):
return (TypeError,
'Unrecognized value "%s" (type %s) for dtype arg' %
(dtype, type(dtype)))
return None, None
if isinstance(space, NullSpace):
nullspace_dtype = NullSpace().dtype
if dtype != nullspace_dtype:
return (TypeError,
'NullSpace can only take the bogus dtype "%s"' %
nullspace_dtype)
return None, None
raise NotImplementedError("%s not yet supported by this test" %
type(space))
def assert_dtype_equiv(space, dtype):
"""
Asserts that space.dtype and dtype are equivalent.
"""
if isinstance(space, CompositeSpace):
if isinstance(dtype, tuple):
for s, d in safe_zip(space.components, dtype):
assert_dtype_equiv(s, d)
else:
for s in space.components:
assert_dtype_equiv(s, dtype)
else:
assert not isinstance(dtype, tuple)
if dtype == 'floatX':
dtype = theano.config.floatX
assert space.dtype == dtype, ("%s not equal to %s" %
(space.dtype, dtype))
expected_error, expected_message = get_expected_error(space, dtype)
if expected_error is not None:
try:
space.dtype = dtype
except expected_error as ex:
assert expected_message in str(ex)
        except Exception as unexpected_ex:
            print("Expected exception of type %s, got %s instead." %
                  (expected_error.__name__, type(unexpected_ex)))
            raise unexpected_ex
return
else:
space.dtype = dtype
assert_dtype_equiv(space, dtype)
def test_simply_typed_space_validate(space, batch_dtype, is_numeric):
"""
Creates a batch of batch_dtype, and sees if space validates it.
"""
assert isinstance(space, SimplyTypedSpace), \
"%s is not a SimplyTypedSpace" % type(space)
batch_sizes = (1, 3)
if not is_numeric and isinstance(space, VectorSpace) and space.sparse:
batch_sizes = (None, )
for batch_size in batch_sizes:
if is_numeric:
batch = space.get_origin_batch(dtype=batch_dtype,
batch_size=batch_size)
else:
batch = space.make_theano_batch(dtype=batch_dtype,
batch_size=batch_size,
name="test batch to validate")
# Expect an error if space.dtype is not None and batch can't cast
# to it.
if space.dtype is not None and \
not np.can_cast(batch.dtype, space.dtype):
np.testing.assert_raises(TypeError,
space._validate,
(is_numeric, batch))
else:
# Otherwise, don't expect an error.
space._validate(is_numeric, batch)
#
#
# End of test_dtype_setter() function
shape = np.array([2, 3, 4], dtype='int')
assert len(shape) == 3 # This test depends on this being true
dtypes = ('floatX', None) + all_scalar_dtypes
#
# spaces with the same number of elements
#
vector_spaces = tuple(VectorSpace(dim=shape.prod(), dtype=dt, sparse=s)
for dt in dtypes for s in (True, False))
conv2d_spaces = tuple(Conv2DSpace(shape=shape[:2],
dtype=dt,
num_channels=shape[2])
for dt in dtypes)
# no need to make CompositeSpaces with components spanning all possible
# dtypes. Just try 2 dtype combos. No need to try different sparsities
# either. That will be tested by the non-composite space conversions.
n_dtypes = 2
old_nchannels = shape[2]
shape[2] = old_nchannels / 2
assert shape[2] * 2 == old_nchannels, \
("test code is broken: # of channels should start as an even "
"number, not %d." % old_nchannels)
def make_composite_space(dtype0, dtype1, use_conv2d):
if use_conv2d:
second_space = Conv2DSpace(shape=shape[:2],
dtype=dtype1,
num_channels=shape[2])
else:
second_space = VectorSpace(dim=np.prod(shape),
dtype=dtype1)
return CompositeSpace((VectorSpace(dim=shape.prod(), dtype=dtype0),
second_space))
composite_spaces = tuple(make_composite_space(dtype0, dtype1, use_conv2d)
for dtype0, dtype1 in zip(dtypes[:n_dtypes],
dtypes[-n_dtypes:])
for use_conv2d in [True, False])
del n_dtypes
# A few composite dtypes to try throwing at CompositeSpace's batch-making
# methods.
composite_dtypes = ((None, 'int8'),
('complex128', theano.config.floatX))
# Tests CompositeSpace's batch-making methods and dtype setter
# with composite dtypes
for from_space in composite_spaces:
for to_dtype in composite_dtypes:
test_get_origin_batch(from_space, to_dtype)
test_make_shared_batch(from_space, to_dtype)
test_make_theano_batch(from_space, to_dtype)
test_dtype_setter(from_space, to_dtype)
# Tests validate/np_validate() for SimplyTypedSpaces
for is_numeric in (True, False):
for space in vector_spaces + conv2d_spaces:
for batch_dtype in ('floatX', ) + all_scalar_dtypes:
# Skip the test if the symbolic SparseType does not implement
# that dtype. As of 2015-05-07, this happens for 'float16'.
if ((isinstance(space, VectorSpace) and
space.sparse and
batch_dtype in all_scalar_dtypes and
batch_dtype not in theano.sparse.SparseType.dtype_set)):
continue
test_simply_typed_space_validate(space,
batch_dtype,
is_numeric)
all_spaces = vector_spaces + conv2d_spaces + composite_spaces
for from_space in all_spaces:
test_dtype_getter(from_space)
# Tests batch-making and dtype setting methods with non-composite
# dtypes.
for to_dtype in dtypes:
# Skip the test if the symbolic SparseType does not implement
# that dtype. As of 2015-05-07, this happens for 'float16'.
if ((isinstance(from_space, VectorSpace) and
from_space.sparse and
to_dtype in all_scalar_dtypes and
to_dtype not in theano.sparse.SparseType.dtype_set)):
continue
test_get_origin_batch(from_space, to_dtype)
test_make_shared_batch(from_space, to_dtype)
test_make_theano_batch(from_space, to_dtype)
test_dtype_setter(from_space, to_dtype)
# Tests _format_as
for to_space in all_spaces:
# Skip the test if the symbolic SparseType does not implement
# that dtype. As of 2015-05-07, this happens for 'float16'.
if ((isinstance(to_space, VectorSpace) and
to_space.sparse and
to_space.dtype in all_scalar_dtypes and
to_space.dtype not in theano.sparse.SparseType.dtype_set)):
continue
for is_numeric in (True, False):
test_format(from_space, to_space, is_numeric)
|
TypeError
|
dataset/ETHPy150Open lisa-lab/pylearn2/pylearn2/space/tests/test_space.py/test_dtypes
|
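The try/except/else/finally shape above recurs throughout this test module because, as a later comment in the same function notes, the assert_raises context-manager idiom was not catching all the expected errors. A minimal standalone sketch of the same check (all names illustrative):

def assert_raises_with_message(func, expected_error, expected_msg):
    # Run func and demand that it raise expected_error whose text
    # contains expected_msg; any other outcome fails the test.
    try:
        func()
    except expected_error as ex:
        assert expected_msg in str(ex)
    else:
        raise AssertionError("%s was not raised" % expected_error.__name__)

assert_raises_with_message(lambda: int('x'), ValueError, 'invalid literal')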
1,958
|
def parse_encoding(fp):
"""Deduce the encoding of a source file from magic comment.
It does this in the same way as the `Python interpreter`__
.. __: http://docs.python.org/ref/encodings.html
The ``fp`` argument should be a seekable file object.
(From Jeff Dairiki)
"""
pos = fp.tell()
fp.seek(0)
try:
line1 = fp.readline()
has_bom = line1.startswith(codecs.BOM_UTF8)
if has_bom:
line1 = line1[len(codecs.BOM_UTF8):]
m = PYTHON_MAGIC_COMMENT_re.match(line1)
if not m:
try:
import parser
parser.suite(line1)
except (__HOLE__, SyntaxError):
# Either it's a real syntax error, in which case the source is
# not valid python source, or line2 is a continuation of line1,
# in which case we don't want to scan line2 for a magic
# comment.
pass
else:
line2 = fp.readline()
m = PYTHON_MAGIC_COMMENT_re.match(line2)
if has_bom:
if m:
raise SyntaxError(
"python refuses to compile code with both a UTF8 "
"byte-order-mark and a magic encoding comment")
return 'utf_8'
elif m:
return m.group(1)
else:
return None
finally:
fp.seek(pos)
|
ImportError
|
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Babel-0.9.6/babel/util.py/parse_encoding
|
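A hypothetical call to parse_encoding, assuming PYTHON_MAGIC_COMMENT_re follows the PEP 263 pattern; the tell()/seek() bracketing means the caller's file position survives the call:

from StringIO import StringIO  # Python 2, matching the code above

fp = StringIO("# -*- coding: latin-1 -*-\nx = 1\n")
fp.read(5)                        # move the position somewhere arbitrary
assert parse_encoding(fp) == 'latin-1'
assert fp.tell() == 5             # restored by the finally clause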
1,959
|
def check_dependencies(self, obj, failed):
for dep in self.dependencies:
peer_name = dep[0].lower() + dep[1:] # django names are camelCased with the first letter lower
try:
peer_object = deepgetattr(obj, peer_name)
try:
peer_objects = peer_object.all()
except AttributeError:
peer_objects = [peer_object]
except:
peer_objects = []
if (hasattr(obj,'controller')):
try:
peer_objects = filter(lambda o:o.controller==obj.controller, peer_objects)
except __HOLE__:
pass
if (failed in peer_objects):
if (obj.backend_status!=failed.backend_status):
obj.backend_status = failed.backend_status
obj.save(update_fields=['backend_status'])
raise FailedDependency("Failed dependency for %s:%s peer %s:%s failed %s:%s" % (obj.__class__.__name__, str(getattr(obj,"pk","no_pk")), peer_object.__class__.__name__, str(getattr(peer_object,"pk","no_pk")), failed.__class__.__name__, str(getattr(failed,"pk","no_pk"))))
|
AttributeError
|
dataset/ETHPy150Open open-cloud/xos/xos/synchronizers/base/syncstep-portal.py/SyncStep.check_dependencies
|
1,960
|
@staticmethod
def _extract_from_file(file_pattern, file_description, extract_function):
"""Extract metadata from a file.
Returns the result of running extract_function on the opened
file, or None if the file cannot be found. file_pattern is a
glob pattern for the file: the first file found is used.
file_description is a description of the file for logging and
error messages."""
try:
md_path = glob.glob(file_pattern)[0]
md_file = open(md_path)
metadata = extract_function(md_file)
md_file.close()
except IndexError: # File not found
metadata = None
LOGGER.debug('No %s file found.', file_description)
except __HOLE__: # Open failed
raise DatasetError('Unable to open %s file.' % file_description)
return metadata
|
IOError
|
dataset/ETHPy150Open GeoscienceAustralia/agdc/src/landsat_ingester/landsat_dataset.py/LandsatDataset._extract_from_file
|
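Hypothetical usage of the helper above; the glob pattern, description, and extractor function are all illustrative:

md_value = LandsatDataset._extract_from_file(
    '/data/scene/*_MTL.txt',      # first matching file wins
    'MTL metadata',
    lambda md_file: md_file.readline().strip())
# md_value is None if no file matched; DatasetError if open() failed.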
1,961
|
def get_start_datetime(self):
"""The start of the acquisition.
This is a datetime without timezone in UTC.
"""
# Use the alternate time if available (from EODS_DATASET metadata).
try:
start_dt = self._ds.scene_alt_start_datetime
except __HOLE__:
start_dt = None
        # Otherwise use the original time (calculated from scene_centre_time).
if start_dt is None:
start_dt = self._ds.scene_start_datetime
return start_dt
|
AttributeError
|
dataset/ETHPy150Open GeoscienceAustralia/agdc/src/landsat_ingester/landsat_dataset.py/LandsatDataset.get_start_datetime
|
1,962
|
def get_end_datetime(self):
"""The end of the acquisition.
This is a datatime without timezone in UTC.
"""
# Use the alternate time if available (from EODS_DATASET metadata).
try:
end_dt = self._ds.scene_alt_end_datetime
except __HOLE__:
end_dt = None
        # Otherwise use the original time (calculated from scene_centre_time).
if end_dt is None:
end_dt = self._ds.scene_end_datetime
return end_dt
|
AttributeError
|
dataset/ETHPy150Open GeoscienceAustralia/agdc/src/landsat_ingester/landsat_dataset.py/LandsatDataset.get_end_datetime
|
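The two datetime getters above share one fallback shape; an equivalent sketch (the fake dataset class is invented for the demo), since getattr's default argument covers the same AttributeError case:

def _first_attr(ds, alt_name, orig_name):
    # Prefer the alternate EODS_DATASET value; fall back to the
    # original one when the alternate is missing or None.
    value = getattr(ds, alt_name, None)
    return value if value is not None else getattr(ds, orig_name)

class _FakeDS(object):
    scene_alt_start_datetime = None    # alternate not available
    scene_start_datetime = 'original'

assert _first_attr(_FakeDS(), 'scene_alt_start_datetime',
                   'scene_start_datetime') == 'original'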
1,963
|
def get_pq_tests_run(self):
"""The tests run for a Pixel Quality dataset.
This is a 16 bit integer with the bits acting as flags. 1 indicates
that the test was run, 0 that it was not.
"""
# None value provided for pq_tests_run value in case PQA metadata
# extraction fails due to out of date version of SceneDataset.
# This should be a temporary measure.
try:
pq_tests_run = self._ds.pq_tests_run
except __HOLE__:
pq_tests_run = None
return pq_tests_run
#
# Methods used for tiling
#
|
AttributeError
|
dataset/ETHPy150Open GeoscienceAustralia/agdc/src/landsat_ingester/landsat_dataset.py/LandsatDataset.get_pq_tests_run
|
1,964
|
def log_entry( self, logger, entry ):
# check to see if redis truncated the command
match = MORE_BYTES.search( entry['command'] )
if match:
pos, length = match.span()
pos -= 1
# find the first byte which is not a 'middle' byte in utf8
# middle bytes always begin with b10xxxxxx which means they
# will be >= b10000000 and <= b10111111
while pos > 0 and 0x80 <= ord( entry['command'][pos] ) <= 0xBF:
pos -= 1
# at this point, entry['command'][pos] will either be a single byte character or
# the start of a truncated multibyte character.
# If it's a single character, skip over it so it's included in the slice
# If it's the start of a truncated multibyte character don't do anything
# and the truncated bytes will be removed with the slice
if ord( entry['command'][pos] ) < 0x80:
pos += 1
#slice off any unwanted parts of the string
entry['command'] = entry['command'][:pos] + match.group()
command = ""
try:
command = entry['command'].decode( 'utf8' )
except __HOLE__, e:
if self.utf8_warning_interval:
logger.warn( "Redis command contains invalid utf8: %s" % binascii.hexlify( entry['command'] ), limit_once_per_x_secs=self.utf8_warning_interval, limit_key="redis-utf8" )
command = entry['command'].decode( 'utf8', errors="replace" )
time_format = "%Y-%m-%d %H:%M:%SZ"
logger.emit_value( 'redis', 'slowlog', extra_fields={
'host': self.display_string,
'ts': time.strftime( time_format, time.gmtime( entry['start_time'] ) ),
'exectime' : entry['duration'],
'command' : command
} )
self.last_id = entry['id']
self.last_timestamp = entry['start_time']
|
UnicodeDecodeError
|
dataset/ETHPy150Open scalyr/scalyr-agent-2/scalyr_agent/builtin_monitors/redis_monitor.py/RedisHost.log_entry
|
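A Python 3 sketch of just the byte-trimming logic above, with the MORE_BYTES suffix handling omitted: step back over UTF-8 continuation bytes and conservatively drop the trailing, possibly truncated, multi-byte sequence:

def trim_partial_utf8(data):
    pos = len(data)
    # step back over continuation bytes (0b10xxxxxx)
    while pos > 0 and 0x80 <= data[pos - 1] <= 0xBF:
        pos -= 1
    # drop the lead byte of the (possibly cut-off) sequence as well
    if pos > 0 and data[pos - 1] >= 0xC0:
        pos -= 1
    return data[:pos]

assert trim_partial_utf8(b'ok\xc3') == b'ok'           # cut 2-byte char
assert trim_partial_utf8(b'\xe2\x82') == b''           # cut 3-byte char
assert trim_partial_utf8(b'plain ascii') == b'plain ascii'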
1,965
|
def inject_config(consumer, properties):
for prop in properties:
try:
if ":" in prop:
property_name, value = prop.split(':')[1], _section_value_map[prop]
else:
property_name, value = prop, _value_map[prop]
except KeyError:
log.error("No value found for configuration value {} in consumer {}", prop, consumer.__name__)
continue
try:
old_value = getattr(consumer, property_name)
if old_value == value:
return
log.info("Updating config {} from {} to {} in {}.", property_name, old_value, value, consumer.__name__)
except __HOLE__:
log.info("Setting config {}: {} in {}.", property_name, value, consumer.__name__)
setattr(consumer, property_name, value)
if hasattr(consumer, '_on_configured'):
consumer._on_configured()
|
AttributeError
|
dataset/ETHPy150Open genzgd/Lampost-Mud/lampost/context/config.py/inject_config
|
1,966
|
def calculate_scenario_stats(self, nth_pctile=95, format_numbers=True):
"""Compute various statistics from worker job result dicts.
:param nth_pctile: Use this percentile when calculating the stats
:param format_numbers: Should various floating-point numbers be
formatted as strings or left full-precision floats
:returns: A stats python dict which looks something like:
SERIES_STATS = {
'min': 1.1,
'max': 1.1,
'avg': 1.1,
'std_dev': 1.1,
'median': 1.1,
}
{
'agg_stats': {
'worker_count': 1,
'start': 1.1,
'stop': 1.1,
'req_count': 1,
'retries': 0,
'errors' : 0,
'avg_req_per_sec': 1.1, # req_count / (stop - start)?
'retry_rate': 0.0,
'first_byte_latency': SERIES_STATS,
'last_byte_latency': SERIES_STATS,
},
'worker_stats': {
1: { # keys are worker_ids
'start': 1.1,
'stop': 1.1,
'req_count': 1,
'retries': 0,
'retry_rate': 0.0,
'errors': 0,
'avg_req_per_sec': 1.1, # req_count / (stop - start)?
'first_byte_latency': SERIES_STATS,
'last_byte_latency': SERIES_STATS,
},
# ...
},
'op_stats': {
CREATE_OBJECT: { # keys are CRUD constants: CREATE_OBJECT, READ_OBJECT, etc.
'req_count': 1, # num requests of this CRUD type
'avg_req_per_sec': 1.1, # total_requests / sum(last_byte_latencies)
'first_byte_latency': SERIES_STATS,
'last_byte_latency': SERIES_STATS,
'size_stats': {
'small': { # keys are size_str values
'req_count': 1, # num requests of this type and size
'retries': 0, # num of retries
'avg_req_per_sec': 1.1, # total_requests / sum(last_byte_latencies)
'errors': 0,
'retry_rate': 0.0,
'first_byte_latency': SERIES_STATS,
'last_byte_latency': SERIES_STATS,
},
# ...
},
},
# ...
},
'size_stats': {
'small': { # keys are size_str values
'req_count': 1, # num requests of this size (for all CRUD types)
'retries': 0, # num of retries
                'actual_request_count': 1, # num requests, including retries
'avg_req_per_sec': 1.1, # total_requests / sum(last_byte_latencies)
'errors': 0,
'retry_rate': 0.0,
'first_byte_latency': SERIES_STATS,
'last_byte_latency': SERIES_STATS,
},
# ...
},
'time_series': {
'start': 1, # epoch time of first data point
'data': [
1, # number of requests finishing during this second
# ...
],
},
}
"""
# Each result looks like:
# {
# 'worker_id': 1,
# 'type': 'get_object',
# 'size': 4900000,
# 'size_str': 'large',
# 'first_byte_latency': 0.9137639999389648,
# 'last_byte_latency': 0.913769006729126,
# 'retries': 1
# 'completed_at': 1324372892.360802,
# }
# OR
# {
# 'worker_id': 1,
# 'type': 'get_object',
# 'size_str': 'large'
# 'completed_at': 1324372892.360802,
# 'retries': 1
# 'exception': '...',
# }
logging.info('Calculating statistics...')
agg_stats = dict(start=2 ** 32, stop=0, req_count=0)
op_stats = {}
for crud_type in [ssbench.CREATE_OBJECT, ssbench.READ_OBJECT,
ssbench.UPDATE_OBJECT, ssbench.DELETE_OBJECT]:
op_stats[crud_type] = dict(
req_count=0, avg_req_per_sec=0,
size_stats=OrderedDict.fromkeys(
self.scenario.sizes_by_name.keys()))
req_completion_seconds = {}
start_time = 0
completion_time_max = 0
completion_time_min = 2 ** 32
stats = dict(
nth_pctile=nth_pctile,
agg_stats=agg_stats,
worker_stats={},
op_stats=op_stats,
size_stats=OrderedDict.fromkeys(
self.scenario.sizes_by_name.keys()))
for results in self.unpacker:
skipped = 0
for result in results:
try:
res_completed_at = result['completed_at']
res_completion_time = int(res_completed_at)
res_worker_id = result['worker_id']
res_type = result['type']
res_size_str = result['size_str']
except __HOLE__ as err:
logging.info('Skipped result with missing keys (%r): %r',
err, result)
skipped += 1
continue
try:
res_exception = result['exception']
except KeyError:
try:
res_last_byte_latency = result['last_byte_latency']
except KeyError:
logging.info('Skipped result with missing'
' last_byte_latency key: %r',
result)
skipped += 1
continue
if res_completion_time < completion_time_min:
completion_time_min = res_completion_time
start_time = (
res_completion_time - res_last_byte_latency)
if res_completion_time > completion_time_max:
completion_time_max = res_completion_time
req_completion_seconds[res_completion_time] = \
1 + req_completion_seconds.get(res_completion_time, 0)
result['start'] = res_completed_at - res_last_byte_latency
else:
# report log exceptions
logging.warn('calculate_scenario_stats: exception from '
'worker %d: %s',
res_worker_id, res_exception)
try:
res_traceback = result['traceback']
except KeyError:
logging.warn('traceback missing')
else:
logging.info(res_traceback)
# Stats per-worker
if res_worker_id not in stats['worker_stats']:
stats['worker_stats'][res_worker_id] = {}
self._add_result_to(stats['worker_stats'][res_worker_id],
result)
# Stats per-file-size
try:
val = stats['size_stats'][res_size_str]
except KeyError:
stats['size_stats'][res_size_str] = {}
else:
if not val:
stats['size_stats'][res_size_str] = {}
self._add_result_to(stats['size_stats'][res_size_str],
result)
self._add_result_to(agg_stats, result)
type_stats = op_stats[res_type]
self._add_result_to(type_stats, result)
# Stats per-operation-per-file-size
try:
val = type_stats['size_stats'][res_size_str]
except KeyError:
type_stats['size_stats'][res_size_str] = {}
else:
if not val:
type_stats['size_stats'][res_size_str] = {}
self._add_result_to(
type_stats['size_stats'][res_size_str], result)
if skipped > 0:
logging.warn("Total number of results skipped: %d", skipped)
agg_stats['worker_count'] = len(stats['worker_stats'].keys())
self._compute_req_per_sec(agg_stats)
self._compute_retry_rate(agg_stats)
self._compute_latency_stats(agg_stats, nth_pctile, format_numbers)
jobs_per_worker = []
for worker_stats in stats['worker_stats'].values():
jobs_per_worker.append(worker_stats['req_count'])
self._compute_req_per_sec(worker_stats)
self._compute_retry_rate(worker_stats)
self._compute_latency_stats(worker_stats, nth_pctile,
format_numbers)
stats['jobs_per_worker_stats'] = self._series_stats(jobs_per_worker,
nth_pctile,
format_numbers)
logging.debug('Jobs per worker stats:\n' +
pformat(stats['jobs_per_worker_stats']))
for op_stats_dict in op_stats.itervalues():
if op_stats_dict['req_count']:
self._compute_req_per_sec(op_stats_dict)
self._compute_retry_rate(op_stats_dict)
self._compute_latency_stats(op_stats_dict, nth_pctile,
format_numbers)
for size_str, size_stats in \
op_stats_dict['size_stats'].iteritems():
if size_stats:
self._compute_req_per_sec(size_stats)
self._compute_retry_rate(size_stats)
self._compute_latency_stats(size_stats, nth_pctile,
format_numbers)
else:
op_stats_dict['size_stats'].pop(size_str)
for size_str, size_stats in stats['size_stats'].iteritems():
if size_stats:
self._compute_req_per_sec(size_stats)
self._compute_retry_rate(size_stats)
self._compute_latency_stats(size_stats, nth_pctile,
format_numbers)
else:
stats['size_stats'].pop(size_str)
time_series_data = [req_completion_seconds.get(t, 0)
for t in range(completion_time_min,
completion_time_max + 1)]
stats['time_series'] = dict(start=completion_time_min,
start_time=start_time,
stop=completion_time_max,
data=time_series_data)
return stats
|
KeyError
|
dataset/ETHPy150Open swiftstack/ssbench/ssbench/reporter.py/Reporter.calculate_scenario_stats
|
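A worked sketch of the time-series bucketing above: completion timestamps are counted per integer second, then expanded into a dense per-second list:

completions = [10.2, 10.7, 11.1, 13.9]
buckets = {}
for completed_at in completions:
    sec = int(completed_at)
    buckets[sec] = buckets.get(sec, 0) + 1
lo, hi = min(buckets), max(buckets)
series = [buckets.get(t, 0) for t in range(lo, hi + 1)]
assert series == [2, 1, 0, 1]   # seconds 10, 11, 12, 13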
1,967
|
def _compute_latency_stats(self, stat_dict, nth_pctile, format_numbers):
try:
for latency_type in ('first_byte_latency', 'last_byte_latency'):
stat_dict[latency_type] = self._series_stats(
stat_dict.get(latency_type, []), nth_pctile,
format_numbers)
except __HOLE__:
logging.exception('stat_dict: %r', stat_dict)
raise
|
KeyError
|
dataset/ETHPy150Open swiftstack/ssbench/ssbench/reporter.py/Reporter._compute_latency_stats
|
1,968
|
def _compute_req_per_sec(self, stat_dict):
try:
sd_start = stat_dict['start']
except __HOLE__:
stat_dict['avg_req_per_sec'] = 0.0
else:
delta_t = stat_dict['stop'] - sd_start
if delta_t != 0:
stat_dict['avg_req_per_sec'] = round(
stat_dict['req_count'] / delta_t,
6)
else:
stat_dict['avg_req_per_sec'] = float("inf")
|
KeyError
|
dataset/ETHPy150Open swiftstack/ssbench/ssbench/reporter.py/Reporter._compute_req_per_sec
|
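A worked example of the arithmetic above: 10 requests over a 4-second window. (Per the branches above, a missing 'start' key yields 0.0 and a zero-length window yields infinity.)

stat = {'start': 100.0, 'stop': 104.0, 'req_count': 10}
rate = round(stat['req_count'] / (stat['stop'] - stat['start']), 6)
assert rate == 2.5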
1,969
|
def _add_result_to(self, stat_dict, result):
if 'errors' not in stat_dict:
stat_dict['errors'] = 0
try:
res_start = result['start']
except KeyError:
pass
else:
try:
sd_start = stat_dict['start']
except __HOLE__:
stat_dict['start'] = res_start
else:
if res_start < sd_start:
stat_dict['start'] = res_start
try:
sd_stop = stat_dict['stop']
except KeyError:
stat_dict['stop'] = result['completed_at']
else:
if result['completed_at'] > sd_stop:
stat_dict['stop'] = result['completed_at']
stat_dict['retries'] = \
stat_dict.get('retries', 0) + int(result['retries'])
if 'exception' not in result:
stat_dict['req_count'] = stat_dict.get('req_count', 0) + 1
self._rec_latency(stat_dict, result)
else:
stat_dict['errors'] += 1
|
KeyError
|
dataset/ETHPy150Open swiftstack/ssbench/ssbench/reporter.py/Reporter._add_result_to
|
1,970
|
def download(url, server_fname, local_fname=None, progress_update_percentage=5,
bypass_certificate_check=False):
"""
An internet download utility modified from
http://stackoverflow.com/questions/22676/
how-do-i-download-a-file-over-http-using-python/22776#22776
"""
if bypass_certificate_check:
import ssl
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
u = urllib.urlopen(url, context=ctx)
else:
u = urllib.urlopen(url)
if local_fname is None:
local_fname = server_fname
full_path = local_fname
meta = u.info()
with open(full_path, 'wb') as f:
try:
file_size = int(meta.get("Content-Length"))
except __HOLE__:
print("WARNING: Cannot get file size, displaying bytes instead!")
file_size = 100
print("Downloading: %s Bytes: %s" % (server_fname, file_size))
file_size_dl = 0
block_sz = int(1E7)
p = 0
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
if (file_size_dl * 100. / file_size) > p:
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl *
100. / file_size)
print(status)
p += progress_update_percentage
|
TypeError
|
dataset/ETHPy150Open sklearn-theano/sklearn-theano/sklearn_theano/datasets/base.py/download
|
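A hypothetical call to the downloader above; the URL and filename are illustrative. Progress lines print at every 10% once Content-Length is known:

download('http://example.com/archive.tar.gz', 'archive.tar.gz',
         progress_update_percentage=10)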
1,971
|
def get_trans_id_time(trans_id):
"""
Returns the time.time() embedded in the trans_id or None if no
time information is embedded.
Copied from the Swift codebase.
Copyright (c) 2010-2012 OpenStack Foundation
"""
if len(trans_id) >= 34 and trans_id[:2] == 'tx' and trans_id[23] == '-':
try:
return int(trans_id[24:34], 16)
except __HOLE__:
pass
return None
|
ValueError
|
dataset/ETHPy150Open gholt/swiftly/swiftly/client/utils.py/get_trans_id_time
|
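A worked example against the layout the function checks, namely 'tx', then 21 arbitrary characters, a '-' at index 23, and ten hex digits of epoch time; the id below is fabricated:

tx = 'tx' + 'a' * 21 + '-' + '%010x' % 1324372892 + '0123'
assert get_trans_id_time(tx) == 1324372892
assert get_trans_id_time('not-a-trans-id') is None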
1,972
|
def _unregister_event(self, event):
try:
self._evrd.remove(event)
except __HOLE__:
pass
try:
self._evwr.remove(event)
except KeyError:
pass
try:
self._ioevents.remove(event)
except KeyError:
pass
|
KeyError
|
dataset/ETHPy150Open couchbase/couchbase-python-client/couchbase/iops/select.py/SelectIOPS._unregister_event
|
1,973
|
def update_event(self, event, action, flags, fd=None):
if action == PYCBC_EVACTION_UNWATCH:
self._unregister_event(event)
return
elif action == PYCBC_EVACTION_WATCH:
if flags & LCB_READ_EVENT:
self._evrd.add(event)
else:
try:
self._evrd.remove(event)
except KeyError:
pass
if flags & LCB_WRITE_EVENT:
self._evwr.add(event)
else:
try:
self._evwr.remove(event)
except __HOLE__:
pass
|
KeyError
|
dataset/ETHPy150Open couchbase/couchbase-python-client/couchbase/iops/select.py/SelectIOPS.update_event
|
1,974
|
@app.task(bind=True)
def execute_scheduled_maintenance(self, maintenance_id):
LOG.debug("Maintenance id: {}".format(maintenance_id))
maintenance = models.Maintenance.objects.get(id=maintenance_id)
models.Maintenance.objects.filter(id=maintenance_id,
).update(status=maintenance.RUNNING, started_at=datetime.now())
LOG.info("Maintenance {} is RUNNING".format(maintenance,))
worker_name = get_worker_name()
task_history = TaskHistory.register(
request=self.request, worker_name=worker_name)
LOG.info("id: %s | task: %s | kwargs: %s | args: %s" % (
self.request.id, self.request.task, self.request.kwargs, str(self.request.args)))
task_history.update_details(persist=True,
details="Executing Maintenance: {}".format(maintenance))
for hm in models.HostMaintenance.objects.filter(maintenance=maintenance):
main_output = {}
hm.status = hm.RUNNING
hm.started_at = datetime.now()
hm.save()
if hm.host is None:
hm.status = hm.UNAVAILABLEHOST
hm.finished_at = datetime.now()
hm.save()
continue
host = hm.host
update_task = "\nRunning Maintenance on {}".format(host)
try:
cloudstack_host_attributes = host.cs_host_attributes.get()
except __HOLE__ as e:
LOG.warn(
"Host {} does not have cloudstack attrs...{}".format(hm.host, e))
hm.status = hm.UNAVAILABLECSHOSTATTR
hm.finished_at = datetime.now()
hm.save()
continue
param_dict = {}
for param in models.MaintenanceParameters.objects.filter(maintenance=maintenance):
param_function = _get_function(param.function_name)
param_dict[param.parameter_name] = param_function(host.id)
main_script = build_context_script(param_dict, maintenance.main_script)
exit_status = exec_remote_command(server=host.address,
username=cloudstack_host_attributes.vm_user,
password=cloudstack_host_attributes.vm_password,
command=main_script, output=main_output)
if exit_status == 0:
hm.status = hm.SUCCESS
else:
if maintenance.rollback_script:
rollback_output = {}
hm.status = hm.ROLLBACK
hm.save()
rollback_script = build_context_script(
param_dict, maintenance.rollback_script)
exit_status = exec_remote_command(server=host.address,
username=cloudstack_host_attributes.vm_user,
password=cloudstack_host_attributes.vm_password,
command=rollback_script, output=rollback_output)
if exit_status == 0:
hm.status = hm.ROLLBACK_SUCCESS
else:
hm.status = hm.ROLLBACK_ERROR
hm.rollback_log = get_dict_lines(rollback_output)
else:
hm.status = hm.ERROR
update_task += "...status: {}".format(hm.status)
task_history.update_details(persist=True,
details=update_task)
hm.main_log = get_dict_lines(main_output)
hm.finished_at = datetime.now()
hm.save()
models.Maintenance.objects.filter(id=maintenance_id,
).update(status=maintenance.FINISHED, finished_at=datetime.now())
task_history.update_status_for(TaskHistory.STATUS_SUCCESS,
details='Maintenance executed succesfully')
LOG.info("Maintenance: {} has FINISHED".format(maintenance,))
|
ObjectDoesNotExist
|
dataset/ETHPy150Open globocom/database-as-a-service/dbaas/maintenance/tasks.py/execute_scheduled_maintenance
|
1,975
|
def default(self, obj):
if hasattr(obj, 'json_repr'):
return self.default(obj.json_repr())
if isinstance(obj, datetime.datetime):
return obj.isoformat()
if isinstance(obj, collections.Iterable) and not is_stringy(obj):
try:
return {k: self.default(v) for k, v in obj.items()}
except __HOLE__:
return [self.default(e) for e in obj]
return obj
|
AttributeError
|
dataset/ETHPy150Open thefactory/marathon-python/marathon/util.py/MarathonJsonEncoder.default
|
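Hypothetical usage, assuming the class derives from json.JSONEncoder as its use with cls= implies: handed to json.dumps, the encoder above recursively flattens objects exposing json_repr(), datetimes, and iterables:

import datetime
import json

payload = {'when': datetime.datetime(2016, 1, 1, 12, 30)}
assert json.dumps(payload, cls=MarathonJsonEncoder) == \
    '{"when": "2016-01-01T12:30:00"}'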
1,976
|
def default(self, obj):
if hasattr(obj, 'json_repr'):
return self.default(obj.json_repr(minimal=True))
if isinstance(obj, datetime.datetime):
return obj.isoformat()
if isinstance(obj, collections.Iterable) and not is_stringy(obj):
try:
return {k: self.default(v) for k, v in obj.items() if (v or v in (False, 0))}
except __HOLE__:
return [self.default(e) for e in obj if (e or e in (False, 0))]
return obj
|
AttributeError
|
dataset/ETHPy150Open thefactory/marathon-python/marathon/util.py/MarathonMinimalJsonEncoder.default
|
1,977
|
def load_backends():
backends = []
for medium_id, bits in enumerate(getattr(settings, "NOTIFICATION_BACKENDS", default_backends)):
if len(bits) == 2:
label, backend_path = bits
spam_sensitivity = None
elif len(bits) == 3:
label, backend_path, spam_sensitivity = bits
else:
raise exceptions.ImproperlyConfigured, "NOTIFICATION_BACKENDS does not contain enough data."
dot = backend_path.rindex(".")
backend_mod, backend_class = backend_path[:dot], backend_path[dot+1:]
try:
# import the module and get the module from sys.modules
__import__(backend_mod)
mod = sys.modules[backend_mod]
except __HOLE__, e:
raise exceptions.ImproperlyConfigured, 'Error importing notification backend %s: "%s"' % (backend_mod, e)
# add the backend label and an instantiated backend class to the
# backends list.
backend_instance = getattr(mod, backend_class)(medium_id, spam_sensitivity)
backends.append(((medium_id, label), backend_instance))
return dict(backends)
|
ImportError
|
dataset/ETHPy150Open brosner/django-notification/notification/backends/__init__.py/load_backends
|
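A hypothetical NOTIFICATION_BACKENDS setting in the shape consumed above (the 'myapp' path is invented): each entry is (label, dotted class path) with an optional third spam_sensitivity element:

NOTIFICATION_BACKENDS = [
    ("email", "notification.backends.email.EmailBackend"),
    ("sms", "myapp.backends.SmsBackend", 2),   # 2 is the spam sensitivity
]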
1,978
|
def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
requirements=None,
env=None,
bin_env=None,
use_wheel=False,
no_use_wheel=False,
log=None,
proxy=None,
timeout=None,
editable=None,
find_links=None,
index_url=None,
extra_index_url=None,
no_index=False,
mirrors=None,
build=None,
target=None,
download=None,
download_cache=None,
source=None,
upgrade=False,
force_reinstall=False,
ignore_installed=False,
exists_action=None,
no_deps=False,
no_install=False,
no_download=False,
global_options=None,
install_options=None,
user=None,
no_chown=False,
cwd=None,
activate=False,
pre_releases=False,
cert=None,
allow_all_external=False,
allow_external=None,
allow_unverified=None,
process_dependency_links=False,
saltenv='base',
env_vars=None,
use_vt=False,
trusted_host=None,
no_cache_dir=False):
'''
Install packages with pip
Install packages individually or from a pip requirements file. Install
packages globally or to a virtualenv.
pkgs
Comma separated list of packages to install
requirements
Path to requirements
bin_env
Path to pip bin or path to virtualenv. If doing a system install,
and want to use a specific pip bin (pip-2.7, pip-2.6, etc..) just
specify the pip bin you want.
.. note::
If installing into a virtualenv, just use the path to the
virtualenv (e.g. ``/home/code/path/to/virtualenv/``)
env
Deprecated, use bin_env now
use_wheel
Prefer wheel archives (requires pip>=1.4)
no_use_wheel
Force to not use wheel archives (requires pip>=1.4)
log
Log file where a complete (maximum verbosity) record will be kept
proxy
Specify a proxy in the form ``user:passwd@proxy.server:port``. Note
that the ``user:password@`` is optional and required only if you are
behind an authenticated proxy. If you provide
``user@proxy.server:port`` then you will be prompted for a password.
timeout
Set the socket timeout (default 15 seconds)
editable
install something editable (e.g.
``git+https://github.com/worldcompany/djangoembed.git#egg=djangoembed``)
find_links
URL to search for packages
index_url
Base URL of Python Package Index
extra_index_url
Extra URLs of package indexes to use in addition to ``index_url``
no_index
Ignore package index
mirrors
Specific mirror URL(s) to query (automatically adds --use-mirrors)
build
Unpack packages into ``build`` dir
target
Install packages into ``target`` dir
download
Download packages into ``download`` instead of installing them
download_cache
Cache downloaded packages in ``download_cache`` dir
source
Check out ``editable`` packages into ``source`` dir
upgrade
Upgrade all packages to the newest available version
force_reinstall
When upgrading, reinstall all packages even if they are already
up-to-date.
ignore_installed
Ignore the installed packages (reinstalling instead)
exists_action
Default action when a path already exists: (s)witch, (i)gnore, (w)ipe,
(b)ackup
no_deps
Ignore package dependencies
no_install
Download and unpack all packages, but don't actually install them
no_download
Don't download any packages, just install the ones already downloaded
(completes an install run with ``--no-install``)
install_options
Extra arguments to be supplied to the setup.py install command (e.g.
like ``--install-option='--install-scripts=/usr/local/bin'``). Use
multiple --install-option options to pass multiple options to setup.py
install. If you are using an option with a directory path, be sure to
use absolute path.
global_options
Extra global options to be supplied to the setup.py call before the
install command.
user
The user under which to run pip
no_chown
When user is given, do not attempt to copy and chown a requirements
file
cwd
Current working directory to run pip from
activate
Activates the virtual environment, if given via bin_env, before running
install.
.. deprecated:: 2014.7.2
If `bin_env` is given, pip will already be sourced from that
virtualenv, making `activate` effectively a noop.
pre_releases
Include pre-releases in the available versions
cert
Provide a path to an alternate CA bundle
allow_all_external
Allow the installation of all externally hosted files
allow_external
Allow the installation of externally hosted files (comma separated
list)
allow_unverified
Allow the installation of insecure and unverifiable files (comma
separated list)
process_dependency_links
Enable the processing of dependency links
env_vars
Set environment variables that some builds will depend on. For example,
a Python C-module may have a Makefile that needs INCLUDE_PATH set to
pick up a header file while compiling. This must be in the form of a
dictionary or a mapping.
Example:
.. code-block:: bash
salt '*' pip.install django_app env_vars="{'CUSTOM_PATH': '/opt/django_app'}"
trusted_host
Mark this host as trusted, even though it does not have valid or any
HTTPS.
use_vt
Use VT terminal emulation (see output while installing)
no_cache_dir
Disable the cache.
CLI Example:
.. code-block:: bash
salt '*' pip.install <package name>,<package2 name>
salt '*' pip.install requirements=/path/to/requirements.txt
salt '*' pip.install <package name> bin_env=/path/to/virtualenv
salt '*' pip.install <package name> bin_env=/path/to/pip_bin
Complicated CLI example::
salt '*' pip.install markdown,django \
editable=git+https://github.com/worldcompany/djangoembed.git#egg=djangoembed upgrade=True no_deps=True
'''
    # Switching from using `pip_bin` and `env` to just `bin_env`
    # because using an env and a pip bin that's not in the env could
    # be problematic.
    # Still using the `env` variable for backwards compatibility's sake,
    # but going forward you should specify either a pip bin or an env with
    # the `bin_env` argument and we'll take care of the rest.
if env and not bin_env:
salt.utils.warn_until(
'Carbon',
'Passing \'env\' to the pip module is deprecated. Use bin_env instead. '
'This functionality will be removed in Salt Carbon.'
)
bin_env = env
if activate:
salt.utils.warn_until(
'Carbon',
'Passing \'activate\' to the pip module is deprecated. If '
'bin_env refers to a virtualenv, there is no need to activate '
'that virtualenv before using pip to install packages in it.'
)
pip_bin = _get_pip_bin(bin_env)
cmd = [pip_bin, 'install']
cleanup_requirements, error = _process_requirements(
requirements=requirements,
cmd=cmd,
cwd=cwd,
saltenv=saltenv,
user=user
)
if error:
return error
if use_wheel:
min_version = '1.4'
cur_version = __salt__['pip.version'](bin_env)
if not salt.utils.compare_versions(ver1=cur_version, oper='>=',
ver2=min_version):
logger.error(
('The --use-wheel option is only supported in pip {0} and '
'newer. The version of pip detected is {1}. This option '
'will be ignored.'.format(min_version, cur_version))
)
else:
cmd.append('--use-wheel')
if no_use_wheel:
min_version = '1.4'
cur_version = __salt__['pip.version'](bin_env)
if not salt.utils.compare_versions(ver1=cur_version, oper='>=',
ver2=min_version):
logger.error(
('The --no-use-wheel option is only supported in pip {0} and '
'newer. The version of pip detected is {1}. This option '
'will be ignored.'.format(min_version, cur_version))
)
else:
cmd.append('--no-use-wheel')
if log:
if os.path.isdir(log):
raise IOError(
'\'{0}\' is a directory. Use --log path_to_file'.format(log))
elif not os.access(log, os.W_OK):
raise IOError('\'{0}\' is not writeable'.format(log))
cmd.extend(['--log', log])
if proxy:
cmd.extend(['--proxy', proxy])
if timeout:
try:
if isinstance(timeout, float):
                # Catch floating-point input; the ValueError raised here
                # is handled by the except clause below.
raise ValueError('Timeout cannot be a float')
int(timeout)
except __HOLE__:
raise ValueError(
'\'{0}\' is not a valid timeout, must be an integer'
.format(timeout)
)
cmd.extend(['--timeout', timeout])
if find_links:
if isinstance(find_links, string_types):
find_links = [l.strip() for l in find_links.split(',')]
for link in find_links:
if not (salt.utils.url.validate(link, VALID_PROTOS) or os.path.exists(link)):
raise CommandExecutionError(
'\'{0}\' is not a valid URL or path'.format(link)
)
cmd.extend(['--find-links', link])
if no_index and (index_url or extra_index_url):
raise CommandExecutionError(
'\'no_index\' and (\'index_url\' or \'extra_index_url\') are '
'mutually exclusive.'
)
if index_url:
if not salt.utils.url.validate(index_url, VALID_PROTOS):
raise CommandExecutionError(
'\'{0}\' is not a valid URL'.format(index_url)
)
cmd.extend(['--index-url', index_url])
if extra_index_url:
if not salt.utils.url.validate(extra_index_url, VALID_PROTOS):
raise CommandExecutionError(
'\'{0}\' is not a valid URL'.format(extra_index_url)
)
cmd.extend(['--extra-index-url', extra_index_url])
if no_index:
cmd.append('--no-index')
if mirrors:
if isinstance(mirrors, string_types):
mirrors = [m.strip() for m in mirrors.split(',')]
cmd.append('--use-mirrors')
for mirror in mirrors:
if not mirror.startswith('http://'):
raise CommandExecutionError(
'\'{0}\' is not a valid URL'.format(mirror)
)
cmd.extend(['--mirrors', mirror])
if build:
cmd.extend(['--build', build])
if target:
cmd.extend(['--target', target])
if download:
cmd.extend(['--download', download])
if download_cache:
cmd.extend(['--download-cache', download_cache])
if source:
cmd.extend(['--source', source])
if upgrade:
cmd.append('--upgrade')
if force_reinstall:
cmd.append('--force-reinstall')
if ignore_installed:
cmd.append('--ignore-installed')
if exists_action:
if exists_action.lower() not in ('s', 'i', 'w', 'b'):
raise CommandExecutionError(
'The exists_action pip option only supports the values '
's, i, w, and b. \'{0}\' is not valid.'.format(exists_action)
)
cmd.extend(['--exists-action', exists_action])
if no_deps:
cmd.append('--no-deps')
if no_install:
cmd.append('--no-install')
if no_download:
cmd.append('--no-download')
if no_cache_dir:
cmd.append('--no-cache-dir')
if pre_releases:
# Check the locally installed pip version
pip_version = version(pip_bin)
# From pip v1.4 the --pre flag is available
if salt.utils.compare_versions(ver1=pip_version, oper='>=', ver2='1.4'):
cmd.append('--pre')
if cert:
cmd.extend(['--cert', cert])
if global_options:
if isinstance(global_options, string_types):
global_options = [go.strip() for go in global_options.split(',')]
for opt in global_options:
cmd.extend(['--global-option', opt])
if install_options:
if isinstance(install_options, string_types):
install_options = [io.strip() for io in install_options.split(',')]
for opt in install_options:
cmd.extend(['--install-option', opt])
if pkgs:
if isinstance(pkgs, string_types):
pkgs = [p.strip() for p in pkgs.split(',')]
# It's possible we replaced version-range commas with semicolons so
# they would survive the previous line (in the pip.installed state).
# Put the commas back in while making sure the names are contained in
        # quotes; this allows proper version specs (e.g. salt>=0.17.0) to pass.
cmd.extend(['{0}'.format(p.replace(';', ',')) for p in pkgs])
if editable:
egg_match = re.compile(r'(?:#|#.*?&)egg=([^&]*)')
if isinstance(editable, string_types):
editable = [e.strip() for e in editable.split(',')]
for entry in editable:
# Is the editable local?
if not (entry == '.' or entry.startswith(('file://', '/'))):
match = egg_match.search(entry)
if not match or not match.group(1):
# Missing #egg=theEggName
raise CommandExecutionError(
'You must specify an egg for this editable'
)
cmd.extend(['--editable', entry])
if allow_all_external:
cmd.append('--allow-all-external')
if allow_external:
if isinstance(allow_external, string_types):
allow_external = [p.strip() for p in allow_external.split(',')]
for pkg in allow_external:
cmd.extend(['--allow-external', pkg])
if allow_unverified:
if isinstance(allow_unverified, string_types):
allow_unverified = \
[p.strip() for p in allow_unverified.split(',')]
for pkg in allow_unverified:
cmd.extend(['--allow-unverified', pkg])
if process_dependency_links:
cmd.append('--process-dependency-links')
if env_vars:
if isinstance(env_vars, dict):
os.environ.update(env_vars)
else:
raise CommandExecutionError(
'env_vars {0} is not a dictionary'.format(env_vars))
if trusted_host:
cmd.extend(['--trusted-host', trusted_host])
try:
cmd_kwargs = dict(saltenv=saltenv, use_vt=use_vt, runas=user)
if cwd:
cmd_kwargs['cwd'] = cwd
if bin_env and os.path.isdir(bin_env):
cmd_kwargs['env'] = {'VIRTUAL_ENV': bin_env}
logger.debug(
'TRY BLOCK: end of pip.install -- cmd: %s, cmd_kwargs: %s',
str(cmd), str(cmd_kwargs)
)
return __salt__['cmd.run_all'](cmd,
python_shell=False,
**cmd_kwargs)
finally:
for tempdir in [cr for cr in cleanup_requirements if cr is not None]:
if os.path.isdir(tempdir):
shutil.rmtree(tempdir)
|
ValueError
|
dataset/ETHPy150Open saltstack/salt/salt/modules/pip.py/install
|
1,979
|
def uninstall(pkgs=None,
requirements=None,
bin_env=None,
log=None,
proxy=None,
timeout=None,
user=None,
no_chown=False,
cwd=None,
saltenv='base',
use_vt=False):
'''
Uninstall packages with pip
Uninstall packages individually or from a pip requirements file. Uninstall
packages globally or from a virtualenv.
pkgs
comma separated list of packages to install
requirements
path to requirements.
bin_env
path to pip bin or path to virtualenv. If doing an uninstall from
the system python and want to use a specific pip bin (pip-2.7,
pip-2.6, etc..) just specify the pip bin you want.
If uninstalling from a virtualenv, just use the path to the virtualenv
(/home/code/path/to/virtualenv/)
log
Log file where a complete (maximum verbosity) record will be kept
proxy
Specify a proxy in the form
user:passwd@proxy.server:port. Note that the
user:password@ is optional and required only if you
are behind an authenticated proxy. If you provide
user@proxy.server:port then you will be prompted for a
password.
timeout
Set the socket timeout (default 15 seconds)
user
The user under which to run pip
no_chown
When user is given, do not attempt to copy and chown
a requirements file (needed if the requirements file refers to other
files via relative paths, as the copy-and-chown procedure does not
account for such files)
cwd
Current working directory to run pip from
use_vt
Use VT terminal emulation (see output while installing)
CLI Example:
.. code-block:: bash
salt '*' pip.uninstall <package name>,<package2 name>
salt '*' pip.uninstall requirements=/path/to/requirements.txt
salt '*' pip.uninstall <package name> bin_env=/path/to/virtualenv
salt '*' pip.uninstall <package name> bin_env=/path/to/pip_bin
'''
pip_bin = _get_pip_bin(bin_env)
cmd = [pip_bin, 'uninstall', '-y']
cleanup_requirements, error = _process_requirements(
requirements=requirements, cmd=cmd, saltenv=saltenv, user=user,
cwd=cwd
)
if error:
return error
if log:
try:
# TODO make this check if writeable
os.path.exists(log)
except IOError:
raise IOError('\'{0}\' is not writeable'.format(log))
cmd.extend(['--log', log])
if proxy:
cmd.extend(['--proxy', proxy])
if timeout:
try:
if isinstance(timeout, float):
                # Catch floating-point input; the ValueError raised here
                # is handled by the except clause below.
raise ValueError('Timeout cannot be a float')
int(timeout)
except ValueError:
raise ValueError(
'\'{0}\' is not a valid timeout, must be an integer'
.format(timeout)
)
cmd.extend(['--timeout', timeout])
if pkgs:
if isinstance(pkgs, string_types):
pkgs = [p.strip() for p in pkgs.split(',')]
if requirements:
for requirement in requirements:
with salt.utils.fopen(requirement) as rq_:
for req in rq_:
try:
req_pkg, _ = req.split('==')
if req_pkg in pkgs:
pkgs.remove(req_pkg)
except ValueError:
pass
cmd.extend(pkgs)
cmd_kwargs = dict(python_shell=False, runas=user,
cwd=cwd, saltenv=saltenv, use_vt=use_vt)
if bin_env and os.path.isdir(bin_env):
cmd_kwargs['env'] = {'VIRTUAL_ENV': bin_env}
try:
return __salt__['cmd.run_all'](cmd, **cmd_kwargs)
finally:
for requirement in cleanup_requirements:
if requirement:
try:
os.remove(requirement)
except __HOLE__:
pass
|
OSError
|
dataset/ETHPy150Open saltstack/salt/salt/modules/pip.py/uninstall
|
1,980
|
def version(bin_env=None):
'''
.. versionadded:: 0.17.0
Returns the version of pip. Use ``bin_env`` to specify the path to a
virtualenv and get the version of pip in that virtualenv.
If unable to detect the pip version, returns ``None``.
CLI Example:
.. code-block:: bash
salt '*' pip.version
'''
pip_bin = _get_pip_bin(bin_env)
output = __salt__['cmd.run'](
'{0} --version'.format(pip_bin), python_shell=False)
try:
return re.match(r'^pip (\S+)', output).group(1)
except __HOLE__:
return None
|
AttributeError
|
dataset/ETHPy150Open saltstack/salt/salt/modules/pip.py/version
|
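The regex above, run against typical `pip --version` output:

import re

output = 'pip 8.1.2 from /usr/lib/python2.7/site-packages (python 2.7)'
assert re.match(r'^pip (\S+)', output).group(1) == '8.1.2'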
1,981
|
def _valueForName(obj, name, executeCallables=False):
nameChunks=name.split('.')
for i in range(len(nameChunks)):
key = nameChunks[i]
if hasattr(obj, 'has_key') and key in obj:
nextObj = obj[key]
else:
try:
nextObj = getattr(obj, key)
except __HOLE__:
_raiseNotFoundException(key, obj)
if executeCallables and hasattr(nextObj, '__call__') and not _isInstanceOrClass(nextObj):
obj = nextObj()
else:
obj = nextObj
return obj
|
AttributeError
|
dataset/ETHPy150Open binhex/moviegrabber/lib/site-packages/Cheetah/NameMapper.py/_valueForName
|
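A hypothetical lookup under Python 2 (where dicts still have has_key, which the branch above relies on): dotted names walk mappings and attributes, and executeCallables=True invokes plain callables found along the way:

data = {'user': {'name': lambda: 'Ada'}}
assert _valueForName(data, 'user.name', executeCallables=True) == 'Ada'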
1,982
|
def __new__(cls, name, bases, attrs):
attrs['base_fields'] = {}
declared_fields = {}
# Inherit any fields from parent(s).
try:
parents = [b for b in bases if issubclass(b, Resource)]
# Simulate the MRO.
parents.reverse()
for p in parents:
parent_fields = getattr(p, 'base_fields', {})
for field_name, field_object in parent_fields.items():
attrs['base_fields'][field_name] = deepcopy(field_object)
except __HOLE__:
pass
for field_name, obj in attrs.items():
# Look for ``dehydrated_type`` instead of doing ``isinstance``,
# which can break down if Tastypie is re-namespaced as something
# else.
if hasattr(obj, 'dehydrated_type'):
field = attrs.pop(field_name)
declared_fields[field_name] = field
attrs['base_fields'].update(declared_fields)
attrs['declared_fields'] = declared_fields
new_class = super(DeclarativeMetaclass, cls).__new__(cls, name, bases, attrs)
opts = getattr(new_class, 'Meta', None)
new_class._meta = ResourceOptions(opts)
if not getattr(new_class._meta, 'resource_name', None):
# No ``resource_name`` provided. Attempt to auto-name the resource.
class_name = new_class.__name__
name_bits = [bit for bit in class_name.split('Resource') if bit]
resource_name = ''.join(name_bits).lower()
new_class._meta.resource_name = resource_name
if getattr(new_class._meta, 'include_resource_uri', True):
if not 'resource_uri' in new_class.base_fields:
new_class.base_fields['resource_uri'] = fields.CharField(readonly=True)
elif 'resource_uri' in new_class.base_fields and not 'resource_uri' in attrs:
del(new_class.base_fields['resource_uri'])
for field_name, field_object in new_class.base_fields.items():
if hasattr(field_object, 'contribute_to_class'):
field_object.contribute_to_class(new_class, field_name)
return new_class
|
NameError
|
dataset/ETHPy150Open toastdriven/piecrust/piecrust/resources.py/DeclarativeMetaclass.__new__
|
1,983
|
def wrap_view(self, view):
"""
Wraps methods so they can be called in a more functional way as well
as handling exceptions better.
Note that if ``BadRequest`` or an exception with a ``response`` attr
are seen, there is special handling to either present a message back
to the user or return the response traveling with the exception.
"""
@csrf_exempt
def wrapper(request, *args, **kwargs):
try:
callback = getattr(self, view)
response = callback(request, *args, **kwargs)
if request.is_ajax():
# IE excessively caches XMLHttpRequests, so we're disabling
# the browser cache here.
# See http://www.enhanceie.com/ie/bugs.asp for details.
patch_cache_control(response, no_cache=True)
return response
except (BadRequest, fields.ApiFieldError, __HOLE__), e:
return self.bad_request(request, e)
except Exception, e:
return self.handle_500(request, e)
return wrapper
|
ValidationError
|
dataset/ETHPy150Open toastdriven/piecrust/piecrust/resources.py/Resource.wrap_view
|
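The wrapper above centralizes exception translation: known "bad input" exceptions become a 400-style response, anything else a 500. The same pattern in isolation, framework-free (names and return values hypothetical):

def guarded(view, client_errors=(ValueError, KeyError)):
    # Turn selected exceptions into a client-error result and everything
    # else into a server-error result instead of letting them propagate.
    def wrapper(*args, **kwargs):
        try:
            return view(*args, **kwargs)
        except client_errors as e:
            return ('400 Bad Request', str(e))
        except Exception as e:
            return ('500 Internal Server Error', str(e))
    return wrapper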
1,984
|
def remove_api_resource_names(self, url_dict):
"""
Given a dictionary of regex matches from a URLconf, removes
``api_name`` and/or ``resource_name`` if found.
This is useful for converting URLconf matches into something suitable
for data lookup. For example::
Model.objects.filter(**self.remove_api_resource_names(matches))
"""
kwargs_subset = url_dict.copy()
for key in ['api_name', 'resource_name']:
try:
del(kwargs_subset[key])
except __HOLE__:
pass
return kwargs_subset
|
KeyError
|
dataset/ETHPy150Open toastdriven/piecrust/piecrust/resources.py/Resource.remove_api_resource_names
|
1,985
|
def dehydrate_resource_uri(self, bundle):
"""
For the automatically included ``resource_uri`` field, dehydrate
the URI for the given bundle.
Returns empty string if no URI can be generated.
"""
try:
return self.get_resource_uri(bundle)
except __HOLE__:
return ''
except NoReverseMatch:
return ''
|
NotImplementedError
|
dataset/ETHPy150Open toastdriven/piecrust/piecrust/resources.py/Resource.dehydrate_resource_uri
|
1,986
|
def convert_post_to_VERB(self, request, verb):
"""
Force Django to process the VERB.
"""
# Based off of ``piston.utils.coerce_put_post``. Similarly BSD-licensed.
# And no, the irony is not lost on me.
if request.method == verb:
if hasattr(request, '_post'):
del(request._post)
del(request._files)
try:
request.method = "POST"
request._load_post_and_files()
request.method = verb
except __HOLE__:
request.META['REQUEST_METHOD'] = 'POST'
request._load_post_and_files()
request.META['REQUEST_METHOD'] = verb
setattr(request, verb, request.POST)
return request
|
AttributeError
|
dataset/ETHPy150Open toastdriven/piecrust/piecrust/resources.py/Resource.convert_post_to_VERB
|
1,987
|
def get_detail(self, request, **kwargs):
"""
Returns a single serialized resource.
Calls ``cached_obj_get/obj_get`` to provide the data, then handles that result
set and serializes it.
Should return a HttpResponse (200 OK).
"""
try:
obj = self.cached_obj_get(request=request, **self.remove_api_resource_names(kwargs))
except __HOLE__:
return http.HttpNotFound()
except MultipleObjectsReturned:
return http.HttpMultipleChoices("More than one resource is found at this URI.")
bundle = self.build_bundle(obj=obj, request=request)
bundle = self.full_dehydrate(bundle)
bundle = self.alter_detail_data_to_serialize(request, bundle)
return self.create_response(request, bundle)
|
ObjectDoesNotExist
|
dataset/ETHPy150Open toastdriven/piecrust/piecrust/resources.py/Resource.get_detail
|
1,988
|
def patch_list(self, request, **kwargs):
"""
Updates a collection in-place.
The exact behavior of ``PATCH`` to a list resource is still the matter of
some debate in REST circles, and the ``PATCH`` RFC isn't standard. So the
behavior this method implements (described below) is something of a
stab in the dark. It's mostly cribbed from GData, with a smattering
of ActiveResource-isms and maybe even an original idea or two.
The ``PATCH`` format is one that's similar to the response returned from
a ``GET`` on a list resource::
{
"objects": [{object}, {object}, ...],
"deleted_objects": ["URI", "URI", "URI", ...],
}
For each object in ``objects``:
* If the dict does not have a ``resource_uri`` key then the item is
considered "new" and is handled like a ``POST`` to the resource list.
* If the dict has a ``resource_uri`` key and the ``resource_uri`` refers
  to an existing resource then the item is an update; it's treated
like a ``PATCH`` to the corresponding resource detail.
* If the dict has a ``resource_uri`` but the resource *doesn't* exist,
then this is considered to be a create-via-``PUT``.
  Each entry in ``deleted_objects`` refers to a resource URI of an existing
  resource to be deleted; each is handled like a ``DELETE`` to the relevant
resource.
In any case:
* If there's a resource URI it *must* refer to a resource of this
type. It's an error to include a URI of a different resource.
* ``PATCH`` is all or nothing. If a single sub-operation fails, the
entire request will fail and all resources will be rolled back.
"""
request = self.convert_post_to_patch(request)
deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))
if "objects" not in deserialized:
raise BadRequest("Invalid data sent.")
if len(deserialized["objects"]) and 'put' not in self._meta.detail_allowed_methods:
raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())
for data in deserialized["objects"]:
# If there's a resource_uri then this is either an
# update-in-place or a create-via-PUT.
if "resource_uri" in data:
uri = data.pop('resource_uri')
try:
obj = self.get_via_uri(uri, request=request)
# The object does exist, so this is an update-in-place.
bundle = self.build_bundle(obj=obj, request=request)
bundle = self.full_dehydrate(bundle)
bundle = self.alter_detail_data_to_serialize(request, bundle)
self.update_in_place(request, bundle, data)
except (__HOLE__, MultipleObjectsReturned):
# The object referenced by resource_uri doesn't exist,
# so this is a create-by-PUT equivalent.
data = self.alter_deserialized_detail_data(request, data)
bundle = self.build_bundle(data=dict_strip_unicode_keys(data))
bundle.obj.pk = obj.pk
self.is_valid(bundle, request)
self.obj_create(bundle, request=request)
else:
# There's no resource URI, so this is a create call just
# like a POST to the list resource.
data = self.alter_deserialized_detail_data(request, data)
bundle = self.build_bundle(data=dict_strip_unicode_keys(data))
self.is_valid(bundle, request)
self.obj_create(bundle, request=request)
if len(deserialized.get('deleted_objects', [])) and 'delete' not in self._meta.detail_allowed_methods:
raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())
for uri in deserialized.get('deleted_objects', []):
obj = self.get_via_uri(uri, request=request)
self.obj_delete(request=request, _obj=obj)
return http.HttpAccepted()
|
ObjectDoesNotExist
|
dataset/ETHPy150Open toastdriven/piecrust/piecrust/resources.py/Resource.patch_list
|
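For illustration, a request body in the format the docstring above describes might look like this (URIs and field names hypothetical):

payload = {
    "objects": [
        # No resource_uri: treated as a create, like POST to the list.
        {"title": "New entry"},
        # Existing resource_uri: treated as an update-in-place (PATCH).
        {"resource_uri": "/api/v1/entry/1/", "title": "Edited entry"},
    ],
    # Each URI here is deleted, like DELETE on the detail resource.
    "deleted_objects": ["/api/v1/entry/2/"],
}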
1,989
|
def patch_detail(self, request, **kwargs):
"""
Updates a resource in-place.
Calls ``obj_update``.
If the resource is updated, return ``HttpAccepted`` (202 Accepted).
If the resource did not exist, return ``HttpNotFound`` (404 Not Found).
"""
request = self.convert_post_to_patch(request)
# We want to be able to validate the update, but we can't just pass
# the partial data into the validator since all data needs to be
# present. Instead, we basically simulate a PUT by pulling out the
# original data and updating it in-place.
# So first pull out the original object. This is essentially
# ``get_detail``.
try:
obj = self.cached_obj_get(request=request, **self.remove_api_resource_names(kwargs))
except __HOLE__:
return http.HttpNotFound()
except MultipleObjectsReturned:
return http.HttpMultipleChoices("More than one resource is found at this URI.")
bundle = self.build_bundle(obj=obj, request=request)
bundle = self.full_dehydrate(bundle)
bundle = self.alter_detail_data_to_serialize(request, bundle)
# Now update the bundle in-place.
deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))
self.update_in_place(request, bundle, deserialized)
return http.HttpAccepted()
|
ObjectDoesNotExist
|
dataset/ETHPy150Open toastdriven/piecrust/piecrust/resources.py/Resource.patch_detail
|
1,990
|
def get_multiple(self, request, **kwargs):
"""
Returns a serialized list of resources based on the identifiers
from the URL.
Calls ``obj_get`` to fetch only the objects requested. This method
only responds to HTTP GET.
Should return a HttpResponse (200 OK).
"""
self.method_check(request, allowed=['get'])
self.is_authenticated(request)
self.throttle_check(request)
# Rip apart the list then iterate.
obj_pks = kwargs.get('pk_list', '').split(';')
objects = []
not_found = []
for pk in obj_pks:
try:
obj = self.obj_get(request, pk=pk)
bundle = self.build_bundle(obj=obj, request=request)
bundle = self.full_dehydrate(bundle)
objects.append(bundle)
except __HOLE__:
not_found.append(pk)
object_list = {
'objects': objects,
}
if len(not_found):
object_list['not_found'] = not_found
self.log_throttled_access(request)
return self.create_response(request, object_list)
|
ObjectDoesNotExist
|
dataset/ETHPy150Open toastdriven/piecrust/piecrust/resources.py/Resource.get_multiple
|
1,991
|
def setup():
top_dir = dirname(dirname(abspath(__file__)))
lib_dir = join(top_dir, "lib")
sys.path.insert(0, lib_dir)
# Attempt to get 'pygments' on the import path.
try:
# If already have it, use that one.
import pygments
except __HOLE__:
pygments_dir = join(top_dir, "deps", "pygments")
if sys.version_info[0] <= 2:
sys.path.insert(0, pygments_dir)
else:
sys.path.insert(0, pygments_dir + "3")
|
ImportError
|
dataset/ETHPy150Open an0/Letterpress/code/markdown2/test/test.py/setup
|
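The setup above prefers an installed pygments and falls back to a bundled copy, picking the Python 3 variant by directory suffix. The vendored-dependency fallback in isolation (path hypothetical):

import sys

try:
    import pygments  # use the installed package when available
except ImportError:
    # Otherwise make a vendored copy importable.
    sys.path.insert(0, "/path/to/deps/pygments")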
1,992
|
def init(driverName=None, debug=False):
'''
Constructs a new TTS engine instance or reuses the existing instance for
the driver name.
@param driverName: Name of the platform specific driver to use. If
None, selects the default driver for the operating system.
@type: str
@param debug: Debugging output enabled or not
@type debug: bool
@return: Engine instance
@rtype: L{engine.Engine}
'''
try:
eng = _activeEngines[driverName]
except __HOLE__:
eng = Engine(driverName, debug)
_activeEngines[driverName] = eng
return eng
|
KeyError
|
dataset/ETHPy150Open parente/pyttsx/pyttsx/__init__.py/init
|
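The ``init()`` above memoizes one ``Engine`` per driver name, so repeated calls with the same name return the same instance. The same one-instance-per-key pattern in isolation (names hypothetical):

_instances = {}

def get_instance(key, factory):
    # Create lazily on the first request for a key, then reuse.
    try:
        return _instances[key]
    except KeyError:
        inst = _instances[key] = factory()
        return inst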
1,993
|
def clear_opts_related_cache(model_class):
"""
Clear the specified model and its children opts related cache.
"""
opts = model_class._meta
if hasattr(opts, '_related_objects_cache'):
children = [
related_object.model
for related_object in opts.get_all_related_objects()
if related_object.field.rel.parent_link
]
else:
children = []
for attr in _opts_related_cache_attrs:
try:
delattr(opts, attr)
except __HOLE__:
pass
for child in children:
clear_opts_related_cache(child)
|
AttributeError
|
dataset/ETHPy150Open charettes/django-mutant/mutant/compat.py/clear_opts_related_cache
|
1,994
|
def setDocExample(self, collector, metrics, defaultpath=None):
if not len(metrics):
return False
filePath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'docs', 'collectors-' + collector + '.md')
if not os.path.exists(filePath):
return False
if not os.access(filePath, os.W_OK):
return False
if not os.access(filePath, os.R_OK):
return False
try:
fp = open(filePath, 'Ur')
content = fp.readlines()
fp.close()
fp = open(filePath, 'w')
for line in content:
if line.strip() == '__EXAMPLESHERE__':
for metric in sorted(metrics.iterkeys()):
metricPath = 'servers.hostname.'
if defaultpath:
metricPath += defaultpath + '.'
metricPath += metric
metricPath = metricPath.replace('..', '.')
fp.write('%s %s\n' % (metricPath, metrics[metric]))
else:
fp.write(line)
fp.close()
except __HOLE__:
return False
return True
|
IOError
|
dataset/ETHPy150Open BrightcoveOS/Diamond/test.py/CollectorTestCase.setDocExample
|
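``setDocExample`` above rewrites a per-collector markdown file, expanding the ``__EXAMPLESHERE__`` marker line into generated metric lines and copying everything else through. The marker-expansion idiom in isolation (function name hypothetical):

def expand_marker(path, marker, generated_lines):
    # Read the whole file, then rewrite it with the marker line replaced.
    with open(path) as fp:
        content = fp.readlines()
    with open(path, 'w') as fp:
        for line in content:
            if line.strip() == marker:
                fp.writelines(generated_lines)
            else:
                fp.write(line)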
1,995
|
def _sun_jce_pbe_derive_key_and_iv(password, salt, iteration_count):
if len(salt) != 8:
raise ValueError("Expected 8-byte salt for PBEWithMD5AndTripleDES (OID %s), found %d bytes" % (".".join(str(i) for i in SUN_JCE_ALGO_ID), len(salt)))
# Note: unlike JKS, the PBEWithMD5AndTripleDES algorithm as implemented for JCE keystores uses an ASCII string for the password, not a regular Java/UTF-16BE string.
# It validates this explicitly and will throw an InvalidKeySpecException if non-ASCII byte codes are present in the password.
# See PBEKey's constructor in com/sun/crypto/provider/PBEKey.java.
try:
password_bytes = password.encode('ascii')
except (__HOLE__, UnicodeEncodeError):
raise ValueError("Key password contains non-ASCII characters")
salt_halves = [salt[0:4], salt[4:8]]
if salt_halves[0] == salt_halves[1]:
salt_halves[0] = salt_halves[0][::-1] # reversed
derived = b""
for i in range(2):
to_be_hashed = salt_halves[i]
for k in range(iteration_count):
to_be_hashed = hashlib.md5(to_be_hashed + password_bytes).digest()
derived += to_be_hashed
key = derived[:-8] # = 24 bytes
iv = derived[-8:]
return key, iv
|
UnicodeDecodeError
|
dataset/ETHPy150Open doublereedkurt/pyjks/jks/jks.py/_sun_jce_pbe_derive_key_and_iv
|
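Given the routine above, a hedged usage sketch: it derives a 24-byte triple-DES key and an 8-byte IV from an ASCII password, an 8-byte salt, and an iteration count (values below are illustrative only):

key, iv = _sun_jce_pbe_derive_key_and_iv(
    'changeit',                           # must encode as ASCII, per the check above
    b'\x01\x02\x03\x04\x05\x06\x07\x08',  # exactly 8 bytes of salt
    20)                                   # iteration count
assert len(key) == 24 and len(iv) == 8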
1,996
|
def onTabChanged(self, event, *args):
try:
widgetIndex = event.widget.index("current")
tabId = event.widget.tabs()[widgetIndex]
for widget in event.widget.winfo_children():
if str(widget) == tabId:
self.currentView = widget.view
break
except (AttributeError, __HOLE__, TclError):
pass
|
TypeError
|
dataset/ETHPy150Open Arelle/Arelle/arelle/CntlrWinMain.py/CntlrWinMain.onTabChanged
|
1,997
|
def getViewAndModelXbrl(self):
view = getattr(self, "currentView", None)
if view:
modelXbrl = None
try:
modelXbrl = view.modelXbrl
return (view, modelXbrl)
except __HOLE__:
return (view, None)
return (None, None)
|
AttributeError
|
dataset/ETHPy150Open Arelle/Arelle/arelle/CntlrWinMain.py/CntlrWinMain.getViewAndModelXbrl
|
1,998
|
def okayToContinue(self):
view, modelXbrl = self.getViewAndModelXbrl()
documentIsModified = False
if view is not None:
try:
# What follows only exists in ViewWinRenderedGrid
view.updateInstanceFromFactPrototypes()
except __HOLE__:
pass
if modelXbrl is not None:
documentIsModified = modelXbrl.isModified()
if not self.dirty and (not documentIsModified):
return True
reply = tkinter.messagebox.askokcancel(
_("arelle - Unsaved Changes"),
_("Are you sure to close the current instance without saving?\n (OK will discard changes.)"),
parent=self.parent)
if reply is None:
return False
else:
return reply
|
AttributeError
|
dataset/ETHPy150Open Arelle/Arelle/arelle/CntlrWinMain.py/CntlrWinMain.okayToContinue
|
1,999
|
def fileSave(self, event=None, view=None, fileType=None, filenameFromInstance=False, *ignore):
if view is None:
view = getattr(self, "currentView", None)
if view is not None:
filename = None
modelXbrl = None
try:
modelXbrl = view.modelXbrl
except AttributeError:
pass
if filenameFromInstance:
try:
modelXbrl = view.modelXbrl
filename = modelXbrl.modelDocument.filepath
if filename.endswith('.xsd'): # DTS entry point, no instance saved yet!
filename = None
except AttributeError:
pass
if isinstance(view, ViewWinRenderedGrid.ViewRenderedGrid):
initialdir = os.path.dirname(modelXbrl.modelDocument.uri)
if fileType in ("html", "xml", None):
if fileType == "html" and filename is None:
filename = self.uiFileDialog("save",
title=_("arelle - Save HTML-rendered Table"),
initialdir=initialdir,
filetypes=[(_("HTML file .html"), "*.html"), (_("HTML file .htm"), "*.htm")],
defaultextension=".html")
elif fileType == "xml" and filename is None:
filename = self.uiFileDialog("save",
title=_("arelle - Save Table Layout Model"),
initialdir=initialdir,
filetypes=[(_("Layout model file .xml"), "*.xml")],
defaultextension=".xml")
else: # ask file type
if filename is None:
filename = self.uiFileDialog("save",
title=_("arelle - Save XBRL Instance or HTML-rendered Table"),
initialdir=initialdir,
filetypes=[(_("XBRL instance .xbrl"), "*.xbrl"), (_("XBRL instance .xml"), "*.xml"), (_("HTML table .html"), "*.html"), (_("HTML table .htm"), "*.htm")],
defaultextension=".html")
if filename and (filename.endswith(".xbrl") or filename.endswith(".xml")):
view.saveInstance(filename)
return True
if not filename:
return False
try:
ViewFileRenderedGrid.viewRenderedGrid(modelXbrl, filename, lang=self.labelLang, sourceView=view)
except (IOError, EnvironmentError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
filename, err),
parent=self.parent)
return True
elif fileType == "xbrl":
return self.uiFileDialog("save",
title=_("arelle - Save Instance"),
initialdir=initialdir,
filetypes=[(_("XBRL instance .xbrl"), "*.xbrl"), (_("XBRL instance .xml"), "*.xml")],
defaultextension=".xbrl")
elif isinstance(view, ViewWinTests.ViewTests) and modelXbrl.modelDocument.type in (ModelDocument.Type.TESTCASESINDEX, ModelDocument.Type.TESTCASE):
filename = self.uiFileDialog("save",
title=_("arelle - Save Test Results"),
initialdir=os.path.dirname(self.modelManager.modelXbrl.modelDocument.uri),
filetypes=[(_("CSV file"), "*.csv")],
defaultextension=".csv")
if not filename:
return False
try:
ViewFileTests.viewTests(self.modelManager.modelXbrl, filename)
except (IOError, EnvironmentError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
filename, err),
parent=self.parent)
return True
elif isinstance(view, ViewWinTree.ViewTree):
filename = self.uiFileDialog("save",
title=_("arelle - Save {0}").format(view.tabTitle),
initialdir=os.path.dirname(self.modelManager.modelXbrl.modelDocument.uri),
filetypes=[(_("CSV file"), "*.csv"),(_("HTML file"), "*.html"),(_("XML file"), "*.xml"),(_("JSON file"), "*.json")],
defaultextension=".csv")
if not filename:
return False
try:
if isinstance(view, ViewWinRoleTypes.ViewRoleTypes):
ViewFileRoleTypes.viewRoleTypes(modelXbrl, filename, view.tabTitle, view.isArcrole, lang=view.lang)
elif isinstance(view, ViewWinConcepts.ViewConcepts):
ViewFileConcepts.viewConcepts(modelXbrl, filename, labelrole=view.labelrole, lang=view.lang)
elif isinstance(view, ViewWinFactList.ViewFactList):
ViewFileFactList.viewFacts(modelXbrl, filename, labelrole=view.labelrole, lang=view.lang)
else:
ViewFileRelationshipSet.viewRelationshipSet(modelXbrl, filename, view.tabTitle, view.arcrole, labelrole=view.labelrole, lang=view.lang)
except (IOError, EnvironmentError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
filename, err),
parent=self.parent)
return True
elif isinstance(view, ViewWinXml.ViewXml) and self.modelManager.modelXbrl.formulaOutputInstance:
filename = self.uiFileDialog("save",
title=_("arelle - Save Formula Result Instance Document"),
initialdir=os.path.dirname(self.modelManager.modelXbrl.modelDocument.uri),
filetypes=[(_("XBRL output instance .xml"), "*.xml"), (_("XBRL output instance .xbrl"), "*.xbrl")],
defaultextension=".xml")
if not filename:
return False
try:
from arelle import XmlUtil
with open(filename, "w") as fh:
XmlUtil.writexml(fh, self.modelManager.modelXbrl.formulaOutputInstance.modelDocument.xmlDocument, encoding="utf-8")
self.addToLog(_("[info] Saved formula output instance to {0}").format(filename) )
except (__HOLE__, EnvironmentError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
self.filename, err),
parent=self.parent)
return True
tkinter.messagebox.showwarning(_("arelle - Save what?"),
_("Nothing has been selected that can be saved. \nPlease select a view pane that can be saved."),
parent=self.parent)
'''
if self.filename is None:
filename = self.uiFileDialog("save",
title=_("arelle - Save File"),
initialdir=".",
filetypes=[(_("Xbrl file"), "*.x*")],
defaultextension=".xbrl")
if not filename:
return False
self.filename = filename
if not self.filename.endswith(".xbrl"):
self.filename += ".xbrl"
try:
with open(self.filename, "wb") as fh:
pickle.dump(self.data, fh, pickle.HIGHEST_PROTOCOL)
self.dirty = False
self.uiShowStatus(_("Saved {0} items to {1}").format(
len(self.data),
self.filename), clearAfter=5000)
self.parent.title(_("arelle - {0}").format(
os.path.basename(self.filename)))
except (EnvironmentError, pickle.PickleError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
self.filename, err),
parent=self.parent)
return True;
'''
|
IOError
|
dataset/ETHPy150Open Arelle/Arelle/arelle/CntlrWinMain.py/CntlrWinMain.fileSave
|