| Unnamed: 0 (int64, 0 to 10k) | function (string, lengths 79 to 138k) | label (string, 20 classes) | info (string, lengths 42 to 261) |
|---|---|---|---|
6,700
|
def get_context_data(self, **kwargs):
    context = super(EstablishmentDetail, self).get_context_data(**kwargs)
    establishment = context['establishment']
    inspections = establishment.inspections.prefetch_related('violations')
    inspections = inspections.order_by('-date')
    try:
        context['latest'] = inspections.filter(type=1)[0]
    except __HOLE__:
        context['latest'] = None
    context['inspections'] = inspections
    return context
|
IndexError
|
dataset/ETHPy150Open codefordurham/Durham-Restaurants/inspections/views.py/EstablishmentDetail.get_context_data
|
6,701
|
@classmethod
def _encode_inner(cls, params):
    # special case value encoding
    ENCODERS = {
        list: cls.encode_list,
        dict: cls.encode_dict,
        datetime.datetime: cls.encode_datetime,
    }
    if six.PY2:
        ENCODERS[types.NoneType] = cls.encode_none
    if six.PY3:
        ENCODERS[type(None)] = cls.encode_none
    out = []
    for key, value in six.iteritems(params):
        key = cls._utf8(key)
        try:
            encoder = ENCODERS[value.__class__]
            encoder(out, key, value)
        except __HOLE__:
            # don't need special encoding
            try:
                value = six.text_type(value)
            except:
                pass
            out.append((key, value))
    return out
|
KeyError
|
dataset/ETHPy150Open EasyPost/easypost-python/easypost/__init__.py/Requestor._encode_inner
|
6,702
|
def handle_api_error(self, http_status, http_body, response):
    try:
        error = response['error']
    except (__HOLE__, TypeError):
        raise Error("Invalid response from API: (%d) %r " % (http_status, http_body), http_status, http_body)
    try:
        raise Error(error.get('message', ''), http_status, http_body)
    except AttributeError:
        raise Error(error, http_status, http_body)
|
KeyError
|
dataset/ETHPy150Open EasyPost/easypost-python/easypost/__init__.py/Requestor.handle_api_error
|
6,703
|
def __getattr__(self, k):
    try:
        return self.__dict__[k]
    except __HOLE__:
        pass
    raise AttributeError("%r object has no attribute %r" % (type(self).__name__, k))
|
KeyError
|
dataset/ETHPy150Open EasyPost/easypost-python/easypost/__init__.py/EasyPostObject.__getattr__
|
6,704
|
def get(self, k, default=None):
    try:
        return self[k]
    except __HOLE__:
        return default
|
KeyError
|
dataset/ETHPy150Open EasyPost/easypost-python/easypost/__init__.py/EasyPostObject.get
|
6,705
|
def setdefault(self, k, default=None):
    try:
        return self[k]
    except __HOLE__:
        self[k] = default
        return default
|
KeyError
|
dataset/ETHPy150Open EasyPost/easypost-python/easypost/__init__.py/EasyPostObject.setdefault
|
6,706
|
@classmethod
def retrieve(cls, easypost_id, api_key=None, **params):
    try:
        easypost_id = easypost_id['id']
    except (__HOLE__, TypeError):
        pass
    instance = cls(easypost_id, api_key, **params)
    instance.refresh()
    return instance
|
KeyError
|
dataset/ETHPy150Open EasyPost/easypost-python/easypost/__init__.py/Resource.retrieve
|
6,707
|
def lowest_rate(self, carriers=None, services=None):
    carriers = carriers or []
    services = services or []
    lowest_rate = None
    try:
        carriers = carriers.split(',')
    except AttributeError:
        pass
    carriers = [c.lower() for c in carriers]
    try:
        services = services.split(',')
    except __HOLE__:
        pass
    services = [service.lower() for service in services]
    for rate in self.rates:
        rate_carrier = rate.carrier.lower()
        if len(carriers) > 0 and rate_carrier not in carriers:
            continue
        rate_service = rate.service.lower()
        if len(services) > 0 and rate_service not in services:
            continue
        if lowest_rate is None or float(rate.rate) < float(lowest_rate.rate):
            lowest_rate = rate
    if lowest_rate is None:
        raise Error('No rates found.')
    return lowest_rate
|
AttributeError
|
dataset/ETHPy150Open EasyPost/easypost-python/easypost/__init__.py/Shipment.lowest_rate
|
6,708
|
@classmethod
def retrieve(cls, easypost_id="", api_key=None, **params):
    try:
        easypost_id = easypost_id['id']
    except (__HOLE__, TypeError):
        pass
    if easypost_id == "":
        requestor = Requestor(api_key)
        response, api_key = requestor.request('get', cls.class_url())
        return convert_to_easypost_object(response, api_key)
    else:
        instance = cls(easypost_id, api_key, **params)
        instance.refresh()
        return instance
|
KeyError
|
dataset/ETHPy150Open EasyPost/easypost-python/easypost/__init__.py/User.retrieve
|
6,709
|
@classmethod
def retrieve(cls, easypost_id, api_key=None, **params):
    try:
        easypost_id = easypost_id['id']
    except (__HOLE__, TypeError):
        pass
    requestor = Requestor(api_key)
    url = "%s/%s" % (cls.class_url(), easypost_id)
    response, api_key = requestor.request('get', url)
    return response["signed_url"]
|
KeyError
|
dataset/ETHPy150Open EasyPost/easypost-python/easypost/__init__.py/Blob.retrieve
|
6,710
|
def authenticate(self, request):
    """
    Returns a `User` if a correct username and password have been supplied
    using HTTP Basic authentication. Otherwise returns `None`.
    """
    auth = get_authorization_header(request).split()
    if not auth or auth[0].lower() != b'basic':
        return None
    if len(auth) == 1:
        msg = _('Invalid basic header. No credentials provided.')
        raise exceptions.AuthenticationFailed(msg)
    elif len(auth) > 2:
        msg = _('Invalid basic header. Credentials string should not contain spaces.')
        raise exceptions.AuthenticationFailed(msg)
    try:
        auth_parts = base64.b64decode(auth[1]).decode(HTTP_HEADER_ENCODING).partition(':')
    except (TypeError, __HOLE__):
        msg = _('Invalid basic header. Credentials not correctly base64 encoded.')
        raise exceptions.AuthenticationFailed(msg)
    userid, password = auth_parts[0], auth_parts[2]
    return self.authenticate_credentials(userid, password)
|
UnicodeDecodeError
|
dataset/ETHPy150Open tomchristie/django-rest-framework/rest_framework/authentication.py/BasicAuthentication.authenticate
|
6,711
|
def _getResolver(self, serverResponses, maximumQueries=10):
    """
    Create and return a new L{root.Resolver} modified to resolve queries
    against the record data represented by C{servers}.
    @param serverResponses: A mapping from dns server addresses to
        mappings. The inner mappings are from query two-tuples (name,
        type) to dictionaries suitable for use as **arguments to
        L{_respond}. See that method for details.
    """
    roots = ['1.1.2.3']
    resolver = Resolver(roots, maximumQueries)
    def query(query, serverAddresses, timeout, filter):
        msg("Query for QNAME %s at %r" % (query.name, serverAddresses))
        for addr in serverAddresses:
            try:
                server = serverResponses[addr]
            except __HOLE__:
                continue
            records = server[str(query.name), query.type]
            return succeed(self._respond(**records))
    resolver._query = query
    return resolver
|
KeyError
|
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/names/test/test_rootresolve.py/RootResolverTests._getResolver
|
6,712
|
def attachBitcodePathToObject(bcPath, outFileName):
    # Don't try to attach a bitcode path to a binary. Unfortunately
    # that won't work.
    (root, ext) = os.path.splitext(outFileName)
    _logger.debug('attachBitcodePathToObject: {0} ===> {1} [ext = {2}]\n'.format(bcPath, outFileName, ext))
    #iam: this also looks very dodgey; we need a more reliable way to do this:
    if ext not in ('.o', '.lo', '.os', '.So', '.po'):
        _logger.warning('Cannot attach bitcode path to "{0} of type {1}"'.format(outFileName, FileType.getFileType(outFileName)))
        return
    # Now just build a temporary text file with the full path to the
    # bitcode file that we'll write into the object file.
    f = tempfile.NamedTemporaryFile(mode='w+b', delete=False)
    absBcPath = os.path.abspath(bcPath)
    f.write(absBcPath.encode())
    f.write('\n'.encode())
    _logger.debug(pprint.pformat('Wrote "{0}" to file "{1}"'.format(absBcPath, f.name)))
    # Ensure buffers are flushed so that objcopy doesn't read an empty
    # file
    f.flush()
    os.fsync(f.fileno())
    f.close()
    # Now write our bitcode section
    if (sys.platform.startswith('darwin')):
        objcopyCmd = ['ld', '-r', '-keep_private_externs', outFileName, '-sectcreate', darwinSegmentName, darwinSectionName, f.name, '-o', outFileName]
    else:
        objcopyCmd = ['objcopy', '--add-section', '{0}={1}'.format(elfSectionName, f.name), outFileName]
    orc = 0
    try:
        if os.path.getsize(outFileName) > 0:
            objProc = Popen(objcopyCmd)
            orc = objProc.wait()
    except __HOLE__:
        # configure loves to immediately delete things, causing issues for
        # us here. Just ignore it
        os.remove(f.name)
        sys.exit(0)
    os.remove(f.name)
    if orc != 0:
        _logger.error('objcopy failed with {0}'.format(orc))
        sys.exit(-1)
|
OSError
|
dataset/ETHPy150Open travitch/whole-program-llvm/driver/utils.py/attachBitcodePathToObject
|
6,713
|
def is_iterable(x):
    "A implementation independent way of checking for iterables"
    try:
        iter(x)
    except __HOLE__:
        return False
    else:
        return True
|
TypeError
|
dataset/ETHPy150Open django/django/django/utils/itercompat.py/is_iterable
|
6,714
|
def main(fname_anat, fname_centerline, degree_poly, centerline_fitting, interp, remove_temp_files, verbose):
    # extract path of the script
    path_script = os.path.dirname(__file__)+'/'
    # Parameters for debug mode
    if param.debug == 1:
        print '\n*** WARNING: DEBUG MODE ON ***\n'
        status, path_sct_data = commands.getstatusoutput('echo $SCT_TESTING_DATA_DIR')
        fname_anat = path_sct_data+'/t2/t2.nii.gz'
        fname_centerline = path_sct_data+'/t2/t2_seg.nii.gz'
    # extract path/file/extension
    path_anat, file_anat, ext_anat = sct.extract_fname(fname_anat)
    # Display arguments
    print '\nCheck input arguments...'
    print ' Input volume ...................... '+fname_anat
    print ' Centerline ........................ '+fname_centerline
    print ''
    # Get input image orientation
    im_anat = Image(fname_anat)
    input_image_orientation = get_orientation_3d(im_anat)
    # Reorient input data into RL PA IS orientation
    im_centerline = Image(fname_centerline)
    im_anat_orient = set_orientation(im_anat, 'RPI')
    im_anat_orient.setFileName('tmp.anat_orient.nii')
    im_centerline_orient = set_orientation(im_centerline, 'RPI')
    im_centerline_orient.setFileName('tmp.centerline_orient.nii')
    # Open centerline
    #==========================================================================================
    print '\nGet dimensions of input centerline...'
    nx, ny, nz, nt, px, py, pz, pt = im_centerline_orient.dim
    print '.. matrix size: '+str(nx)+' x '+str(ny)+' x '+str(nz)
    print '.. voxel size: '+str(px)+'mm x '+str(py)+'mm x '+str(pz)+'mm'
    print '\nOpen centerline volume...'
    data = im_centerline_orient.data
    X, Y, Z = (data>0).nonzero()
    min_z_index, max_z_index = min(Z), max(Z)
    # loop across z and associate x,y coordinate with the point having maximum intensity
    x_centerline = [0 for iz in range(min_z_index, max_z_index+1, 1)]
    y_centerline = [0 for iz in range(min_z_index, max_z_index+1, 1)]
    z_centerline = [iz for iz in range(min_z_index, max_z_index+1, 1)]
    # Two possible scenario:
    # 1. the centerline is probabilistic: each slices contains voxels with the probability of containing the centerline [0:...:1]
    # We only take the maximum value of the image to aproximate the centerline.
    # 2. The centerline/segmentation image contains many pixels per slice with values {0,1}.
    # We take all the points and approximate the centerline on all these points.
    X, Y, Z = ((data<1)*(data>0)).nonzero() # X is empty if binary image
    if (len(X) > 0): # Scenario 1
        for iz in range(min_z_index, max_z_index+1, 1):
            x_centerline[iz-min_z_index], y_centerline[iz-min_z_index] = numpy.unravel_index(data[:,:,iz].argmax(), data[:,:,iz].shape)
    else: # Scenario 2
        for iz in range(min_z_index, max_z_index+1, 1):
            x_seg, y_seg = (data[:,:,iz]>0).nonzero()
            if len(x_seg) > 0:
                x_centerline[iz-min_z_index] = numpy.mean(x_seg)
                y_centerline[iz-min_z_index] = numpy.mean(y_seg)
    # TODO: find a way to do the previous loop with this, which is more neat:
    # [numpy.unravel_index(data[:,:,iz].argmax(), data[:,:,iz].shape) for iz in range(0,nz,1)]
    # clear variable
    del data
    # Fit the centerline points with the kind of curve given as argument of the script and return the new smoothed coordinates
    if centerline_fitting == 'nurbs':
        try:
            x_centerline_fit, y_centerline_fit = b_spline_centerline(x_centerline,y_centerline,z_centerline)
        except __HOLE__:
            print "splines fitting doesn't work, trying with polynomial fitting...\n"
            x_centerline_fit, y_centerline_fit = polynome_centerline(x_centerline,y_centerline,z_centerline)
    elif centerline_fitting == 'polynome':
        x_centerline_fit, y_centerline_fit = polynome_centerline(x_centerline,y_centerline,z_centerline)
    #==========================================================================================
    # Split input volume
    print '\nSplit input volume...'
    im_anat_orient_split_list = split_data(im_anat_orient, 2)
    file_anat_split = []
    for im in im_anat_orient_split_list:
        file_anat_split.append(im.absolutepath)
        im.save()
    # initialize variables
    file_mat_inv_cumul = ['tmp.mat_inv_cumul_Z'+str(z).zfill(4) for z in range(0,nz,1)]
    z_init = min_z_index
    displacement_max_z_index = x_centerline_fit[z_init-min_z_index]-x_centerline_fit[max_z_index-min_z_index]
    # write centerline as text file
    print '\nGenerate fitted transformation matrices...'
    file_mat_inv_cumul_fit = ['tmp.mat_inv_cumul_fit_Z'+str(z).zfill(4) for z in range(0,nz,1)]
    for iz in range(min_z_index, max_z_index+1, 1):
        # compute inverse cumulative fitted transformation matrix
        fid = open(file_mat_inv_cumul_fit[iz], 'w')
        if (x_centerline[iz-min_z_index] == 0 and y_centerline[iz-min_z_index] == 0):
            displacement = 0
        else:
            displacement = x_centerline_fit[z_init-min_z_index]-x_centerline_fit[iz-min_z_index]
        fid.write('%i %i %i %f\n' %(1, 0, 0, displacement) )
        fid.write('%i %i %i %f\n' %(0, 1, 0, 0) )
        fid.write('%i %i %i %i\n' %(0, 0, 1, 0) )
        fid.write('%i %i %i %i\n' %(0, 0, 0, 1) )
        fid.close()
    # we complete the displacement matrix in z direction
    for iz in range(0, min_z_index, 1):
        fid = open(file_mat_inv_cumul_fit[iz], 'w')
        fid.write('%i %i %i %f\n' %(1, 0, 0, 0) )
        fid.write('%i %i %i %f\n' %(0, 1, 0, 0) )
        fid.write('%i %i %i %i\n' %(0, 0, 1, 0) )
        fid.write('%i %i %i %i\n' %(0, 0, 0, 1) )
        fid.close()
    for iz in range(max_z_index+1, nz, 1):
        fid = open(file_mat_inv_cumul_fit[iz], 'w')
        fid.write('%i %i %i %f\n' %(1, 0, 0, displacement_max_z_index) )
        fid.write('%i %i %i %f\n' %(0, 1, 0, 0) )
        fid.write('%i %i %i %i\n' %(0, 0, 1, 0) )
        fid.write('%i %i %i %i\n' %(0, 0, 0, 1) )
        fid.close()
    # apply transformations to data
    print '\nApply fitted transformation matrices...'
    file_anat_split_fit = ['tmp.anat_orient_fit_Z'+str(z).zfill(4) for z in range(0,nz,1)]
    for iz in range(0, nz, 1):
        # forward cumulative transformation to data
        sct.run(fsloutput+'flirt -in '+file_anat_split[iz]+' -ref '+file_anat_split[iz]+' -applyxfm -init '+file_mat_inv_cumul_fit[iz]+' -out '+file_anat_split_fit[iz]+' -interp '+interp)
    # Merge into 4D volume
    print '\nMerge into 4D volume...'
    from glob import glob
    im_to_concat_list = [Image(fname) for fname in glob('tmp.anat_orient_fit_Z*.nii')]
    im_concat_out = concat_data(im_to_concat_list, 2)
    im_concat_out.setFileName('tmp.anat_orient_fit.nii')
    im_concat_out.save()
    # sct.run(fsloutput+'fslmerge -z tmp.anat_orient_fit tmp.anat_orient_fit_z*')
    # Reorient data as it was before
    print '\nReorient data back into native orientation...'
    fname_anat_fit_orient = set_orientation(im_concat_out.absolutepath, input_image_orientation, filename=True)
    move(fname_anat_fit_orient, 'tmp.anat_orient_fit_reorient.nii')
    # Generate output file (in current folder)
    print '\nGenerate output file (in current folder)...'
    sct.generate_output_file('tmp.anat_orient_fit_reorient.nii', file_anat+'_flatten'+ext_anat)
    # Delete temporary files
    if remove_temp_files == 1:
        print '\nDelete temporary files...'
        sct.run('rm -rf tmp.*')
    # to view results
    print '\nDone! To view results, type:'
    print 'fslview '+file_anat+ext_anat+' '+file_anat+'_flatten'+ext_anat+' &\n'
|
ValueError
|
dataset/ETHPy150Open neuropoly/spinalcordtoolbox/scripts/sct_flatten_sagittal.py/main
|
6,715
|
def handle_message(message, topic_handlers):
    """
    Deserialize and process a message from the reader.
    For each message, `handler` is called with the deserialized message and a
    single :py:class:`h.streamer.WebSocket` instance, and should return the
    message to be sent to the client on that socket. The handler can return
    `None`, to signify that no message should be sent, or a JSON-serializable
    object. It is assumed that there is a 1:1 request-reply mapping between
    incoming messages and messages to be sent out over the websockets.
    """
    data = message.payload
    try:
        handler = topic_handlers[message.topic]
    except __HOLE__:
        raise RuntimeError("Don't know how to handle message from topic: "
                           "{}".format(message.topic))
    # N.B. We iterate over a non-weak list of instances because there's nothing
    # to stop connections being added or dropped during iteration, and if that
    # happens Python will throw a "Set changed size during iteration" error.
    sockets = list(websocket.WebSocket.instances)
    for socket in sockets:
        reply = handler(data, socket)
        if reply is None:
            continue
        if not socket.terminated:
            socket.send(json.dumps(reply))
|
KeyError
|
dataset/ETHPy150Open hypothesis/h/h/streamer/messages.py/handle_message
|
6,716
|
def set_content_length(self):
    """Compute Content-Length or switch to chunked encoding if possible"""
    try:
        blocks = len(self.result)
    except (__HOLE__,AttributeError,NotImplementedError):
        pass
    else:
        if blocks==1:
            self.headers['Content-Length'] = str(self.bytes_sent)
            return
    # XXX Try for chunked encoding if origin server and client is 1.1
|
TypeError
|
dataset/ETHPy150Open CollabQ/CollabQ/vendor/wsgiref/handlers.py/BaseHandler.set_content_length
|
6,717
|
def __init__(self, source, diag=None):
    if diag is None:
        diag = DefaultDiagnostics()
    self.source = diag.wrap(iter(source))
    try:
        self.buffer = [self.source.next()]
    except __HOLE__:
        self.buffer = []
    self.index = 0
    self.len = len(self.buffer)
    self.depth = 0
    self.offset = 0
    self.commit_depth = 0
    self.diag = diag
|
StopIteration
|
dataset/ETHPy150Open brehaut/picoparse/picoparse/__init__.py/BufferWalker.__init__
|
6,718
|
def _fill(self, size):
    """fills the internal buffer from the source iterator"""
    try:
        for i in range(size):
            self.buffer.append(self.source.next())
    except __HOLE__:
        self.buffer.append((EndOfFile, EndOfFile))
    self.len = len(self.buffer)
|
StopIteration
|
dataset/ETHPy150Open brehaut/picoparse/picoparse/__init__.py/BufferWalker._fill
|
6,719
|
def one_of(these):
    """Returns the current token if is found in the collection provided.
    Fails otherwise.
    """
    ch = peek()
    try:
        if (ch is EndOfFile) or (ch not in these):
            fail(list(these))
    except __HOLE__:
        if ch != these:
            fail([these])
    next()
    return ch
|
TypeError
|
dataset/ETHPy150Open brehaut/picoparse/picoparse/__init__.py/one_of
|
6,720
|
def not_one_of(these):
    """Returns the current token if it is not found in the collection provided.
    The negative of one_of.
    """
    ch = peek()
    desc = "not_one_of" + repr(these)
    try:
        if (ch is EndOfFile) or (ch in these):
            fail([desc])
    except __HOLE__:
        if ch != these:
            fail([desc])
    next()
    return ch
|
TypeError
|
dataset/ETHPy150Open brehaut/picoparse/picoparse/__init__.py/not_one_of
|
6,721
|
def _read_possible_json(serialized, info_file):
    try:
        d = jsonutils.loads(serialized)
    except __HOLE__ as e:
        LOG.error(_LE('Error reading image info file %(filename)s: '
                      '%(error)s'),
                  {'filename': info_file,
                   'error': e})
        d = {}
    return d
|
ValueError
|
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/virt/libvirt/imagecache.py/_read_possible_json
|
6,722
|
def _remove_old_enough_file(self, base_file, maxage, remove_sig=True,
                            remove_lock=True):
    """Remove a single swap or base file if it is old enough."""
    exists, age = self._get_age_of_file(base_file)
    if not exists:
        return
    lock_file = os.path.split(base_file)[-1]
    @utils.synchronized(lock_file, external=True,
                        lock_path=self.lock_path)
    def _inner_remove_old_enough_file():
        # NOTE(mikal): recheck that the file is old enough, as a new
        # user of the file might have come along while we were waiting
        # for the lock
        exists, age = self._get_age_of_file(base_file)
        if not exists or age < maxage:
            return
        LOG.info(_LI('Removing base or swap file: %s'), base_file)
        try:
            os.remove(base_file)
            if remove_sig:
                signature = get_info_filename(base_file)
                if os.path.exists(signature):
                    os.remove(signature)
        except __HOLE__ as e:
            LOG.error(_LE('Failed to remove %(base_file)s, '
                          'error was %(error)s'),
                      {'base_file': base_file,
                       'error': e})
    if age < maxage:
        LOG.info(_LI('Base or swap file too young to remove: %s'),
                 base_file)
    else:
        _inner_remove_old_enough_file()
    if remove_lock:
        try:
            # NOTE(jichenjc) The lock file will be constructed first
            # time the image file was accessed. the lock file looks
            # like nova-9e881789030568a317fad9daae82c5b1c65e0d4a
            # or nova-03d8e206-6500-4d91-b47d-ee74897f9b4e
            # according to the original file name
            lockutils.remove_external_lock_file(lock_file,
                lock_file_prefix='nova-', lock_path=self.lock_path)
        except OSError as e:
            LOG.debug('Failed to remove %(lock_file)s, '
                      'error was %(error)s',
                      {'lock_file': lock_file,
                       'error': e})
|
OSError
|
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/virt/libvirt/imagecache.py/ImageCacheManager._remove_old_enough_file
|
6,723
|
def get_current(self):
    """
    Returns the current ``UserSettings`` based on the SITE_ID in the
    project's settings. The ``UserSettings`` object is cached the first
    time it's retrieved from the database.
    """
    from django.conf import settings
    try:
        site_id = settings.SITE_ID
    except AttributeError:
        raise ImproperlyConfigured(
            'You\'re using the Django "sites framework" without having '
            'set the SITE_ID setting. Create a site in your database and '
            'set the SITE_ID setting to fix this error.')
    try:
        current_usersettings = USERSETTINGS_CACHE[site_id]
    except __HOLE__:
        current_usersettings = self.get(site_id=site_id)
        USERSETTINGS_CACHE[site_id] = current_usersettings
    return current_usersettings
|
KeyError
|
dataset/ETHPy150Open mishbahr/django-usersettings2/usersettings/models.py/UserSettingsManager.get_current
|
6,724
|
def clear_usersettings_cache(sender, **kwargs):
    """
    Clears the cache (if primed) each time a ``UserSettings`` is saved or deleted
    """
    instance = kwargs['instance']
    try:
        del USERSETTINGS_CACHE[instance.site.pk]
    except __HOLE__:
        pass
|
KeyError
|
dataset/ETHPy150Open mishbahr/django-usersettings2/usersettings/models.py/clear_usersettings_cache
|
6,725
|
def get_user(self):
    try:
        uid_int = base36_to_int(self.kwargs["uidb36"])
    except __HOLE__:
        raise Http404()
    return get_object_or_404(get_user_model(), id=uid_int)
|
ValueError
|
dataset/ETHPy150Open pinax/django-user-accounts/account/views.py/PasswordResetTokenView.get_user
|
6,726
|
def _get_target_list(self):
    """ fetches a list of files in the self.sourcedir location. """
    copy_files = None
    try:
        dir_list = os.listdir(self.sourcedir)
        dir_list.sort()
        # strip unwanted files
        target_list = [os.path.join(self.sourcedir, x) for x in dir_list
                       if x.lower().endswith(TaggerUtils.FILE_TYPE)]
        if self.copy_other_files:
            copy_files = [os.path.join(self.sourcedir, x) for x in
                          dir_list if not x.lower().endswith(TaggerUtils.FILE_TYPE)]
        if not target_list:
            logger.debug("target_list empty, try to retrieve subfolders")
            for y in dir_list:
                tmp_list = []
                logger.debug("subfolder: %s" % y)
                sub_dir = os.path.join(self.sourcedir, y)
                if os.path.isdir(sub_dir):
                    tmp_list.extend(os.listdir(sub_dir))
                    tmp_list.sort()
                    tmp_list = [os.path.join(sub_dir, y) for y in tmp_list]
                    # strip unwanted files
                    target_list.extend([z for z in tmp_list if
                                        z.lower().endswith(TaggerUtils.FILE_TYPE)])
                    if self.copy_other_files:
                        copy_files = [z for z in tmp_list if not
                                      z.lower().endswith(TaggerUtils.FILE_TYPE)]
    except __HOLE__, e:
        if e.errno == errno.EEXIST:
            logger.error("No such directory '%s'", self.sourcedir)
            raise IOError("No such directory '%s'", self.sourcedir)
        else:
            raise IOError("General IO system error '%s'" % errno[e])
    return {"target_list": target_list, "copy_files": copy_files}
|
OSError
|
dataset/ETHPy150Open jesseward/discogstagger/discogstagger/taggerutils.py/TaggerUtils._get_target_list
|
6,727
|
def write_file(filecontents, filename):
    """ writes a string of data to disk """
    if not os.path.exists(os.path.dirname(filename)):
        os.makedirs(os.path.dirname(filename))
    logger.debug("Writing file '%s' to disk" % filename)
    try:
        with open(filename, "w") as fh:
            fh.write(filecontents)
    except __HOLE__:
        logger.error("Unable to write file '%s'" % filename)
    return True
|
IOError
|
dataset/ETHPy150Open jesseward/discogstagger/discogstagger/taggerutils.py/write_file
|
6,728
|
def browse(request, parent_page_id=None):
    # Find parent page
    if parent_page_id:
        parent_page = get_object_or_404(Page, id=parent_page_id)
    else:
        parent_page = Page.get_first_root_node()
    # Get children of parent page
    pages = parent_page.get_children()
    # Filter them by page type
    # A missing or empty page_type parameter indicates 'all page types' (i.e. descendants of wagtailcore.page)
    page_type_string = request.GET.get('page_type') or 'wagtailcore.page'
    if page_type_string != 'wagtailcore.page':
        try:
            desired_classes = page_models_from_string(page_type_string)
        except (__HOLE__, LookupError):
            raise Http404
        # restrict the page listing to just those pages that:
        # - are of the given content type (taking into account class inheritance)
        # - or can be navigated into (i.e. have children)
        choosable_pages = filter_page_type(pages, desired_classes)
        descendable_pages = pages.filter(numchild__gt=0)
        pages = choosable_pages | descendable_pages
    else:
        desired_classes = (Page, )
    can_choose_root = request.GET.get('can_choose_root', False)
    # Parent page can be chosen if it is a instance of desired_classes
    parent_page.can_choose = (
        issubclass(parent_page.specific_class or Page, desired_classes) and
        (can_choose_root or not parent_page.is_root())
    )
    # Pagination
    # We apply pagination first so we don't need to walk the entire list
    # in the block below
    paginator, pages = paginate(request, pages, per_page=25)
    # Annotate each page with can_choose/can_decend flags
    for page in pages:
        if desired_classes == (Page, ):
            page.can_choose = True
        else:
            page.can_choose = issubclass(page.specific_class or Page, desired_classes)
        page.can_descend = page.get_children_count()
    # Render
    return render_modal_workflow(
        request,
        'wagtailadmin/chooser/browse.html', 'wagtailadmin/chooser/browse.js',
        shared_context(request, {
            'parent_page': parent_page,
            'pages': pages,
            'search_form': SearchForm(),
            'page_type_string': page_type_string,
            'page_type_names': [desired_class.get_verbose_name() for desired_class in desired_classes],
            'page_types_restricted': (page_type_string != 'wagtailcore.page')
        })
    )
|
ValueError
|
dataset/ETHPy150Open torchbox/wagtail/wagtail/wagtailadmin/views/chooser.py/browse
|
6,729
|
def search(request, parent_page_id=None):
    # A missing or empty page_type parameter indicates 'all page types' (i.e. descendants of wagtailcore.page)
    page_type_string = request.GET.get('page_type') or 'wagtailcore.page'
    try:
        desired_classes = page_models_from_string(page_type_string)
    except (__HOLE__, LookupError):
        raise Http404
    search_form = SearchForm(request.GET)
    if search_form.is_valid() and search_form.cleaned_data['q']:
        pages = Page.objects.exclude(
            depth=1 # never include root
        )
        pages = filter_page_type(pages, desired_classes)
        pages = pages.search(search_form.cleaned_data['q'], fields=['title'])
    else:
        pages = Page.objects.none()
    paginator, pages = paginate(request, pages, per_page=25)
    for page in pages:
        page.can_choose = True
    return render(
        request, 'wagtailadmin/chooser/_search_results.html',
        shared_context(request, {
            'searchform': search_form,
            'pages': pages,
            'page_type_string': page_type_string,
        })
    )
|
ValueError
|
dataset/ETHPy150Open torchbox/wagtail/wagtail/wagtailadmin/views/chooser.py/search
|
6,730
|
def get_reader(self, filename):
    try:
        return self.blob_storage.OpenBlob(self.get_blob_key(filename))
    except __HOLE__:
        return None
|
IOError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/google/appengine/api/files/file_service_stub.py/GoogleStorage.get_reader
|
6,731
|
def command(self, imap_command):
    """
    Process an IMAP command we received from the client.
    We use introspection to find out what IMAP commands this handler
    actually supports.
    Arguments:
    - `imap_command`: An instance parse.IMAPClientCommand
    """
    self.debug_log_cmd(imap_command)
    # Since the imap command was properly parsed we know it is a valid
    # command. If it is one we support there will be a method
    # on this object of the format "do_%s" that will actually do the
    # command.
    #
    # If no such method exists then this is not a supported command.
    #
    self.tag = imap_command.tag
    if not hasattr(self, 'do_%s' % imap_command.command):
        self.client.push("%s BAD Sorry, %s is not a supported "
                         "command\r\n" % (imap_command.tag,
                                          imap_command.command))
        return
    # Okay. The command was a known command. Process it. Each 'do_' method
    # will send any messages back to the client specific to that command
    # except the "OK" response and any exceptional errors which are handled
    # by this method.
    #
    start_time = time.time()
    try:
        result = getattr(self, 'do_%s' % imap_command.command)(imap_command)
    except No, e:
        result = "%s NO %s\r\n" % (imap_command.tag, str(e))
        self.client.push(result)
        self.log.debug(result)
        return
    except Bad, e:
        result = "%s BAD %s\r\n" % (imap_command.tag, str(e))
        self.client.push(result)
        self.log.debug(result)
        return
    except MailboxLock, e:
        self.log.warn("Unable to get lock on mailbox '%s', putting on to command queue" % e.mbox.name)
        e.mbox.command_queue.append((self, imap_command))
        return
    except __HOLE__:
        sys.exit(0)
    except Exception, e:
        result = "%s BAD Unhandled exception: %s\r\n" % (imap_command.tag,
                                                         str(e))
        self.client.push(result)
        self.log.debug(result)
        raise
    # If there was no result from running this command then everything went
    # okay and we send back a final 'OK' to the client for processing this
    # command.
    #
    if result is None:
        result = "%s OK %s completed\r\n" % (imap_command.tag,
                                             imap_command.command.upper())
        self.client.push(result)
        # self.log.debug(result)
    elif result is False:
        # Some commands do NOT send an OK response immediately.. aka the
        # IDLE command and commands that are being processed in multiple
        # runs (see 'command_queue' on the mailbox). If result is false
        # then we just return. We do not send a message back to our client.
        #
        return
    else:
        # The command has some specific response it wants to send back as
        # part of the tagged OK response.
        #
        result = "%s OK %s %s completed\r\n" % \
            (imap_command.tag, result,
             imap_command.command.upper())
        self.client.push(result)
        # self.log.debug(result)
    return

##################################################################
#
|
KeyboardInterrupt
|
dataset/ETHPy150Open scanner/asimap/asimap/client.py/BaseClientHandler.command
|
6,732
|
@requires_admin
def post(self, step_id):
    step = Step.query.get(step_id)
    if step is None:
        return error("step not found", http_code=404)
    args = self.parser.parse_args()
    if args.implementation is not None:
        step.implementation = args.implementation
    if args.data is not None:
        try:
            data = json.loads(args.data)
        except __HOLE__ as e:
            return error("invalid JSON: %s" % e)
        if not isinstance(data, dict):
            return error("data must be a JSON mapping")
        impl_cls = step.get_implementation(load=False)
        if impl_cls is None:
            return error("unable to load build step implementation")
        try:
            # XXX(dcramer): It's important that we deepcopy data so any
            # mutations within the BuildStep don't propagate into the db
            impl_cls(**deepcopy(data))
        except Exception as exc:
            return error("unable to create build step mapping provided data: %s" % exc)
        step.data = data
    if args.order is not None:
        step.order = args.order
    step.date_modified = datetime.utcnow()
    db.session.add(step)
    plan = step.plan
    plan.date_modified = step.date_modified
    db.session.add(plan)
    for name in STEP_OPTIONS.keys():
        value = args.get(name)
        if value is None:
            continue
        create_or_update(ItemOption, where={
            'item_id': step.id,
            'name': name,
        }, values={
            'value': value,
        })
    db.session.commit()
    return self.respond(step)
|
ValueError
|
dataset/ETHPy150Open dropbox/changes/changes/api/step_details.py/StepDetailsAPIView.post
|
6,733
|
def fromfile(file_h):
    """
    Given a string file name, returns a GEOSGeometry. The file may contain WKB,
    WKT, or HEX.
    """
    # If given a file name, get a real handle.
    if isinstance(file_h, six.string_types):
        with open(file_h, 'rb') as file_h:
            buf = file_h.read()
    else:
        buf = file_h.read()
    # If we get WKB need to wrap in memoryview(), so run through regexes.
    if isinstance(buf, bytes):
        try:
            decoded = buf.decode()
            if wkt_regex.match(decoded) or hex_regex.match(decoded):
                return GEOSGeometry(decoded)
        except __HOLE__:
            pass
    else:
        return GEOSGeometry(buf)
    return GEOSGeometry(memoryview(buf))
|
UnicodeDecodeError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/contrib/gis/geos/factory.py/fromfile
|
6,734
|
def get_data(self):
    instances = []
    try:
        result = api.nova.hypervisor_search(self.request,
                                            self.kwargs['hypervisor'])
        for hypervisor in result:
            try:
                instances += hypervisor.servers
            except __HOLE__:
                pass
    except Exception:
        exceptions.handle(
            self.request,
            _('Unable to retrieve hypervisor instances list.'))
    return instances
|
AttributeError
|
dataset/ETHPy150Open CiscoSystems/avos/openstack_dashboard/dashboards/admin/hypervisors/views.py/AdminDetailView.get_data
|
6,735
|
def _register_action_alias(self, pack, action_alias):
    action_alias_db = self._get_action_alias_db(pack=pack, action_alias=action_alias)
    try:
        action_alias_db.id = ActionAlias.get_by_name(action_alias_db.name).id
    except __HOLE__:
        LOG.debug('ActionAlias %s not found. Creating new one.', action_alias)
    try:
        action_alias_db = ActionAlias.add_or_update(action_alias_db)
        extra = {'action_alias_db': action_alias_db}
        LOG.audit('Action alias updated. Action alias %s from %s.', action_alias_db,
                  action_alias, extra=extra)
    except Exception:
        LOG.exception('Failed to create action alias %s.', action_alias_db.name)
        raise
|
ValueError
|
dataset/ETHPy150Open StackStorm/st2/st2common/st2common/bootstrap/aliasesregistrar.py/AliasesRegistrar._register_action_alias
|
6,736
|
def clean(self, require_validation=True, check_unique=True):
    key_attr = self.key.replace('-', '_')
    # aa stands for auxilarary attribute.
    if (not hasattr(self, key_attr) and
            not hasattr(self, "_aa_" + key_attr)):
        # ??? Do we want this?
        if self.force_validation and require_validation:
            raise ValidationError("No validator for key %s" % self.key)
        else:
            if check_unique: # Since we aren't call this later
                self.validate_unique()
            return
    if hasattr(self, key_attr):
        validate = getattr(self, key_attr)
    else:
        validate = getattr(self, "_aa_" + key_attr)
    if not callable(validate):
        raise ValidationError("No validator for key %s not callable" %
                              key_attr)
    try:
        validate()
    except __HOLE__, e:
        # We want to catch when the validator didn't accept the correct
        # number of arguements.
        raise ValidationError("%s" % str(e))
    if check_unique:
        self.validate_unique()
|
TypeError
|
dataset/ETHPy150Open mozilla/inventory/core/keyvalue/models.py/KeyValue.clean
|
6,737
|
def _unserialize(value):
    if not isinstance(value, six.string_types):
        return _cast_to_unicode(value)
    try:
        return _cast_to_unicode(json.loads(value))
    except __HOLE__:
        return _cast_to_unicode(value)
|
ValueError
|
dataset/ETHPy150Open niwinz/djorm-pgarray/djorm_pgarray/fields.py/_unserialize
|
6,738
|
def get_transform(self, name):
    transform = super(ArrayField, self).get_transform(name)
    if transform:
        return transform
    try:
        index = int(name)
    except __HOLE__:
        pass
    else:
        index += 1 # postgres uses 1-indexing
        return IndexTransformFactory(index, self)
    try:
        start, end = name.split("_")
        start = int(start) + 1
        end = int(end) # don't add one here because postgres slices are weird
    except ValueError:
        pass
    else:
        return SliceTransformFactory(start, end)
|
ValueError
|
dataset/ETHPy150Open niwinz/djorm-pgarray/djorm_pgarray/fields.py/ArrayField.get_transform
|
6,739
|
def apply_lib(doc, funcs, app_dir, objs):
    for k, v in funcs.items():
        if not isinstance(v, basestring):
            continue
        else:
            logger.debug("process function: %s" % k)
            old_v = v
            try:
                funcs[k] = run_json_macros(doc,
                    run_code_macros(v, app_dir), app_dir)
            except __HOLE__, e:
                raise MacroError(
                    "Error running !code or !json on function \"%s\": %s" % (k, e))
            if old_v != funcs[k]:
                objs[md5(to_bytestring(funcs[k])).hexdigest()] = old_v
|
ValueError
|
dataset/ETHPy150Open benoitc/couchdbkit/couchdbkit/designer/macros.py/apply_lib
|
6,740
|
def run_code_macros(f_string, app_dir):
    def rreq(mo):
        # just read the file and return it
        path = os.path.join(app_dir, mo.group(2).strip())
        library = ''
        filenum = 0
        for filename in glob.iglob(path):
            logger.debug("process code macro: %s" % filename)
            try:
                cnt = read_file(filename)
                if cnt.find("!code") >= 0:
                    cnt = run_code_macros(cnt, app_dir)
                library += cnt
            except __HOLE__, e:
                raise MacroError(str(e))
            filenum += 1
        if not filenum:
            raise MacroError(
                "Processing code: No file matching '%s'" % mo.group(2))
        return library
    re_code = re.compile('(\/\/|#)\ ?!code (.*)')
    return re_code.sub(rreq, f_string)
|
IOError
|
dataset/ETHPy150Open benoitc/couchdbkit/couchdbkit/designer/macros.py/run_code_macros
|
6,741
|
def run_json_macros(doc, f_string, app_dir):
    included = {}
    varstrings = []
    def rjson(mo):
        if mo.group(2).startswith('_attachments'):
            # someone want to include from attachments
            path = os.path.join(app_dir, mo.group(2).strip())
            filenum = 0
            for filename in glob.iglob(path):
                logger.debug("process json macro: %s" % filename)
                library = ''
                try:
                    if filename.endswith('.json'):
                        library = read_json(filename)
                    else:
                        library = read_file(filename)
                except __HOLE__, e:
                    raise MacroError(str(e))
                filenum += 1
                current_file = filename.split(app_dir)[1]
                fields = current_file.split('/')
                count = len(fields)
                include_to = included
                for i, field in enumerate(fields):
                    if i+1 < count:
                        include_to[field] = {}
                        include_to = include_to[field]
                    else:
                        include_to[field] = library
            if not filenum:
                raise MacroError(
                    "Processing code: No file matching '%s'" % mo.group(2))
        else:
            logger.debug("process json macro: %s" % mo.group(2))
            fields = mo.group(2).strip().split('.')
            library = doc
            count = len(fields)
            include_to = included
            for i, field in enumerate(fields):
                if not field in library:
                    logger.warning(
                        "process json macro: unknown json source: %s" % mo.group(2))
                    break
                library = library[field]
                if i+1 < count:
                    include_to[field] = include_to.get(field, {})
                    include_to = include_to[field]
                else:
                    include_to[field] = library
        return f_string
    def rjson2(mo):
        return '\n'.join(varstrings)
    re_json = re.compile('(\/\/|#)\ ?!json (.*)')
    re_json.sub(rjson, f_string)
    if not included:
        return f_string
    for k, v in included.iteritems():
        varstrings.append("var %s = %s;" % (k, json.dumps(v).encode('utf-8')))
    return re_json.sub(rjson2, f_string)
|
IOError
|
dataset/ETHPy150Open benoitc/couchdbkit/couchdbkit/designer/macros.py/run_json_macros
|
6,742
|
def restart_apache():
    try:
        check_apache_running()
        apache = subprocess.Popen(['apachectl', 'restart'], stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
    except __HOLE__:
        apache = subprocess.Popen(['apachectl', 'start'], stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
    apache.communicate()
    subprocess.call(['sleep', '3'])
|
ValueError
|
dataset/ETHPy150Open nasa-gibs/onearth/src/test/oe_test_utils.py/restart_apache
|
6,743
|
def find_string(file_path, string):
    try:
        with open(file_path, 'r') as f:
            result = any(line for line in f if string in line)
    except __HOLE__:
        result = False
    return result
|
OSError
|
dataset/ETHPy150Open nasa-gibs/onearth/src/test/oe_test_utils.py/find_string
|
6,744
|
def create_continuous_period_test_files(path, period_units, period_length, num_periods, start_datetime, prefix='',
                                        suffix='_.mrf', prototype_file=None, make_year_dirs=False, no_files=False):
    """
    Fills a directory structure with files that have a continuous period interval between them
    using the specified parameters.
    Arguments:
        path -- base directory tree to populate.
        period_units -- unit size of each period in 'days', 'months', or 'years'.
        period_length -- the length of each period in the aforementioned units.
        num_periods -- the number of period files to create.
        start_date -- a datetime.datetime object with the desired start date.
        prefix -- (optional) a string to append to the beginning of each filename.
        suffix -- (optional) a string to append to the end of each filename.
        prototype_file -- (optional) a prototype file to create each copy from (otherwise creates just empty files).
        make_year_dirs -- (optional) choose to create separate year dirs for the created files instead of dumping them all
            in one dir.
        no_files -- (optional) returns a list of dates but creates no files.
    """
    if not no_files:
        make_dir_tree(path)
    # Keep track of each date so we can evaluate if a new year directory needs to be created.
    test_dates = []
    date = start_datetime
    year_dir = ''
    # Create a set of date intervals and corresponding dummy files
    for x in range(0, num_periods + 1):
        test_dates.append(date)
        if any(unit in period_units for unit in ('hours', 'minutes', 'seconds')):
            subdaily = True
        else:
            subdaily = False
        if not no_files:
            # Create year directory if requested
            if make_year_dirs and (not x or test_dates[-1].year != date.year):
                year_dir = str(date.year)
                make_dir_tree(os.path.join(path, year_dir))
            # Assemble new filename and create file, using prototype if specified
            if subdaily is True:
                time_string = str(date.hour).zfill(2) + str(date.minute).zfill(2) + str(date.second).zfill(2)
            else:
                time_string = ''
            filename = prefix + str(date.year) + str(date.timetuple().tm_yday).zfill(3) + time_string + suffix
            output_path = os.path.join(path, year_dir)
            output_file = os.path.join(output_path, filename)
            if prototype_file:
                try:
                    copyfile(prototype_file, output_file)
                except __HOLE__:
                    pass
            else:
                open(output_file, 'a').close()
        date += relativedelta(**{period_units: period_length})
    return test_dates
|
OSError
|
dataset/ETHPy150Open nasa-gibs/onearth/src/test/oe_test_utils.py/create_continuous_period_test_files
|
6,745
|
def create_intermittent_period_test_files(path, period_units, period_length, num_periods, start_datetime, prefix='',
                                          suffix='_.mrf', prototype_file=None, make_year_dirs=False, no_files=False):
    """
    Fills a directory structure with files that have an intermittent period
    using the specified parameters. Returns a list of all the date intervals
    that were created.
    Arguments:
        path -- base directory tree to populate.
        period_units -- unit size of each period in 'days', 'months', or 'years'.
        period_length -- the length of each period in the aforementioned units
        num_periods -- the number of interval pairs to create.
        start_date -- a datetime.date object with the desired start date
        prefix -- (optional) a string to append to the beginning of each filename.
        suffix -- (optional) a string to append to the end of each filename.
        prototype_file -- (optional) a prototype file to create each copy from (otherwise creates just empty files).
        make_year_dirs -- (optional) choose to create separate year dirs for the created files instead of dumping them all
            in one dir.
        no_files -- (optional) returns a list of dates but creates no files.
    """
    if not no_files:
        make_dir_tree(path)
    # Create a list of date intervals, each separated by the specified period length
    test_dates = []
    year_dir = ''
    for x in range(num_periods):
        # Create a new start date and end date for each interval requested
        interval_set = []
        for y in range(1, 5):
            date = start_datetime + relativedelta(**{period_units: period_length * y})
            interval_set.append(date)
        test_dates.append(interval_set)
        # Push the start time of the next interval to twice the period distance from the end of the last interval
        start_datetime = interval_set[-1] + relativedelta(**{period_units: period_length * 2})
        if not no_files:
            if any(unit in period_units for unit in ('hours', 'minutes', 'seconds')):
                subdaily = True
            else:
                subdaily = False
            # If this is the first date or it has a different year than the previous, create that dir
            if make_year_dirs and (not x or test_dates[-1][-1].year != date.year):
                year_dir = str(date.year)
                make_dir_tree(os.path.join(path, year_dir))
            for interval in interval_set:
                if subdaily is True:
                    time_string = str(interval.hour).zfill(2) + str(interval.minute).zfill(2) + str(interval.second).zfill(2)
                else:
                    time_string = ''
                filename = prefix + str(interval.year) + str(interval.timetuple().tm_yday).zfill(3) + time_string + suffix
                output_path = os.path.join(path, year_dir)
                output_file = os.path.join(output_path, filename)
                if prototype_file:
                    try:
                        copyfile(prototype_file, output_file)
                    except __HOLE__:
                        pass
                else:
                    open(output_file, 'a').close()
    return test_dates
|
OSError
|
dataset/ETHPy150Open nasa-gibs/onearth/src/test/oe_test_utils.py/create_intermittent_period_test_files
|
6,746
|
def get_layer_config(filepath, archive_config):
    """
    Parses a layer config XML file and its associated environment config file
    and returns a dict with relevant values. Generally, <TagName> turns into config['tag_name'].
    Arguments:
        filepath -- path to the layer config file
        archive config -- path to the archive config file
    """
    config = {}
    # Get the layer, environment, and archive config DOMs
    with open(filepath, "r") as lc:
        config_dom = xml.dom.minidom.parse(lc)
    env_config = config_dom.getElementsByTagName("EnvironmentConfig")[0].firstChild.nodeValue
    with open(env_config, "r") as env:
        env_dom = xml.dom.minidom.parse(env)
    with open(archive_config, "r") as archive:
        archive_dom = xml.dom.minidom.parse(archive)
    # Get archive root path and the archive location
    archive_root = config_dom.getElementsByTagName('ArchiveLocation')[0].attributes['root'].value
    config['archive_basepath'] = next(loc.getElementsByTagName('Location')[0].firstChild.nodeValue for loc in archive_dom.getElementsByTagName('Archive') if loc.attributes['id'].value == archive_root)
    config['archive_location'] = os.path.join(config['archive_basepath'], config_dom.getElementsByTagName('ArchiveLocation')[0].firstChild.nodeValue)
    # Add everything we need from the environment config
    staging_locations = env_dom.getElementsByTagName('StagingLocation')
    config['wmts_staging_location'] = next((loc.firstChild.nodeValue for loc in staging_locations if loc.attributes["service"].value == "wmts"), None)
    config['twms_staging_location'] = next((loc.firstChild.nodeValue for loc in staging_locations if loc.attributes["service"].value == "twms"), None)
    config['cache_location'] = next((loc.firstChild.nodeValue for loc in env_dom.getElementsByTagName("CacheLocation") if loc.attributes["service"].value == "wmts"), None)
    config['wmts_gc_path'] = next((loc.firstChild.nodeValue for loc in env_dom.getElementsByTagName("GetCapabilitiesLocation") if loc.attributes["service"].value == "wmts"), None)
    config['twms_gc_path'] = next((loc.firstChild.nodeValue for loc in env_dom.getElementsByTagName("GetCapabilitiesLocation") if loc.attributes["service"].value == "twms"), None)
    config['colormap_locations'] = [loc for loc in env_dom.getElementsByTagName("ColorMapLocation")]
    config['legend_location'] = env_dom.getElementsByTagName('LegendLocation')[0].firstChild.nodeValue
    # Add everything we need from the layer config
    config['prefix'] = config_dom.getElementsByTagName("FileNamePrefix")[0].firstChild.nodeValue
    config['identifier'] = config_dom.getElementsByTagName("Identifier")[0].firstChild.nodeValue
    config['time'] = config_dom.getElementsByTagName("Time")[0].firstChild.nodeValue
    config['tiled_group_name'] = config_dom.getElementsByTagName("TiledGroupName")[0].firstChild.nodeValue
    config['colormaps'] = config_dom.getElementsByTagName("ColorMap")
    try:
        config['empty_tile'] = config_dom.getElementsByTagName('EmptyTile')[0].firstChild.nodeValue
    except __HOLE__:
        config['empty_tile_size'] = config_dom.getElementsByTagName('EmptyTileSize')[0].firstChild.nodeValue
    config['year_dir'] = False
    try:
        if config_dom.getElementsByTagName('ArchiveLocation')[0].attributes['year'].value == 'true':
            config['year_dir'] = True
    except KeyError:
        pass
    return config
|
IndexError
|
dataset/ETHPy150Open nasa-gibs/onearth/src/test/oe_test_utils.py/get_layer_config
|
6,747
|
def make_dir_tree(path, ignore_existing=False):
    """
    Creates the specified directory tree. Throws an error
    and doesn't do anything if there are already files in that dir.
    Kind of like 'mkdir -p'.
    Arguments:
        path -- path to be created
    """
    try:
        os.makedirs(path)
    except __HOLE__:
        if os.listdir(path):
            if not ignore_existing:
                raise OSError("Target directory {0} is not empty.".format(path))
            else:
                pass
        else:
            pass
    return
|
OSError
|
dataset/ETHPy150Open nasa-gibs/onearth/src/test/oe_test_utils.py/make_dir_tree
|
6,748
|
@webapi_check_login_required
@webapi_check_local_site
def get(self, request, *args, **kwargs):
    """Returns the last update made to the review request.
    This shows the type of update that was made, the user who made the
    update, and when the update was made. Clients can use this to inform
    the user that the review request was updated, or automatically update
    it in the background.
    This does not take into account changes to a draft review request, as
    that's generally not update information that the owner of the draft is
    interested in. Only public updates are represented.
    """
    try:
        review_request = \
            resources.review_request.get_object(request, *args, **kwargs)
    except __HOLE__:
        return DOES_NOT_EXIST
    if not resources.review_request.has_access_permissions(request,
                                                           review_request):
        return self.get_no_access_error(request)
    timestamp, updated_object = review_request.get_last_activity()
    etag = encode_etag('%s:%s' % (timestamp, updated_object.pk))
    if etag_if_none_match(request, etag):
        return HttpResponseNotModified()
    user = None
    summary = None
    update_type = None
    if isinstance(updated_object, ReviewRequest):
        user = updated_object.submitter
        if updated_object.status == ReviewRequest.SUBMITTED:
            summary = _("Review request submitted")
        elif updated_object.status == ReviewRequest.DISCARDED:
            summary = _("Review request discarded")
        else:
            summary = _("Review request updated")
        update_type = "review-request"
    elif isinstance(updated_object, DiffSet):
        summary = _("Diff updated")
        update_type = "diff"
    elif isinstance(updated_object, Review):
        user = updated_object.user
        if updated_object.is_reply():
            summary = _("New reply")
            update_type = "reply"
        else:
            summary = _("New review")
            update_type = "review"
    else:
        # Should never be able to happen. The object will always at least
        # be a ReviewRequest.
        assert False
    return 200, {
        self.item_result_key: {
            'timestamp': timestamp,
            'user': user,
            'summary': summary,
            'type': update_type,
        }
    }, {
        'ETag': etag,
    }
|
ObjectDoesNotExist
|
dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/webapi/resources/review_request_last_update.py/ReviewRequestLastUpdateResource.get
|
6,749
|
def try_match(regexp, s, i):
    r = re.compile(regexp)
    m = r.match(s, i)
    if m:
        try:
            g = m.group(1)
        except __HOLE__:
            g = ''
        return m.end(0), g
    else:
        raise NotFound()

# List of tags from http://www.w3schools.com/tags/ref_byfunc.asp
|
IndexError
|
dataset/ETHPy150Open hcs42/ExponWords/models.py/try_match
|
6,750
|
def incr_wcd(wcd, wdict, strength, due_date, ask_date, count):
    strength_to_word_count = wcd.setdefault((wdict, ask_date), {})
    key = (strength, due_date)
    try:
        strength_to_word_count[key] += count
    except __HOLE__:
        strength_to_word_count[key] = count
|
KeyError
|
dataset/ETHPy150Open hcs42/ExponWords/models.py/incr_wcd
|
6,751
|
def run_as_another_namespace(
    pid,
    namespaces,
    function,
    *args,
    **kwargs
):
    hack_to_pre_load_modules()
    context = ProcessContext(pid, namespaces)
    context.attach()
    queue = multiprocessing.Queue(2 ** 15)
    try:
        child_process = multiprocessing.Process(
            name='crawler-%s' %
            pid, target=function_wrapper, args=(
                queue, function, args), kwargs=kwargs)
        child_process.start()
    except __HOLE__:
        queue.close()
        raise CrawlError()
    child_exception = None
    try:
        (result, child_exception) = queue.get(timeout=IN_CONTAINER_TIMEOUT)
    except Queue.Empty:
        child_exception = CrawlTimeoutError()
    except Exception:
        result = None
    if child_exception:
        result = None
    child_process.join(IN_CONTAINER_TIMEOUT)
    # The join failed and the process might still be alive
    if child_process.is_alive():
        errmsg = ('Timed out waiting for process %d to exit.' %
                  child_process.pid)
        queue.close()
        os.kill(child_process.pid, 9)
        context.detach()
        logger.error(errmsg)
        raise CrawlTimeoutError(errmsg)
    context.detach()
    if result is None:
        if child_exception:
            raise child_exception
        raise CrawlError('Unknown crawl error.')
    return result
|
OSError
|
dataset/ETHPy150Open cloudviz/agentless-system-crawler/crawler/namespace.py/run_as_another_namespace
|
6,752
|
def construct_instance(form, instance, fields=None, exclude=None):
    """
    Constructs and returns a document instance from the bound ``form``'s
    ``cleaned_data``, but does not save the returned instance to the
    database.
    """
    cleaned_data = form.cleaned_data
    file_field_list = []
    # check wether object is instantiated
    if isinstance(instance, type):
        instance = instance()
    for f in instance._fields.values():
        if isinstance(f, ObjectIdField):
            continue
        if f.name not in cleaned_data:
            continue
        if fields is not None and f.name not in fields:
            continue
        if exclude and f.name in exclude:
            continue
        # Defer saving file-type fields until after the other fields, so a
        # callable upload_to can use the values from other fields.
        if isinstance(f, FileField) or \
                (isinstance(f, (MapField, ListField)) and
                 isinstance(f.field, FileField)):
            file_field_list.append(f)
        else:
            setattr(instance, f.name, cleaned_data.get(f.name))
    for f in file_field_list:
        if isinstance(f, MapField):
            map_field = getattr(instance, f.name)
            uploads = cleaned_data[f.name]
            for key, uploaded_file in uploads.items():
                if uploaded_file is None:
                    continue
                file_data = map_field.get(key, None)
                map_field[key] = _save_iterator_file(f, instance,
                                                     uploaded_file, file_data)
            setattr(instance, f.name, map_field)
        elif isinstance(f, ListField):
            list_field = getattr(instance, f.name)
            uploads = cleaned_data[f.name]
            for i, uploaded_file in enumerate(uploads):
                if uploaded_file is None:
                    continue
                try:
                    file_data = list_field[i]
                except IndexError:
                    file_data = None
                file_obj = _save_iterator_file(f, instance,
                                               uploaded_file, file_data)
                try:
                    list_field[i] = file_obj
                except __HOLE__:
                    list_field.append(file_obj)
            setattr(instance, f.name, list_field)
        else:
            field = getattr(instance, f.name)
            upload = cleaned_data[f.name]
            if upload is None:
                continue
            try:
                upload.file.seek(0)
                # delete first to get the names right
                if field.grid_id:
                    field.delete()
                filename = _get_unique_filename(upload.name, f.db_alias,
                                                f.collection_name)
                field.put(upload, content_type=upload.content_type,
                          filename=filename)
                setattr(instance, f.name, field)
            except AttributeError:
                # file was already uploaded and not changed during edit.
                # upload is already the gridfsproxy object we need.
                upload.get()
                setattr(instance, f.name, upload)
    return instance
|
IndexError
|
dataset/ETHPy150Open jschrewe/django-mongodbforms/mongodbforms/documents.py/construct_instance
|
6,753
|
def __new__(cls, name, bases, attrs):
    formfield_callback = attrs.pop('formfield_callback', None)
    try:
        parents = [
            b for b in bases
            if issubclass(b, DocumentForm) or
            issubclass(b, EmbeddedDocumentForm)
        ]
    except __HOLE__:
        # We are defining DocumentForm itself.
        parents = None
    new_class = super(DocumentFormMetaclass, cls).__new__(cls, name,
                                                          bases, attrs)
    if not parents:
        return new_class
    if 'media' not in attrs:
        new_class.media = media_property(new_class)
    opts = new_class._meta = ModelFormOptions(
        getattr(new_class, 'Meta', None)
    )
    if opts.document:
        formfield_generator = getattr(opts,
                                      'formfield_generator',
                                      _fieldgenerator)
        # If a model is defined, extract form fields from it.
        fields = fields_for_document(opts.document, opts.fields,
                                     opts.exclude, opts.widgets,
                                     formfield_callback,
                                     formfield_generator)
        # make sure opts.fields doesn't specify an invalid field
        none_document_fields = [k for k, v in fields.items() if not v]
        missing_fields = (set(none_document_fields) -
                          set(new_class.declared_fields.keys()))
        if missing_fields:
            message = 'Unknown field(s) (%s) specified for %s'
            message = message % (', '.join(missing_fields),
                                 opts.model.__name__)
            raise FieldError(message)
        # Override default model fields with any custom declared ones
        # (plus, include all the other declared fields).
        fields.update(new_class.declared_fields)
    else:
        fields = new_class.declared_fields
    new_class.base_fields = fields
    return new_class
|
NameError
|
dataset/ETHPy150Open jschrewe/django-mongodbforms/mongodbforms/documents.py/DocumentFormMetaclass.__new__
|
6,754
|
def _post_clean(self):
    opts = self._meta
    # Update the model instance with self.cleaned_data.
    self.instance = construct_instance(self, self.instance, opts.fields,
                                       opts.exclude)
    changed_fields = getattr(self.instance, '_changed_fields', [])
    exclude = self._get_validation_exclusions()
    try:
        for f in self.instance._fields.values():
            value = getattr(self.instance, f.name)
            if f.name not in exclude:
                f.validate(value)
            elif value in EMPTY_VALUES and f.name not in changed_fields:
                # mongoengine chokes on empty strings for fields
                # that are not required. Clean them up here, though
                # this is maybe not the right place :-)
                setattr(self.instance, f.name, None)
                # opts._dont_save.append(f.name)
    except ValidationError as e:
        err = {f.name: [e.message]}
        self._update_errors(err)
    # Call validate() on the document. Since mongoengine
    # does not provide an argument to specify which fields
    # should be excluded during validation, we replace
    # instance._fields_ordered with a version that does
    # not include excluded fields. The attribute gets
    # restored after validation.
    original_fields = self.instance._fields_ordered
    self.instance._fields_ordered = tuple(
        [f for f in original_fields if f not in exclude]
    )
    try:
        self.instance.validate()
    except __HOLE__ as e:
        if MONGO_NON_FIELD_ERRORS in e.errors:
            error = e.errors.get(MONGO_NON_FIELD_ERRORS)
        else:
            error = e.message
        self._update_errors({NON_FIELD_ERRORS: [error, ]})
    finally:
        self.instance._fields_ordered = original_fields
    # Validate uniqueness if needed.
    if self._validate_unique:
        self.validate_unique()
|
ValidationError
|
dataset/ETHPy150Open jschrewe/django-mongodbforms/mongodbforms/documents.py/BaseDocumentForm._post_clean
|
6,755
|
def save(self, commit=True):
"""
Saves this ``form``'s cleaned_data into model instance
``self.instance``.
If commit=True, then the changes to ``instance`` will be saved to the
database. Returns ``instance``.
"""
try:
if self.instance.pk is None:
fail_message = 'created'
else:
fail_message = 'changed'
except (KeyError, __HOLE__):
fail_message = 'embedded document saved'
obj = save_instance(self, self.instance, self._meta.fields,
fail_message, commit, construct=False)
return obj
|
AttributeError
|
dataset/ETHPy150Open jschrewe/django-mongodbforms/mongodbforms/documents.py/BaseDocumentForm.save
|
6,756
|
def construct_initial(self):
initial = []
try:
for d in self.get_queryset():
initial.append(document_to_dict(d))
except __HOLE__:
pass
return initial
|
TypeError
|
dataset/ETHPy150Open jschrewe/django-mongodbforms/mongodbforms/documents.py/BaseDocumentFormSet.construct_initial
|
6,757
|
def save(self, commit=True):
"""
Saves model instances for every form, adding and changing instances
as necessary, and returns the list of instances.
"""
saved = []
for form in self.forms:
if not form.has_changed() and form not in self.initial_forms:
continue
obj = self.save_object(form)
if form.cleaned_data.get("DELETE", False):
try:
obj.delete()
except __HOLE__:
# if it has no delete method it is an embedded object. We
# just don't add to the list and it's gone. Cool huh?
continue
if commit:
obj.save()
saved.append(obj)
return saved
|
AttributeError
|
dataset/ETHPy150Open jschrewe/django-mongodbforms/mongodbforms/documents.py/BaseDocumentFormSet.save
|
6,758
|
def save(self, commit=True):
# Don't try to save the new documents. Embedded objects don't have
# a save method anyway.
objs = super(EmbeddedDocumentFormSet, self).save(commit=False)
objs = objs or []
if commit and self.parent_document is not None:
field = self.parent_document._fields.get(
self.form._meta.embedded_field, None)
if isinstance(field, EmbeddedDocumentField):
try:
obj = objs[0]
except __HOLE__:
obj = None
setattr(
self.parent_document, self.form._meta.embedded_field, obj)
else:
setattr(
self.parent_document, self.form._meta.embedded_field, objs)
self.parent_document.save()
return objs
|
IndexError
|
dataset/ETHPy150Open jschrewe/django-mongodbforms/mongodbforms/documents.py/EmbeddedDocumentFormSet.save
|
6,759
|
def load_plugins(self):
"""
Create Plugin objects
"""
# Check if BasePlugin template is present
# or raise exception
try:
self.get_template('BasePlugin')
except (TemplateNotFound, NoTemplatesConfigured):
lg.error("Required BasePlugin template is not configured!")
raise BasePluginTemplateNotFound
for plugin, options in self.conf_plugins.iteritems():
if options.has_key('Enabled') and options['Enabled'] == False:
lg.info("Plugin %s is disabled, skipping.." % plugin)
continue
try:
self.plugins[plugin] = self.load_plugin(plugin, options)
except TemplateNotFound:
lg.error("Can't find configured template %s for plugin %s, plugin not loaded" % (options['Template'], plugin))
continue
except NoTemplatesConfigured:
lg.error("There are no templates configured, template %s is required by plugin %s, plugin not loaded" % (options['Template'], plugin))
continue
except __HOLE__ as e:
lg.error("Plugin %s not loaded: AssertionError, %s" % (plugin, e))
continue
except Exception as e:
lg.error("Plugin %s not loaded: %s" % (plugin, e))
lg.exception(e)
continue
lg.info("Loaded plugin %s" % plugin)
if len(self.plugins) == 0:
lg.error("No plugins loaded!")
raise NoRunningPlugins("No plugins loaded!")
|
AssertionError
|
dataset/ETHPy150Open gooddata/smoker/smoker/server/plugins/__init__.py/PluginManager.load_plugins
|
6,760
|
def get_template(self, name):
"""
Return template parameters
"""
if not isinstance(self.conf_templates, dict):
raise NoTemplatesConfigured
try:
params = self.conf_templates[name]
except __HOLE__:
raise TemplateNotFound("Can't find configured template %s" % name)
return params
|
KeyError
|
dataset/ETHPy150Open gooddata/smoker/smoker/server/plugins/__init__.py/PluginManager.get_template
|
6,761
|
def get_action(self, name):
"""
Return template parameters
"""
if not isinstance(self.conf_actions, dict):
raise NoActionsConfigured
try:
params = self.conf_actions[name]
except __HOLE__:
raise ActionNotFound("Can't find configured action %s" % name)
return params
|
KeyError
|
dataset/ETHPy150Open gooddata/smoker/smoker/server/plugins/__init__.py/PluginManager.get_action
|
6,762
|
def get_plugin(self, name):
"""
Return single plugin
"""
try:
return self.plugins[name]
except __HOLE__:
raise NoSuchPlugin("Plugin %s not found" % name)
|
KeyError
|
dataset/ETHPy150Open gooddata/smoker/smoker/server/plugins/__init__.py/PluginManager.get_plugin
|
6,763
|
def run_command(self, command, timeout=0):
"""
Run system command and parse output
"""
result = Result()
lg.debug("Plugin %s: executing command %s" % (self.name, command))
try:
stdout, stderr, returncode = smoker.util.command.execute(
command, timeout=timeout)
except smoker.util.command.ExecutionTimeout as e:
raise PluginExecutionTimeout(e)
except Exception as e:
lg.exception(e)
raise PluginExecutionError(
"Can't execute command %s: %s" % (command, e))
if returncode:
status = 'ERROR'
else:
status = 'OK'
# Run parser or parse output from stdin
if self.params['Parser']:
try:
result = self.run_parser(stdout, stderr)
except Exception as e:
# Error result
result.set_status('ERROR')
result.add_error(re.sub('^\n', '', stderr.strip()))
result.add_error('Parser run failed: %s' % e)
result.add_info(re.sub('^\n', '', stdout.strip()))
else:
# Try to parse JSON output
json = None
try:
json = simplejson.loads(stdout)
except:
pass
if json:
# Output is JSON, check it has valid status or raise exception
if json.has_key('status') and json['status'] in [ 'OK', 'ERROR', 'WARN' ]:
try:
result.set_result(json, validate=True)
except __HOLE__ as e:
raise PluginMalformedOutput("Invalid JSON structure: %s" % e)
else:
raise PluginMalformedOutput("Missing status in JSON output: %s" % json)
else:
# Output is not JSON, use stdout/stderr and return value
lg.debug("Plugin %s: using non-JSON output" % self.name)
result.set_status(status)
if stderr:
result.add_error(re.sub('^\n', '', stderr.strip()))
if stdout:
result.add_info(re.sub('^\n', '', stdout.strip()))
return result
|
ValidationError
|
dataset/ETHPy150Open gooddata/smoker/smoker/server/plugins/__init__.py/PluginWorker.run_command
|
6,764
|
def run_parser(self, stdout, stderr):
"""
Run parser on given stdout/stderr
Raise exceptions if anything happen
"""
lg.debug("Plugin %s: running parser %s" % (self.name, self.params['Parser']))
if stdout:
lg.debug("Plugin %s: stdout: %s" % (self.name, stdout.strip()))
if stderr:
lg.debug("Plugin %s: stderr: %s" % (self.name, stderr.strip()))
try:
parser = __import__(self.params['Parser'], globals(), locals(), ['Parser'], -1)
except __HOLE__ as e:
lg.error("Plugin %s: can't load parser %s: %s" % (self.name, self.params['Parser'], e))
raise
try:
parser = parser.Parser(stdout, stderr)
except Exception as e:
lg.error("Plugin %s: can't initialize parser: %s" % (self.name, e))
lg.exception(e)
raise
try:
result = parser.parse()
except Exception as e:
lg.error("Plugin %s: parser execution failed: %s" % (self.name, e))
lg.exception(e)
raise
return result
|
ImportError
|
dataset/ETHPy150Open gooddata/smoker/smoker/server/plugins/__init__.py/PluginWorker.run_parser
|
6,765
|
def run_module(self, module, **kwargs):
"""
Run Python module
Raise exceptions if anything happen
"""
lg.debug("Plugin %s: running module %s" % (self.name, module))
try:
plugin = __import__(module, globals(), locals(), ['Plugin'], -1)
except __HOLE__ as e:
lg.error("Plugin %s: can't load module %s: %s" % (self.name, module, e))
raise
try:
plugin = plugin.Plugin(self, **kwargs)
except Exception as e:
lg.error("Plugin %s: can't initialize plugin module: %s" % (self.name, e))
lg.exception(e)
raise
signal.signal(signal.SIGALRM, alarm_handler)
if 'timeout' not in kwargs:
kwargs['timeout'] = self.get_param('Timeout', default=120)
try:
signal.alarm(kwargs['timeout'])
result = plugin.run()
except PluginExecutionTimeout:
result = Result()
result.set_status('ERROR')
result.add_error('Plugin execution exceeded timeout %d seconds' %
kwargs['timeout'])
except Exception as e:
lg.error("Plugin %s: module execution failed: %s" % (self.name, e))
lg.exception(e)
signal.alarm(0)
raise
signal.alarm(0)
return result
|
ImportError
|
dataset/ETHPy150Open gooddata/smoker/smoker/server/plugins/__init__.py/PluginWorker.run_module
|
6,766
|
def run_plugin(self, force=False):
"""
Run plugin, save result and schedule next run
:param force: forced run
:type force: bool
"""
# External command will be executed
if self.params['Command']:
command = self.params['Command'] % self.escape(dict(self.params))
# Execute external command to get result
try:
result = self.run_command(command, self.params['Timeout'])
except Exception as e:
lg.error("Plugin %s: %s" % (self.name, e))
result = Result()
result.set_status('ERROR')
result.add_error(e)
# Python module will be executed
elif self.params['Module']:
try:
result = self.run_module(self.params['Module'])
except Exception as e:
lg.error("Plugin %s: %s" % (self.name, e))
result = Result()
result.set_status('ERROR')
result.add_error(re.sub('^\n', '', ('%s' % e).strip()))
# No module or command to run
else:
lg.error("Plugin %s: no Command or Module to execute!" % self.name)
result = Result()
result.set_status('ERROR')
result.add_error('No Command or Module to execute!')
# Run action on result
if self.params['Action']:
lg.debug("Plugin %s: executing action" % self.name)
# Execute external command
if self.params['Action']['Command']:
# Add parameters to command with format
params = dict(self.params, **result.result)
params = self.escape(params)
try:
action = self.run_command(self.params['Action']['Command'] % params, timeout=self.params['Action']['Timeout'])
except Exception as e:
lg.error("Plugin %s: %s" % (self.name, e))
action = Result()
action.set_status('ERROR')
action.add_error(e)
# Execute Python module
elif self.params['Action']['Module']:
try:
action = self.run_module(self.params['Action']['Module'], result=result)
except Exception as e:
lg.error("Plugin %s: %s" % (self.name, e))
action = Result()
action.set_status('ERROR')
action.add_error(e)
# No command or module to execute
else:
lg.error("Plugin %s: no Action Command or Module to execute!" % self.name)
action = Result()
action.set_status('ERROR')
action.add_error('No Command or Module to execute!')
# Add action result to plugin result
result.set_action(action)
result.set_forced(force)
# send to the daemon
try:
self.result = result.get_result()
except __HOLE__ as e:
lg.error("Plugin %s: ValidationError: %s" % (self.name, e))
result = Result()
result.set_status('ERROR')
result.add_error('ValidationError: %s' % e)
result.set_forced(force)
self.result = result.get_result()
# Log result
lg.info("Plugin %s result: %s" % (self.name, result.get_result()))
|
ValidationError
|
dataset/ETHPy150Open gooddata/smoker/smoker/server/plugins/__init__.py/PluginWorker.run_plugin
|
6,767
|
def get_param(self, name, default=None):
"""
Get plugin parameter
Return default if parameter doesn't exist
"""
try:
return self.params[name]
except __HOLE__:
return default
|
KeyError
|
dataset/ETHPy150Open gooddata/smoker/smoker/server/plugins/__init__.py/PluginWorker.get_param
|
6,768
|
def drop_privileged(self):
if (self.params['uid'] == 'default' and
self.params['gid'] == 'default'):
return
lg.debug("Plugin %s: dropping privileges to %s/%s"
% (self.name, self.params['uid'], self.params['gid']))
try:
os.setegid(self.params['gid'])
os.seteuid(self.params['uid'])
except __HOLE__ as e:
lg.error("Plugin %s: config parameters uid/gid have to be "
"integers: %s" % (self.name, e))
raise
except OSError as e:
lg.error("Plugin %s: can't switch effective UID/GID to %s/%s: %s"
% (self.name, self.params['uid'], self.params['gid'], e))
raise
|
TypeError
|
dataset/ETHPy150Open gooddata/smoker/smoker/server/plugins/__init__.py/PluginWorker.drop_privileged
|
6,769
|
def add_msg(self, level, msg, multiline=False):
"""
Add message into result
Empty messages are skipped
multiline - don't split message lines into
multiple messages
"""
# Create messages structure if it doesn't exists
if not self.result['messages']:
self.result['messages'] = {
'info' : [],
'error': [],
'warn' : [],
}
if not multiline:
messages = str(msg).split('\n')
else:
messages = [str(msg)]
for message in messages:
# Skip adding empty message
if not str(msg).strip():
continue
try:
self.result['messages'][level].append(str(message).strip())
except __HOLE__:
raise InvalidArgument("Level has to be info, error or warn")
|
KeyError
|
dataset/ETHPy150Open gooddata/smoker/smoker/server/plugins/__init__.py/Result.add_msg
|
6,770
|
def _validate_component_result(self, result):
"""
Validate componentResults
"""
# Component result can be empty
if result == None:
return True
if not isinstance(result, dict):
raise ValidationError("Component result must be dictionary")
for name, component in result.iteritems():
try:
self._validate_msg(component['messages'])
except KeyError:
raise ValidationError("Component %s doesn't have message" % name)
try:
self._validate_status(component['status'])
except __HOLE__:
raise ValidationError("Component %s doesn't have status" % name)
|
KeyError
|
dataset/ETHPy150Open gooddata/smoker/smoker/server/plugins/__init__.py/Result._validate_component_result
|
6,771
|
def _validate_action(self, result):
"""
Validate action result
"""
# Action can be empty
if result == None:
return True
if not isinstance(result, dict):
raise ValidationError("Action result must be dictionary")
try:
self._validate_msg(result['messages'])
except __HOLE__:
raise ValidationError("Action doesn't have message")
try:
self._validate_status(result['status'])
except KeyError:
raise ValidationError("Action doesn't have status")
|
KeyError
|
dataset/ETHPy150Open gooddata/smoker/smoker/server/plugins/__init__.py/Result._validate_action
|
6,772
|
def set_result(self, result, validate=False):
"""
Set result
Not all fields have to be filled
This method should be rarely used because it can
cause invalid results
Use validate=True and catch ValidationError to ensure
given result is correct
Result = <{
'status' : 'OK' | 'WARN' | 'ERROR',
'messages' : Messages | NULL,
'componentResults' : (STRING : {
'status' : 'OK' | 'WARN' | 'ERROR',
'messages': Messages
})* | NULL,
'action' : Result | NULL
}>
Messages = {
'info' : [STRING],
'error': [STRING],
'warn' : [STRING]
}
"""
fields = [ 'status', 'messages', 'componentResults', 'action' ]
for field in fields:
try:
self.result[field] = result[field]
except __HOLE__:
# just skip missing fields
pass
if validate == True:
self.validate()
|
KeyError
|
dataset/ETHPy150Open gooddata/smoker/smoker/server/plugins/__init__.py/Result.set_result
|
6,773
|
def main():
D = {}
a = False
try:
a = D['XXX']
except __HOLE__:
a = True
TestError( a == True )
|
KeyError
|
dataset/ETHPy150Open PythonJS/PythonJS/regtests/exceptions/KeyError.py/main
|
6,774
|
def _from_recon_cache(self, cache_keys, cache_file, openr=open):
"""retrieve values from a recon cache file
:params cache_keys: list of cache items to retrieve
:params cache_file: cache file to retrieve items from.
:params openr: open to use [for unittests]
:return: dict of cache items and their values or none if not found
"""
try:
with openr(cache_file, 'r') as f:
recondata = json.load(f)
return dict((key, recondata.get(key)) for key in cache_keys)
except IOError:
self.logger.exception(_('Error reading recon cache file'))
except __HOLE__:
self.logger.exception(_('Error parsing recon cache file'))
except Exception:
self.logger.exception(_('Error retrieving recon data'))
return dict((key, None) for key in cache_keys)
|
ValueError
|
dataset/ETHPy150Open openstack/swift/swift/common/middleware/recon.py/ReconMiddleware._from_recon_cache
|
6,775
|
def get_unmounted(self):
"""list unmounted (failed?) devices"""
mountlist = []
for entry in os.listdir(self.devices):
if not os.path.isdir(os.path.join(self.devices, entry)):
continue
try:
mounted = check_mount(self.devices, entry)
except __HOLE__ as err:
mounted = str(err)
mpoint = {'device': entry, 'mounted': mounted}
if mpoint['mounted'] is not True:
mountlist.append(mpoint)
return mountlist
|
OSError
|
dataset/ETHPy150Open openstack/swift/swift/common/middleware/recon.py/ReconMiddleware.get_unmounted
|
6,776
|
def get_diskusage(self):
"""get disk utilization statistics"""
devices = []
for entry in os.listdir(self.devices):
if not os.path.isdir(os.path.join(self.devices, entry)):
continue
try:
mounted = check_mount(self.devices, entry)
except __HOLE__ as err:
devices.append({'device': entry, 'mounted': str(err),
'size': '', 'used': '', 'avail': ''})
continue
if mounted:
path = os.path.join(self.devices, entry)
disk = os.statvfs(path)
capacity = disk.f_bsize * disk.f_blocks
available = disk.f_bsize * disk.f_bavail
used = disk.f_bsize * (disk.f_blocks - disk.f_bavail)
devices.append({'device': entry, 'mounted': True,
'size': capacity, 'used': used,
'avail': available})
else:
devices.append({'device': entry, 'mounted': False,
'size': '', 'used': '', 'avail': ''})
return devices
|
OSError
|
dataset/ETHPy150Open openstack/swift/swift/common/middleware/recon.py/ReconMiddleware.get_diskusage
|
6,777
|
def get_ring_md5(self, openr=open):
"""get all ring md5sum's"""
sums = {}
for ringfile in self.rings:
md5sum = md5()
if os.path.exists(ringfile):
try:
with openr(ringfile, 'rb') as f:
block = f.read(4096)
while block:
md5sum.update(block)
block = f.read(4096)
sums[ringfile] = md5sum.hexdigest()
except __HOLE__ as err:
sums[ringfile] = None
if err.errno != errno.ENOENT:
self.logger.exception(_('Error reading ringfile'))
return sums
|
IOError
|
dataset/ETHPy150Open openstack/swift/swift/common/middleware/recon.py/ReconMiddleware.get_ring_md5
|
6,778
|
def get_swift_conf_md5(self, openr=open):
"""get md5 of swift.conf"""
md5sum = md5()
try:
with openr(SWIFT_CONF_FILE, 'r') as fh:
chunk = fh.read(4096)
while chunk:
md5sum.update(chunk)
chunk = fh.read(4096)
except __HOLE__ as err:
if err.errno != errno.ENOENT:
self.logger.exception(_('Error reading swift.conf'))
hexsum = None
else:
hexsum = md5sum.hexdigest()
return {SWIFT_CONF_FILE: hexsum}
|
IOError
|
dataset/ETHPy150Open openstack/swift/swift/common/middleware/recon.py/ReconMiddleware.get_swift_conf_md5
|
6,779
|
def get_socket_info(self, openr=open):
"""
get info from /proc/net/sockstat and sockstat6
Note: The mem value is actually kernel pages, but we return bytes
allocated based on the systems page size.
"""
sockstat = {}
try:
with openr('/proc/net/sockstat', 'r') as proc_sockstat:
for entry in proc_sockstat:
if entry.startswith("TCP: inuse"):
tcpstats = entry.split()
sockstat['tcp_in_use'] = int(tcpstats[2])
sockstat['orphan'] = int(tcpstats[4])
sockstat['time_wait'] = int(tcpstats[6])
sockstat['tcp_mem_allocated_bytes'] = \
int(tcpstats[10]) * getpagesize()
except __HOLE__ as e:
if e.errno != errno.ENOENT:
raise
try:
with openr('/proc/net/sockstat6', 'r') as proc_sockstat6:
for entry in proc_sockstat6:
if entry.startswith("TCP6: inuse"):
sockstat['tcp6_in_use'] = int(entry.split()[2])
except IOError as e:
if e.errno != errno.ENOENT:
raise
return sockstat
|
IOError
|
dataset/ETHPy150Open openstack/swift/swift/common/middleware/recon.py/ReconMiddleware.get_socket_info
|
6,780
|
def begin(self):
from django.conf import settings
from django.core.handlers.wsgi import WSGIHandler
from django.core.servers.basehttp import AdminMediaHandler
from django.test.simple import TEST_MODULE
from tddspry.django.settings import IP, PORT
from twill import add_wsgi_intercept
log.debug('DjangoPlugin start')
# Find to Django models in tests modules for each of ``INSTALLED_APPS``
for label in settings.INSTALLED_APPS:
tests = label + '.' + TEST_MODULE
try:
self.load_tests(tests)
except (AttributeError, __HOLE__):
pass
# Setup Django test environment and test database
self.setup_django()
# Setup Twill for testing with Django
app = AdminMediaHandler(WSGIHandler())
add_wsgi_intercept(IP, PORT, lambda: app)
|
ImportError
|
dataset/ETHPy150Open playpauseandstop/tddspry/tddspry/noseplugins/djangoplugin.py/DjangoPlugin.begin
|
6,781
|
def configure(self, options, config):
Plugin.configure(self, options, config)
# Do nothing if plugin not enabled
if not self.enabled:
return
# Check that Django and twill libraries available in this system
if self.enabled:
for lib in ('Django', 'twill'):
try:
__import__(lib.lower())
except __HOLE__, e:
log.error('%s not available: %s' % (lib, e))
self.enabled = False
return
# Get user defined options
self.error_dir = options.error_dir
self.settings = options.settings
self.verbosity = options.verbosity
self.test_match_re = config.testMatch
# Try to load Django project settings
self.load_settings(self.settings)
# Make sure that ``TWILL_ERROR_DIR`` set to ``os.environ`` if needed
if self.error_dir:
os.environ['TWILL_ERROR_DIR'] = self.error_dir
|
ImportError
|
dataset/ETHPy150Open playpauseandstop/tddspry/tddspry/noseplugins/djangoplugin.py/DjangoPlugin.configure
|
6,782
|
def load_settings(self, settings):
# If settings module was set try to load or die with error
if settings is not None:
try:
resolve_name(settings)
except (AttributeError, ImportError):
return self.error(settings)
else:
settings = 'settings'
try:
resolve_name(settings)
except (AttributeError, __HOLE__):
dirname = os.getcwd()
loaded = False
subdirs = \
filter(lambda name: os.path.isdir(os.path.join(dirname,
name)),
os.listdir(dirname))
subdirs.sort()
for name in subdirs:
settings = name + '.settings'
try:
resolve_name(settings)
except (AttributeError, ImportError, ValueError):
pass
else:
loaded = True
break
if not loaded:
self.error(None, dirname, subdirs)
os.environ['DJANGO_SETTINGS_MODULE'] = settings
|
ImportError
|
dataset/ETHPy150Open playpauseandstop/tddspry/tddspry/noseplugins/djangoplugin.py/DjangoPlugin.load_settings
|
6,783
|
def test_null_session(self):
app = flask.Flask(__name__)
Session(app)
def expect_exception(f, *args, **kwargs):
try:
f(*args, **kwargs)
except __HOLE__ as e:
self.assertTrue(e.args and 'session is unavailable' in e.args[0])
else:
self.assertTrue(False, 'expected exception')
with app.test_request_context():
self.assertTrue(flask.session.get('missing_key') is None)
expect_exception(flask.session.__setitem__, 'foo', 42)
expect_exception(flask.session.pop, 'foo')
|
RuntimeError
|
dataset/ETHPy150Open fengsp/flask-session/test_session.py/FlaskSessionTestCase.test_null_session
|
6,784
|
def _get_cached_mine_data(self, *minion_ids):
# Return one dict with the cached mine data of the targeted minions
mine_data = dict([(minion_id, {}) for minion_id in minion_ids])
if (not self.opts.get('minion_data_cache', False)
and not self.opts.get('enforce_mine_cache', False)):
log.debug('Skipping cached mine data minion_data_cache'
'and enfore_mine_cache are both disabled.')
return mine_data
mdir = os.path.join(self.opts['cachedir'], 'minions')
try:
for minion_id in minion_ids:
if not salt.utils.verify.valid_id(self.opts, minion_id):
continue
path = os.path.join(mdir, minion_id, 'mine.p')
if os.path.isfile(path):
with salt.utils.fopen(path, 'rb') as fp_:
mdata = self.serial.loads(fp_.read())
if isinstance(mdata, dict):
mine_data[minion_id] = mdata
except (OSError, __HOLE__):
return mine_data
return mine_data
|
IOError
|
dataset/ETHPy150Open saltstack/salt/salt/utils/master.py/MasterPillarUtil._get_cached_mine_data
|
6,785
|
def _get_cached_minion_data(self, *minion_ids):
# Return two separate dicts of cached grains and pillar data of the
# minions
grains = dict([(minion_id, {}) for minion_id in minion_ids])
pillars = grains.copy()
if not self.opts.get('minion_data_cache', False):
log.debug('Skipping cached data because minion_data_cache is not '
'enabled.')
return grains, pillars
mdir = os.path.join(self.opts['cachedir'], 'minions')
try:
for minion_id in minion_ids:
if not salt.utils.verify.valid_id(self.opts, minion_id):
continue
path = os.path.join(mdir, minion_id, 'data.p')
if os.path.isfile(path):
with salt.utils.fopen(path, 'rb') as fp_:
mdata = self.serial.loads(fp_.read())
if mdata.get('grains', False):
grains[minion_id] = mdata['grains']
if mdata.get('pillar', False):
pillars[minion_id] = mdata['pillar']
except (__HOLE__, IOError):
return grains, pillars
return grains, pillars
|
OSError
|
dataset/ETHPy150Open saltstack/salt/salt/utils/master.py/MasterPillarUtil._get_cached_minion_data
|
6,786
|
def clear_cached_minion_data(self,
clear_pillar=False,
clear_grains=False,
clear_mine=False,
clear_mine_func=None):
'''
Clear the cached data/files for the targeted minions.
'''
clear_what = []
if clear_pillar:
clear_what.append('pillar')
if clear_grains:
clear_what.append('grains')
if clear_mine:
clear_what.append('mine')
if clear_mine_func is not None:
clear_what.append('mine_func: \'{0}\''.format(clear_mine_func))
if not len(clear_what):
log.debug('No cached data types specified for clearing.')
return False
minion_ids = self._tgt_to_list()
log.debug('Clearing cached {0} data for: {1}'.format(
', '.join(clear_what),
minion_ids))
if clear_pillar == clear_grains:
# clear_pillar and clear_grains are both True or both False.
# This means we don't deal with pillar/grains caches at all.
grains = {}
pillars = {}
else:
# Unless both clear_pillar and clear_grains are True, we need
# to read in the pillar/grains data since they are both stored
# in the same file, 'data.p'
grains, pillars = self._get_cached_minion_data(*minion_ids)
try:
for minion_id in minion_ids:
if not salt.utils.verify.valid_id(self.opts, minion_id):
continue
cdir = os.path.join(self.opts['cachedir'], 'minions', minion_id)
if not os.path.isdir(cdir):
# Cache dir for this minion does not exist. Nothing to do.
continue
data_file = os.path.join(cdir, 'data.p')
mine_file = os.path.join(cdir, 'mine.p')
minion_pillar = pillars.pop(minion_id, False)
minion_grains = grains.pop(minion_id, False)
if ((clear_pillar and clear_grains) or
(clear_pillar and not minion_grains) or
(clear_grains and not minion_pillar)):
# Not saving pillar or grains, so just delete the cache file
os.remove(os.path.join(data_file))
elif clear_pillar and minion_grains:
tmpfh, tmpfname = tempfile.mkstemp(dir=cdir)
os.close(tmpfh)
with salt.utils.fopen(tmpfname, 'w+b') as fp_:
fp_.write(self.serial.dumps({'grains': minion_grains}))
salt.utils.atomicfile.atomic_rename(tmpfname, data_file)
elif clear_grains and minion_pillar:
tmpfh, tmpfname = tempfile.mkstemp(dir=cdir)
os.close(tmpfh)
with salt.utils.fopen(tmpfname, 'w+b') as fp_:
fp_.write(self.serial.dumps({'pillar': minion_pillar}))
salt.utils.atomicfile.atomic_rename(tmpfname, data_file)
if clear_mine:
# Delete the whole mine file
os.remove(os.path.join(mine_file))
elif clear_mine_func is not None:
# Delete a specific function from the mine file
with salt.utils.fopen(mine_file, 'rb') as fp_:
mine_data = self.serial.loads(fp_.read())
if isinstance(mine_data, dict):
if mine_data.pop(clear_mine_func, False):
tmpfh, tmpfname = tempfile.mkstemp(dir=cdir)
os.close(tmpfh)
with salt.utils.fopen(tmpfname, 'w+b') as fp_:
fp_.write(self.serial.dumps(mine_data))
salt.utils.atomicfile.atomic_rename(
tmpfname,
mine_file)
except (OSError, __HOLE__):
return True
return True
|
IOError
|
dataset/ETHPy150Open saltstack/salt/salt/utils/master.py/MasterPillarUtil.clear_cached_minion_data
|
6,787
|
def run(self):
'''
Main loop of the ConCache, starts updates in intervals and
answers requests from the MWorkers
'''
context = zmq.Context()
# the socket for incoming cache requests
creq_in = context.socket(zmq.REP)
creq_in.setsockopt(zmq.LINGER, 100)
creq_in.bind('ipc://' + self.cache_sock)
# the socket for incoming cache-updates from workers
cupd_in = context.socket(zmq.SUB)
cupd_in.setsockopt(zmq.SUBSCRIBE, '')
cupd_in.setsockopt(zmq.LINGER, 100)
cupd_in.bind('ipc://' + self.update_sock)
# the socket for the timer-event
timer_in = context.socket(zmq.SUB)
timer_in.setsockopt(zmq.SUBSCRIBE, '')
timer_in.setsockopt(zmq.LINGER, 100)
timer_in.connect('ipc://' + self.upd_t_sock)
poller = zmq.Poller()
poller.register(creq_in, zmq.POLLIN)
poller.register(cupd_in, zmq.POLLIN)
poller.register(timer_in, zmq.POLLIN)
# our serializer
serial = salt.payload.Serial(self.opts.get('serial', ''))
# register a signal handler
signal.signal(signal.SIGINT, self.signal_handler)
# secure the sockets from the world
self.secure()
log.info('ConCache started')
while self.running:
# we check for new events with the poller
try:
socks = dict(poller.poll(1))
except __HOLE__:
self.stop()
except zmq.ZMQError as zmq_err:
log.error('ConCache ZeroMQ-Error occurred')
log.exception(zmq_err)
self.stop()
# check for next cache-request
if socks.get(creq_in) == zmq.POLLIN:
msg = serial.loads(creq_in.recv())
log.debug('ConCache Received request: {0}'.format(msg))
# requests to the minion list are send as str's
if isinstance(msg, str):
if msg == 'minions':
# Send reply back to client
reply = serial.dumps(self.minions)
creq_in.send(reply)
# check for next cache-update from workers
if socks.get(cupd_in) == zmq.POLLIN:
new_c_data = serial.loads(cupd_in.recv())
# tell the worker to exit
#cupd_in.send(serial.dumps('ACK'))
# check if the returned data is usable
if not isinstance(new_c_data, list):
log.error('ConCache Worker returned unusable result')
del new_c_data
continue
# the cache will receive lists of minions
# 1. if the list only has 1 item, its from an MWorker, we append it
# 2. if the list contains another list, its from a CacheWorker and
# the currently cached minions are replaced with that list
# 3. anything else is considered malformed
try:
if len(new_c_data) == 0:
log.debug('ConCache Got empty update from worker')
continue
data = new_c_data[0]
if isinstance(data, str):
if data not in self.minions:
log.debug('ConCache Adding minion {0} to cache'.format(new_c_data[0]))
self.minions.append(data)
elif isinstance(data, list):
log.debug('ConCache Replacing minion list from worker')
self.minions = data
except IndexError:
log.debug('ConCache Got malformed result dict from worker')
del new_c_data
log.info('ConCache {0} entries in cache'.format(len(self.minions)))
# check for next timer-event to start new jobs
if socks.get(timer_in) == zmq.POLLIN:
sec_event = serial.loads(timer_in.recv())
# update the list every 30 seconds
if int(sec_event % 30) == 0:
cw = CacheWorker(self.opts)
cw.start()
self.stop()
creq_in.close()
cupd_in.close()
timer_in.close()
context.term()
log.debug('ConCache Shutting down')
|
KeyboardInterrupt
|
dataset/ETHPy150Open saltstack/salt/salt/utils/master.py/ConnectedCache.run
|
6,788
|
def build(self, interfaces):
networks = {}
for interface in interfaces:
try:
network_label = self._extract_network_label(interface)
except __HOLE__:
continue
if network_label not in networks:
networks[network_label] = []
ip_addresses = list(self._extract_ipv4_addresses(interface))
if FLAGS.use_ipv6:
ipv6_address = self._extract_ipv6_address(interface)
if ipv6_address is not None:
ip_addresses.append(ipv6_address)
networks[network_label].extend(ip_addresses)
return networks
|
TypeError
|
dataset/ETHPy150Open nii-cloud/dodai-compute/nova/api/openstack/views/addresses.py/ViewBuilderV11.build
|
6,789
|
def build_network(self, interfaces, requested_network):
for interface in interfaces:
try:
network_label = self._extract_network_label(interface)
except __HOLE__:
continue
if network_label == requested_network:
ips = list(self._extract_ipv4_addresses(interface))
ipv6 = self._extract_ipv6_address(interface)
if ipv6 is not None:
ips.append(ipv6)
return {network_label: ips}
return None
|
TypeError
|
dataset/ETHPy150Open nii-cloud/dodai-compute/nova/api/openstack/views/addresses.py/ViewBuilderV11.build_network
|
6,790
|
def _extract_network_label(self, interface):
try:
return interface['network']['label']
except (__HOLE__, KeyError) as exc:
raise TypeError
|
TypeError
|
dataset/ETHPy150Open nii-cloud/dodai-compute/nova/api/openstack/views/addresses.py/ViewBuilderV11._extract_network_label
|
6,791
|
def get_coord_meta(frame):
coord_meta = {}
coord_meta['type'] = ('longitude', 'latitude')
coord_meta['wrap'] = (None, None)
coord_meta['unit'] = (u.deg, u.deg)
try:
from astropy.coordinates import frame_transform_graph
if isinstance(frame, six.string_types):
frame = frame_transform_graph.lookup_name(frame)
names = list(frame().representation_component_names.keys())
coord_meta['name'] = names[:2]
except __HOLE__:
if isinstance(frame, six.string_types):
if frame in ('fk4', 'fk5', 'icrs'):
coord_meta['name'] = ('ra', 'dec')
elif frame == 'galactic':
coord_meta['name'] = ('l', 'b')
else:
raise ValueError("Unknown frame: {0}".format(frame))
return coord_meta
|
ImportError
|
dataset/ETHPy150Open glue-viz/glue/glue/external/wcsaxes/utils.py/get_coord_meta
|
6,792
|
def containers_equal(c1, c2):
"""Check that two container-like items have the same contents,
ignoring differences relating to the type of container
"""
if isinstance(c1, six.string_types):
return c1 == c2
try:
for a, b in zip(c1, c2):
if not containers_equal(a, b):
return False
if isinstance(c1, dict) and isinstance(c2, dict):
if not containers_equal(c1[a], c2[b]):
return False
except __HOLE__:
pass
return True
|
TypeError
|
dataset/ETHPy150Open glue-viz/glue/glue/core/tests/test_state.py/containers_equal
|
6,793
|
def list(self, request):
try:
timestamp = int(time())
random = randint(0, 100000)
result = add.apply_async(args=[timestamp, random])
now = datetime.datetime.now()
while (now + datetime.timedelta(seconds=10)) > datetime.datetime.now():
if result.result == timestamp + random:
return Response("OK", status=status.HTTP_200_OK)
sleep(0.5)
except __HOLE__:
pass
return Response("FAILED", status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
IOError
|
dataset/ETHPy150Open ustream/openduty/openduty/healthcheck.py/CeleryHealthCheckViewSet.list
|
6,794
|
def stringify(s, stype=None, fn=None):
''' Converts elements of a complex data structure to strings
The data structure can be a multi-tiered one - with tuples and lists etc
This method will loop through each and convert everything to string.
For example - it can be -
[[{'a1': {'a2': {'a3': ('a4', timedelta(0, 563)), 'a5': {'a6': datetime()}}}}]]
which will be converted to -
[[{'a1': {'a2': {'a3': ('a4', '0:09:23'), 'a5': {'a6': '2009-05-27 16:19:52.401500' }}}}]]
@param stype: If only one type of data element needs to be converted to
string without affecting others, stype can be used.
In the earlier example, if it is called with stringify(s, stype=datetime.timedelta)
the result would be
[[{'a1': {'a2': {'a3': ('a4', '0:09:23'), 'a5': {'a6': datetime() }}}}]]
Also, even though the name is stringify, any function can be run on it, based on
parameter fn. If fn is None, it will be stringified.
'''
if type(s) in [list, set, dict, tuple]:
if isinstance(s, dict):
for k in s:
s[k] = stringify(s[k], stype, fn)
elif type(s) in [list, set]:
for i, k in enumerate(s):
s[i] = stringify(k, stype, fn)
else: #tuple
tmp = []
for k in s:
tmp.append(stringify(k, stype, fn))
s = tuple(tmp)
else:
if fn:
if not stype or (stype == type(s)):
return fn(s)
else:
# To do str(s). But, str() can fail on unicode. So, use .encode instead
if not stype or (stype == type(s)):
try:
return six.text_type(s)
#return s.encode('ascii', 'replace')
except AttributeError:
return str(s)
except __HOLE__:
return s.decode('ascii', 'replace')
return s
|
UnicodeDecodeError
|
dataset/ETHPy150Open kra3/py-ga-mob/pyga/utils.py/stringify
|
6,795
|
def _commits(self, head='HEAD'):
"""Returns a list of the commits reachable from head.
:return: List of commit objects. the first of which will be the commit
of head, then following theat will be the parents.
:raise: RepoError if any no commits are referenced, including if the
head parameter isn't the sha of a commit.
"""
pending_commits = [head]
history = []
while pending_commits != []:
head = pending_commits.pop(0)
try:
commit = self[head]
except __HOLE__:
raise KeyError(head)
if type(commit) != Commit:
raise TypeError(commit)
if commit in history:
continue
i = 0
for known_commit in history:
if known_commit.commit_time > commit.commit_time:
break
i += 1
history.insert(i, commit)
pending_commits += commit.parents
return history
|
KeyError
|
dataset/ETHPy150Open rsgalloway/grit/grit/repo/local.py/Local._commits
|
6,796
|
def versions(self, version=None):
"""
List of Versions of this repository.
:param version: Version index.
:param rev: Commit sha or ref.
:return: List of Version objects matching params.
"""
try:
versions = [Version(self, c) for c in self._commits()]
except Exception, e:
log.debug('No versions exist')
return []
if version is not None and versions:
try:
versions = versions[version]
except __HOLE__:
raise VersionError('Version %s does not exist' % version)
return versions
|
IndexError
|
dataset/ETHPy150Open rsgalloway/grit/grit/repo/local.py/Local.versions
|
6,797
|
def _create_eager_adapter(self, context, row, adapter, path, loadopt):
user_defined_adapter = self._init_user_defined_eager_proc(
loadopt, context) if loadopt else False
if user_defined_adapter is not False:
decorator = user_defined_adapter
# user defined eagerloads are part of the "primary"
# portion of the load.
# the adapters applied to the Query should be honored.
if context.adapter and decorator:
decorator = decorator.wrap(context.adapter)
elif context.adapter:
decorator = context.adapter
else:
decorator = path.get(context.attributes, "eager_row_processor")
if decorator is None:
return False
try:
self.mapper.identity_key_from_row(row, decorator)
return decorator
except __HOLE__:
# no identity key - don't return a row
# processor, will cause a degrade to lazy
return False
|
KeyError
|
dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/sqlalchemy/orm/strategies.py/JoinedLoader._create_eager_adapter
|
6,798
|
def import_modules(modules, safe=True):
'''Safely import a list of *modules*
'''
all = []
for mname in modules:
if mname.endswith('.*'):
to_load = expand_star(mname)
else:
to_load = [mname]
for module in to_load:
try:
all.append(import_module(module))
except __HOLE__:
if not safe:
raise
return all
|
ImportError
|
dataset/ETHPy150Open quantmind/pulsar/pulsar/utils/importer.py/import_modules
|
6,799
|
def import_system_file(mod, safe=True):
if os.path.isfile(mod):
return _import_system_file(mod)
else:
try:
return import_module(mod)
except __HOLE__:
mod2 = os.path.join(mod, '__init__.py')
if os.path.isfile(mod2):
return _import_system_file(mod2)
elif not safe:
raise
pass
|
ImportError
|
dataset/ETHPy150Open quantmind/pulsar/pulsar/utils/importer.py/import_system_file
|