| Unnamed: 0 (int64, 0 to 10k) | function (string, lengths 79 to 138k) | label (string, 20 classes) | info (string, lengths 42 to 261) |
|---|---|---|---|
8,300
|
def GetOptions(self):
    """Retrieves descriptor options.
    This method returns the options set or creates the default options for the
    descriptor.
    """
    if self._options:
        return self._options
    from google.net.proto2.proto import descriptor_pb2
    try:
        options_class = getattr(descriptor_pb2, self._options_class_name)
    except __HOLE__:
        raise RuntimeError('Unknown options class name %s!' %
                           (self._options_class_name))
    self._options = options_class()
    return self._options
|
AttributeError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/google/net/proto2/python/public/descriptor.py/DescriptorBase.GetOptions
|
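For context on the label: `getattr()` raises AttributeError when the named attribute is absent, which is what the masked handler catches. A minimal sketch with a throwaway stand-in module (not the real `descriptor_pb2`):
```python
import types

descriptor_pb2 = types.ModuleType('descriptor_pb2')  # empty stand-in module
try:
    options_class = getattr(descriptor_pb2, 'MessageOptions')  # not defined
except AttributeError:
    print('Unknown options class name MessageOptions!')
```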
8,301
|
@staticmethod
def ProtoTypeToCppProtoType(proto_type):
    """Converts from a Python proto type to a C++ Proto Type.
    The Python ProtocolBuffer classes specify both the 'Python' datatype and the
    'C++' datatype - and they're not the same. This helper method should
    translate from one to another.
    Args:
      proto_type: the Python proto type (descriptor.FieldDescriptor.TYPE_*)
    Returns:
      descriptor.FieldDescriptor.CPPTYPE_*, the C++ type.
    Raises:
      TypeTransformationError: when the Python proto type isn't known.
    """
    try:
        return FieldDescriptor._PYTHON_TO_CPP_PROTO_TYPE_MAP[proto_type]
    except __HOLE__:
        raise TypeTransformationError('Unknown proto_type: %s' % proto_type)
|
KeyError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/google/net/proto2/python/public/descriptor.py/FieldDescriptor.ProtoTypeToCppProtoType
|
8,302
|
def analogRead(adc_pin):
    """ Returns voltage read on given analog input pin. If passed one of
    PyBBIO's AIN0-AIN5 keywords the voltage will be returned in millivolts.
    May also be passed the path to an AIN file as created by a cape overlay,
    in which case the value will be returned as found in the file. """
    global _ADC_INITIALIZED
    if not _ADC_INITIALIZED:
        cape_manager.load(ADC_ENABLE_DTS_OVERLAY, auto_unload=False)
        # Don't unload the overlay on exit for now because it can
        # cause kernel panic.
        bbio.delay(100)
        _ADC_INITIALIZED = True
    if adc_pin in ADC:
        adc_pin = ADC[adc_pin]
        adc_file = glob.glob(adc_pin[0])
        if len(adc_file) == 0:
            overlay = adc_pin[1]
            # Overlay not loaded yet
            cape_manager.load(overlay, auto_unload=False)
            adc_file = glob.glob(adc_pin[0])
    else:
        adc_file = glob.glob(adc_pin)
    if len(adc_file) == 0:
        raise Exception('*Could not load overlay for adc_pin: %s' % adc_pin)
    adc_file = adc_file[0]
    # Occasionally the kernel will be writing to the file when you try
    # to read it; to avoid IOError, try up to 5 times:
    for i in range(5):
        try:
            with open(adc_file, 'rb') as f:
                val = f.read()
            return int(val)
        except __HOLE__:
            continue
    raise Exception('*Could not open AIN file: %s' % adc_file)
|
IOError
|
dataset/ETHPy150Open graycatlabs/PyBBIO/bbio/platform/beaglebone/adc.py/analogRead
|
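The read loop above is a generic retry-on-IOError pattern. A standalone sketch (the path is hypothetical):
```python
def read_adc_value(path, attempts=5):
    for _ in range(attempts):
        try:
            with open(path, 'rb') as f:
                return int(f.read())
        except IOError:
            continue  # kernel may be mid-write; retry
    raise Exception('*Could not open AIN file: %s' % path)
```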
8,303
|
@classmethod
def download_file(cls, uri, fobj):
    """
    Given a URI, download the file to the ``fobj`` file-like object.
    :param str uri: The URI of a file to download.
    :param file fobj: A file-like object to download the file to.
    :rtype: file
    :returns: A file handle to the downloaded file.
    """
    # Breaks the URI into usable components.
    values = get_values_from_media_uri(uri)
    conn = cls._get_aws_s3_connection(values['username'],
                                      values['password'])
    bucket = conn.get_bucket(values['host'])
    key = bucket.get_key(values['path'])
    logger.debug("S3Backend.download_file(): " \
                 "Downloading: %s" % uri)
    try:
        key.get_contents_to_file(fobj)
    except __HOLE__:
        # Raised by ResumableDownloadHandler in boto when the given S3
        # key can't be found.
        message = "The specified input file cannot be found."
        raise InfileNotFoundException(message)
    logger.debug("S3Backend.download_file(): " \
                 "Download of %s completed." % uri)
    return fobj
|
AttributeError
|
dataset/ETHPy150Open duointeractive/media-nommer/media_nommer/core/storage_backends/s3.py/S3Backend.download_file
|
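For context on the label: one plausible failure mode is that a missing-key lookup yields None (boto's `bucket.get_key()` behaves this way, as I recall), and any attribute access on None raises AttributeError. A tiny illustration of just that mechanism, not the S3 call itself:
```python
key = None  # what a missing-key lookup can yield
try:
    key.get_contents_to_file(object())  # attribute access on None
except AttributeError:
    print('The specified input file cannot be found.')
```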
8,304
|
def getName(self):
    try:
        return self.params['name']
    except __HOLE__:
        try:
            return self.params['object']
        except KeyError:
            pass
    return None
|
KeyError
|
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/db/bin/auto_gen_objects.py/Field.getName
|
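The nested try/except above is the classic dict-fallback chain; `dict.get()` expresses the same lookup order. A side-by-side sketch (the `params` contents are hypothetical):
```python
params = {'object': 'module'}      # hypothetical params dict
try:
    name = params['name']
except KeyError:
    name = params.get('object')    # falls back, then None if absent too
print(name)  # -> 'module'
```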
8,305
|
def getPluralName(self):
    try:
        return self.params['pluralName']
    except __HOLE__:
        pass
    return "%ss" % Field.getName(self)
|
KeyError
|
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/db/bin/auto_gen_objects.py/Field.getPluralName
|
8,306
|
def getMapping(self):
    try:
        return self.params['mapping']
    except __HOLE__:
        pass
    return 'one-to-one'
|
KeyError
|
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/db/bin/auto_gen_objects.py/Field.getMapping
|
8,307
|
def getType(self):
    try:
        return self.params['type']
    except __HOLE__:
        pass
    return 'str'
|
KeyError
|
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/db/bin/auto_gen_objects.py/Field.getType
|
8,308
|
def getIndices(self):
    try:
        str = self.params['index']
        indices = str.split()
        for i, index in enumerate(indices):
            compound_idx = index.split(':')
            if len(compound_idx) > 1:
                indices[i] = compound_idx
        return indices
    except __HOLE__:
        pass
    return []
|
KeyError
|
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/db/bin/auto_gen_objects.py/Field.getIndices
|
8,309
|
def isInverse(self):
    try:
        return self.params['inverse'] == 'true'
    except __HOLE__:
        pass
    return False
|
KeyError
|
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/db/bin/auto_gen_objects.py/Field.isInverse
|
8,310
|
def getDiscriminator(self):
    try:
        return self.params['discriminator']
    except __HOLE__:
        pass
    return None
|
KeyError
|
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/db/bin/auto_gen_objects.py/Field.getDiscriminator
|
8,311
|
def getReference(self):
    try:
        return self.params['object']
    except __HOLE__:
        pass
    return ''
|
KeyError
|
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/db/bin/auto_gen_objects.py/Property.getReference
|
8,312
|
def isReference(self):
    try:
        return self.params['ref'] == 'true'
    except __HOLE__:
        pass
    return False
|
KeyError
|
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/db/bin/auto_gen_objects.py/Property.isReference
|
8,313
|
def isPrimaryKey(self):
    try:
        return self.params['primaryKey'] == 'true'
    except __HOLE__:
        pass
    return False
|
KeyError
|
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/db/bin/auto_gen_objects.py/Property.isPrimaryKey
|
8,314
|
def isForeignKey(self):
    try:
        return self.params['foreignKey'] == 'true'
    except __HOLE__:
        pass
    return False
|
KeyError
|
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/db/bin/auto_gen_objects.py/Property.isForeignKey
|
8,315
|
def getName(self):
    try:
        return self.params['name']
    except __HOLE__:
        pass
    return None
|
KeyError
|
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/db/bin/auto_gen_objects.py/Object.getName
|
8,316
|
def getClassName(self):
    try:
        return self.params['className']
    except __HOLE__:
        pass
    return 'DB%s' % capitalizeOne(Object.getName(self))
|
KeyError
|
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/db/bin/auto_gen_objects.py/Object.getClassName
|
8,317
|
def getDiscriminatorProperty(self, dName):
    try:
        for property in self.properties:
            if property.getName() == dName:
                return property
    except __HOLE__:
        pass
    return None
|
KeyError
|
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/db/bin/auto_gen_objects.py/Object.getDiscriminatorProperty
|
8,318
|
def TerminalSize():
    """Returns terminal length and width as a tuple."""
    try:
        with open(os.ctermid(), 'r') as tty:
            length_width = struct.unpack(
                'hh', fcntl.ioctl(tty.fileno(), termios.TIOCGWINSZ, '1234'))
    except (IOError, OSError):
        try:
            length_width = (int(os.environ['LINES']),
                            int(os.environ['COLUMNS']))
        except (__HOLE__, KeyError):
            length_width = (24, 80)
    return length_width
|
ValueError
|
dataset/ETHPy150Open google/textfsm/terminal.py/TerminalSize
|
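Both fallbacks above are reproducible in isolation: `int()` on a garbage LINES value raises ValueError, and on Python 3.3+ `shutil.get_terminal_size()` bundles a similar ioctl/environment/default cascade. A sketch:
```python
import shutil

try:
    lines = int('not-a-number')    # e.g. a corrupt LINES env var
except ValueError:
    lines = 24                     # same default as above
print(lines, shutil.get_terminal_size(fallback=(80, 24)))
```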
8,319
|
def __init__(self, text=None, delay=None):
    """Constructor.
    Args:
      text: A string, the text that will be paged through.
      delay: A boolean, if True will cause a slight delay
        between line printing for more obvious scrolling.
    """
    self._text = text or ''
    self._delay = delay
    try:
        self._tty = open('/dev/tty')
    except __HOLE__:
        # No TTY, revert to stdin
        self._tty = sys.stdin
    self.SetLines(None)
    self.Reset()
|
IOError
|
dataset/ETHPy150Open google/textfsm/terminal.py/Pager.__init__
|
8,320
|
def run_config(cfstring, param):
    if not cfstring:
        print("No command string defined to run {0}.".format(param), file=sys.stderr)
        return
    try:
        cmd = cfstring % param
    except __HOLE__:  # no %s in cfstring, so just stick the param on the end
        cmd = "%s %s" % (cfstring, param)
    print("CMD:", repr(cmd))
    return os.system(cmd)
|
TypeError
|
dataset/ETHPy150Open kdart/pycopia/core/pycopia/devhelpers.py/run_config
|
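The except arm relies on `%` formatting raising TypeError when the template has no conversion specifier for the argument. A quick check (values are hypothetical):
```python
cfstring, param = 'myeditor', 'devhelpers.py'
try:
    cmd = cfstring % param          # no %s in cfstring
except TypeError:
    cmd = '%s %s' % (cfstring, param)
print(repr(cmd))  # -> 'myeditor devhelpers.py'
```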
8,321
|
def find_source_file(modname, path=None):
    if "." in modname:
        pkgname, modname = modname.rsplit(".", 1)
        pkg = find_package(pkgname)
        return find_source_file(modname, pkg.__path__)
    try:
        fo, fpath, (suffix, mode, mtype) = imp.find_module(modname, path)
    except __HOLE__:
        ex, val, tb = sys.exc_info()
        print("{} => {}: {}!".format(modname, ex.__name__, val), file=sys.stderr)
        return None
    if mtype == imp.PKG_DIRECTORY:
        fo, ipath, desc = imp.find_module("__init__", [fpath])
        fo.close()
        return ipath
    elif mtype == imp.PY_SOURCE:
        return fpath
    else:
        return None
|
ImportError
|
dataset/ETHPy150Open kdart/pycopia/core/pycopia/devhelpers.py/find_source_file
|
8,322
|
def find_package(packagename, searchpath=None):
    try:
        return sys.modules[packagename]
    except KeyError:
        pass
    for pkgname in _iter_subpath(packagename):
        if "." in pkgname:
            basepkg, subpkg = pkgname.rsplit(".", 1)
            pkg = sys.modules[basepkg]
            _load_package(subpkg, basepkg, pkg.__path__)
        else:
            try:
                sys.modules[pkgname]
            except __HOLE__:
                _load_package(pkgname, None, searchpath)
    return sys.modules[packagename]
|
KeyError
|
dataset/ETHPy150Open kdart/pycopia/core/pycopia/devhelpers.py/find_package
|
8,323
|
def find_from_package(pkgname, modname):
    pkg = find_package(pkgname)
    try:
        fo, fpath, (suffix, mode, mtype) = imp.find_module(modname, pkg.__path__)
    except __HOLE__:
        ex, val, tb = sys.exc_info()
        print("{} => {}: {}!".format(modname, ex.__name__, val), file=sys.stderr)
        return None
    fo.close()
    if mtype == imp.PY_SOURCE:
        return fpath
    else:
        return None
|
ImportError
|
dataset/ETHPy150Open kdart/pycopia/core/pycopia/devhelpers.py/find_from_package
|
8,324
|
def run(self, **options):
    """Override runserver's entry point to bring Gunicorn on.
    A large portion of code in this method is copied from
    `django.core.management.commands.runserver`.
    """
    shutdown_message = options.get('shutdown_message', '')
    self.stdout.write("Performing system checks...\n\n")
    self.check(display_num_errors=True)
    self.check_migrations()
    now = datetime.datetime.now().strftime(r'%B %d, %Y - %X')
    if six.PY2:
        now = now.decode(get_system_encoding())
    self.stdout.write(now)
    addr, port = self.addr, self.port
    addr = '[{}]'.format(addr) if self._raw_ipv6 else addr
    runner = GunicornRunner(addr, port, options)
    try:
        runner.run()
    except __HOLE__:
        runner.shutdown()
        if shutdown_message:
            self.stdout.write(shutdown_message)
        sys.exit(0)
    except:
        runner.shutdown()
        raise
|
KeyboardInterrupt
|
dataset/ETHPy150Open uranusjr/django-gunicorn/djgunicorn/management/commands/gunserver.py/Command.run
|
8,325
|
def listen(self):
    '''This method is the main loop that listens for requests.'''
    seeklock = threading.Lock()
    cowfiles = []
    while True:
        try:
            conn, addr = self.sock.accept()
            # split off on a thread, allows us to handle multiple clients
            dispatch = threading.Thread(target=self.handle_client, args=(conn, addr, seeklock))
            # clients don't necessarily close the TCP connection
            # so we use this to kill the program on ctrl-c
            dispatch.daemon = True
            dispatch.start()
            # this is for the cleanup at the end. Will need clarifying
            # if MemCOW
            if self.cow and not self.in_mem:
                cowfiles.append('PyPXE_NBD_COW_{addr[0]}_{addr[1]}'.format(addr=addr))
        except __HOLE__:
            map(os.remove, cowfiles)
            return
|
KeyboardInterrupt
|
dataset/ETHPy150Open psychomario/PyPXE/pypxe/nbd/nbd.py/NBD.listen
|
8,326
|
def binomial(random_state, size=None, n=1, p=0.5, ndim=None,
             dtype='int64', prob=None):
    """
    Sample n times with probability of success prob for each trial,
    return the number of successes.
    If the size argument is ambiguous on the number of dimensions, ndim
    may be a plain integer to supplement the missing information.
    If size is None, the output shape will be determined by the shapes
    of n and prob.
    """
    if prob is not None:
        p = prob
        print("DEPRECATION WARNING: the parameter prob to the binomial fct has been renamed to p to have the same name as numpy.", file=sys.stderr)
    n = tensor.as_tensor_variable(n)
    p = tensor.as_tensor_variable(p)
    ndim, size, bcast = _infer_ndim_bcast(ndim, size, n, p)
    if n.dtype == 'int64':
        try:
            numpy.random.binomial(n=numpy.asarray([2, 3, 4], dtype='int64'), p=numpy.asarray([.1, .2, .3], dtype='float64'))
        except __HOLE__:
            # THIS WORKS AROUND A NUMPY BUG on 32bit machine
            n = tensor.cast(n, 'int32')
    op = RandomFunction('binomial',
                        tensor.TensorType(dtype=dtype,
                                          broadcastable=(False,) * ndim))
    return op(random_state, size, n, p)
|
TypeError
|
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/tensor/raw_random.py/binomial
|
8,327
|
def process_cmd_line(app, opts, args):
    """ Processes the passed command line arguments.
    Input Arguments:
      app -- A Mayavi application instance.
      opts -- The list of options returned by getopt.
      args -- The remaining arguments returned by getopt.
    """
    from mayavi.core.common import error, exception
    from tvtk.common import camel2enthought
    sources = _get_non_file_sources()
    script = app.script
    last_obj = None
    # Start a new scene by default if there is none currently and none
    # was specified at the start of the command line arguments.
    if script.engine.current_scene is None:
        new_scene = False
        if len(opts) == 0:
            if len(args) == 0:
                new_scene = True
        elif (opts[0][0] not in ('-n', '--new-scene', '-z',
                                 '--visualization', '--viz',
                                 '-x', '--exec')):
            new_scene = True
        if new_scene:
            last_obj = script.new_scene()
    for o, a in opts:
        if o in ('-d', '--data'):
            base, ext = splitext(a)
            if exists(a):
                last_obj = script.open(a)
            elif a in sources:
                md = sources[a]
                src = md.get_callable()()
                script.add_source(src)
                last_obj = src
            else:
                error("File/Source %s does not exist!" % a)
                return
        if o in ('-m', '--module'):
            if '.' in a:
                idx = a.rfind('.')
                modname = a[:idx]
                classname = a[idx+1:]
            else:
                modname = 'mayavi.modules.%s' % camel2enthought(a)
                classname = a
            try:
                mod = __import__(modname, globals(), locals(), [classname])
            except ImportError as msg:
                exception(str(msg))
                return
            else:
                m = getattr(mod, classname)()
                if classname == 'Labels':
                    m.object = script.engine.current_object
                script.add_module(m)
                last_obj = m
        if o in ('-f', '--filter'):
            if '.' in a:
                idx = a.rfind('.')
                modname = a[:idx]
                classname = a[idx+1:]
            else:
                if a[:12] == 'UserDefined:':
                    modname = 'mayavi.filters.user_defined'
                    classname = 'UserDefined'
                    # Create the wrapped filter.
                    fname = a[12:]
                    from tvtk.api import tvtk
                    try:
                        extra = getattr(tvtk, fname)()
                    except (AttributeError, TypeError):
                        # Don't worry about errors.
                        extra = None
                else:
                    modname = 'mayavi.filters.%s' % camel2enthought(a)
                    classname = a
                    extra = None
            try:
                mod = __import__(modname, globals(), locals(), [classname])
            except __HOLE__ as msg:
                exception(str(msg))
                return
            else:
                klass = getattr(mod, classname)
                if classname != 'UserDefined':
                    f = klass()
                else:
                    if extra is not None:
                        f = klass(filter=extra)
                    else:
                        f = klass()
                    f.setup_filter()
                script.add_filter(f)
                last_obj = f
        if o in ('-M', '--module-mgr'):
            from mayavi.core.module_manager \
                import ModuleManager
            mm = ModuleManager()
            script.add_filter(mm)
            last_obj = mm
        if o in ('-n', '--new-scene'):
            script.new_scene()
            e = script.engine
            s = e.scenes[-1]
            e.set(current_scene=s, current_object=s)
            last_obj = s
        if o in ('-x', '--exec'):
            err = run_script(script, a)
            if err:  # stop processing options.
                return
        if o in ('-s', '--set'):
            try:
                stmt = 'last_obj.' + a
                exec(stmt, locals(), globals())
            except Exception as msg:
                exception(str(msg))
        if o in ('-z', '--visualization', '--viz'):
            script.load_visualization(a)
    # for remaining arguments simply load saved visualizations.
    for arg in args:
        base, ext = splitext(arg)
        if ext == '.mv2':
            script.load_visualization(arg)
        elif ext == '.py':
            err = run_script(script, arg)
            if err:  # stop processing arguments.
                return
        else:
            script.open(arg)
|
ImportError
|
dataset/ETHPy150Open enthought/mayavi/mayavi/scripts/mayavi2.py/process_cmd_line
|
8,328
|
def isContainedInAll(contig, start, end, bedfiles):
    for bedfile in bedfiles:
        try:
            if len(list(bedfile.fetch(contig, start, end))) == 0:
                return False
        except KeyError:
            return False
        except __HOLE__:
            return False
    return True
|
ValueError
|
dataset/ETHPy150Open CGATOxford/cgat/scripts/beds2beds.py/isContainedInAll
|
8,329
|
def isContainedInOne(contig, start, end, bedfiles):
    for bedfile in bedfiles:
        try:
            if len(list(bedfile.fetch(contig, start, end))) > 0:
                return True
        except __HOLE__:
            pass
        except ValueError:
            pass
    return False
|
KeyError
|
dataset/ETHPy150Open CGATOxford/cgat/scripts/beds2beds.py/isContainedInOne
|
8,330
|
def _get_raw_data(self, image, format, quality):
    """
    Returns the raw data from the Image, which can be directly written
    to something, be it a file-like object or a database.
    :param PIL.Image image: The image to get the raw data for.
    :param str format: The format to save to. If this value is ``None``,
        PIL will attempt to guess. You're almost always better off
        providing this yourself. For a full list of formats, see the PIL
        handbook at:
        http://www.pythonware.com/library/pil/handbook/index.htm
        The *Appendixes* section at the bottom, in particular.
    :param int quality: A quality level as a percent. The lower, the
        higher the compression, the worse the artifacts. Check the
        format's handbook page for what the different values for this mean.
        For example, JPEG's max quality level is 95, with 100 completely
        disabling JPEG quantization.
    :rtype: str
    :returns: A string representation of the image.
    """
    ImageFile.MAXBLOCK = 1024 * 1024
    buf = StringIO()
    try:
        # optimize makes the encoder do a second pass over the image, if
        # the format supports it.
        image.save(buf, format=format, quality=quality, optimize=1)
    except __HOLE__:
        # optimize is a no-go, omit it this attempt.
        image.save(buf, format=format, quality=quality)
    raw_data = buf.getvalue()
    buf.close()
    return raw_data
|
IOError
|
dataset/ETHPy150Open gtaylor/django-athumb/athumb/pial/engines/pil_engine.py/PILEngine._get_raw_data
|
8,331
|
@classmethod
def app(cls, slug, asset):
    game = get_game_by_slug(slug)
    if not game:
        abort(404, 'Invalid game: %s' % slug)
    try:
        depth = int(request.params.get('depth', cls.default_depth))
        list_cull = int(request.params.get('list_cull', cls.default_list_cull))
        dict_cull = int(request.params.get('dict_cull', cls.default_dict_cull))
        expand = bool(request.params.get('expand', False))
        userdata = int(request.params.get('userdata', 0))
    except __HOLE__ as e:
        abort(404, 'Invalid parameter: %s' % str(e))
    depth = max(1, depth)
    list_cull = max(1, list_cull)
    dict_cull = max(1, dict_cull)
    node = request.params.get('node', None)
    if node:
        try:
            (json_asset, filename) = get_asset(asset, slug, userdata)
            link_prefix = '/disassemble/%s' % slug
            disassembler = Disassembler(Json2htmlRenderer(), list_cull, dict_cull, depth, link_prefix)
            response.status = 200
            Compactor.disable(request)
            return disassembler.mark_up_asset({'root': json_asset}, expand, node)
        except IOError as e:
            abort(404, str(e))
        except json.JSONDecodeError as e:
            _, ext = os.path.splitext(filename)
            if ext == '.json':
                abort(404, 'Failed decoding JSON asset: %s\nError was: %s' % (asset, str(e)))
            else:
                abort(404, 'Currently unable to disassemble this asset: %s' % asset)
    else:
        c.game = game
        local_context = {'asset': asset,
                         'list_cull': list_cull,
                         'dict_cull': dict_cull,
                         'depth': depth,
                         'userdata': userdata}
        return render('/disassembler/disassembler.html', local_context)
|
TypeError
|
dataset/ETHPy150Open turbulenz/turbulenz_local/turbulenz_local/controllers/disassembler.py/DisassemblerController.app
|
8,332
|
def _submit_jobs(self, func, iterable, mode, chunksize=1, dependencies=[]):
    if mode not in ['map', 'reduce']:
        raise ValueError('mode must be one of "map" or "reduce"')
    moreData = True
    allJobs = []
    iterable = iter(iterable)
    # for each partition
    while moreData:
        curChunks = itertools.islice(iterable, chunksize)
        # save the function and data to a new pickle file
        workFile = tempfile.NamedTemporaryFile(prefix='SGE_%s_' % mode, dir=os.path.abspath('.'), delete=False, mode='wb')
        #print os.environ['PYTHONPATH']
        #print pickle.dumps(os.environ['PYTHONPATH'])
        #pickle.dump(os.environ['PYTHONPATH'], workFile) # dump all data about environment variables
        pickle.dump(self.initializer, workFile, -1)
        if mode == 'map':
            pickle.dump(self.initargs, workFile, -1)
        try:
            pickle.dump(func, workFile, -1)  # dump mapping function
        except __HOLE__ as e:
            # Failure pickling function-- probably a lambda function
            os.remove(workFile.name)
            raise e  # Can't pickle lambda or shell-defined functions
        curDataCount = 0
        for data in curChunks:
            curDataCount += 1
            pickle.dump(data, workFile, -1)
        if curDataCount == 0:
            # no data was sliced from original dataset-- we are done
            moreData = False
            os.remove(workFile.name)
        else:
            # submit an SGE job on this dataset slice
            outfile = tempfile.NamedTemporaryFile(prefix='SGE_%s_results_' % mode, dir=os.path.abspath('.'), delete=False)
            cmd = 'cd %s; $python_cmd "%s" --mode=%s "%s" "%s"' % (os.path.split(workFile.name)[0], os.path.abspath(__file__), mode, workFile.name, outfile.name)
            curJob = Job(os.path.split(workFile.name)[1], cmd)
            for job in dependencies:
                curJob.addDependency(job)
            curJob.inputFile = workFile.name
            curJob.outputFile = outfile.name
            build_submission(os.path.abspath('.'), [curJob], use_grid_engine=self.use_grid_engine)
            allJobs.append(curJob)
    return allJobs
|
TypeError
|
dataset/ETHPy150Open uci-cbcl/tree-hmm/treehmm/sge.py/SGEPool._submit_jobs
|
8,333
|
def run_safe_jobs(directory, jobs, processes=None):
    """In the event that Grid Engine is not installed, this program will
    run jobs serially on the local host."""
    #pool = Pool(processes=max(cpu_count(), 1))
    pool = Pool(processes=processes)
    errorCodes = []
    for job in jobs:
        job.out = os.path.join(directory, "stdout")
        job.err = os.path.join(directory, "stderr")
        commands = []
        if isinstance(job, JobGroup):
            for task in range(1, job.tasks+1):
                command = "export SGE_TASK_ID=%d; %s" % (task, job.scriptPath)
                commands.append(command)
        else:
            commands.append(job.scriptPath)
        count = 0
        for command in commands:
            print "# %s" % (command)
            #command += " 2>%s/%s.%d >%s/%s.%d" % (job.err, job.name, count, job.out, job.name, count)
            #os.system(command)
            print 'the command is', command
            errorCode = pool.apply_async(os.system, (command,))
            errorCodes.append(errorCode)
            count += 1
        job.submitted = 1
    # wait for submitted jobs to finish
    pool.close()
    pool.join()
    # make sure all the jobs were successful
    for index, code in enumerate(errorCodes):
        print index, code
        try:
            code.successful()
        except __HOLE__, e:
            raise AssertionError("Job Failed to run: %s %s" % (jobs[index], commands[index]), e.args)
|
AssertionError
|
dataset/ETHPy150Open uci-cbcl/tree-hmm/treehmm/sge.py/run_safe_jobs
|
8,334
|
def main():
    ''' map() a function against data as given in an input pickle file, saving result to a pickle.'''
    import sys, os, optparse
    usage = "%prog [options] inputPickle outputPickle \n" + main.__doc__
    parser = optparse.OptionParser(usage)
    parser.add_option('--mode', dest='mode', type='string',
                      help="""set the mode of operation. Should be one of ['map', 'reduce']""")
    opts, args = parser.parse_args()
    inputPickleName, outputPickleName = args
    with open(inputPickleName, 'rb') as inputPickleFile:
        with open(outputPickleName, 'wb') as outputPickleFile:
            #os.environ['PYTHONPATH'] = pickle.load(inputPickleFile)
            try:
                initializer = pickle.load(inputPickleFile)
                if opts.mode == 'map':
                    initargs = pickle.load(inputPickleFile)
                else:
                    initargs = None
            except Exception as e:
                # save and return the exception, to be debugged upstream
                pickle.dump(type(e)(('Error Loading initializer from pickle file',) + e.args), outputPickleFile, -1)
                raise
            else:
                if initializer is not None:
                    if initargs is not None:
                        initializer(initargs)
                    else:
                        initializer()
            try:
                func = pickle.load(inputPickleFile)
            except __HOLE__ as e:
                pickle.dump(e, outputPickleFile, -1)  # Error loading
                raise
            else:
                # If I didn't want error checking on each step, I'd do this at the top level:
                ## result = reduce(func, (data if type(data) != Job else pickle.load(data.outputFile) for data in curData), initializer)
                moreData = True
                curData = []
                while moreData:
                    try:
                        data = pickle.load(inputPickleFile)
                    except EOFError:
                        moreData = False
                    else:
                        if data.__class__ == Job:  # load the results from the previous Job
                            try:
                                data = pickle.load(data.outputFile)
                            except Exception as e:
                                pickle.dump(type(e)(("Error loading previous Job's results! ",) + e.args), outputPickleFile, -1)
                                raise
                        curData.append(data)
                        if opts.mode == 'map':
                            try:
                                #print 'mapping', func, 'on', curData[0]
                                result = func(curData.pop(0))
                            except Exception as e:
                                # save and return the exception, to be debugged upstream
                                pickle.dump(type(e)(('Error in map function: ',) + e.args), outputPickleFile, -1)
                                raise
                            else:
                                print 'saving', result
                                pickle.dump(result, outputPickleFile, -1)
                        else:
                            # reduce curData
                            result = reduce(func, curData[:2], initializer)
                            initializer = None
                            del curData[:2]
                            curData.insert(0, result)
|
AttributeError
|
dataset/ETHPy150Open uci-cbcl/tree-hmm/treehmm/sge.py/main
|
8,335
|
def get_site_url():
    try:
        from_site_config = m.config.Config.get('site', 'site_domain', None)
        from_settings = get_setting_value('SERVER_NAME', None)
        if from_settings and not from_settings.startswith('http'):
            from_settings = 'http://%s/' % from_settings
        return from_site_config or from_settings or request.url_root
    except __HOLE__:
        return 'http://localhost/'
|
RuntimeError
|
dataset/ETHPy150Open rochacbruno/quokka/quokka/utils/settings.py/get_site_url
|
8,336
|
def get_setting_value(key, default=None):
    try:
        return current_app.config.get(key, default)
    except __HOLE__ as e:
        logger.warning('current_app is inaccessible: %s' % e)
        try:
            app = create_app_min()
            db.init_app(app)
            with app.app_context():
                return app.config.get(key, default)
        except:
            return default
|
RuntimeError
|
dataset/ETHPy150Open rochacbruno/quokka/quokka/utils/settings.py/get_setting_value
|
8,337
|
def _parse_options(options):
    """Returns a Response after validating the input.
    """
    if options[1] in SORT_TYPES.keys():
        sort_type = options[1]
        if len(options) > 2:
            subreddit, sort_type, num_results = options
            try:
                num_results = int(num_results)
                return fetch_posts(subreddit, sort_type, num_results)
            except __HOLE__:
                # Third option is invalid.
                response_text = ("Sorry! `%s` isn't a valid sorting number. Try `/reddit %s %s 5`"
                                 % (subreddit, sort_type, num_results))
                return _create_response(response_text)
        else:
            # No option for results to return.
            return fetch_posts(options[0], sort_type)
    else:
        if isinstance(options[1], int):
            # For the case that someone passes `/reddit [subreddit] [sort_num]`
            return fetch_posts(options[0], 'hot', options[1])
        else:
            return _create_response('Invalid search. Try `/reddit aww top 5` or `/reddit help`.')
|
ValueError
|
dataset/ETHPy150Open arjunblj/slack-reddit/app.py/_parse_options
|
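The label follows from `int()` raising ValueError on a non-numeric string. A quick check:
```python
try:
    num_results = int('five')       # user typed a word, not a number
except ValueError:
    print("Sorry! `five` isn't a valid sorting number.")
```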
8,338
|
def check_monospace(familydir):
    files = listdir(familydir)
    glyphwidths = []
    for f in files:
        if not unicode(f).endswith('.ttf'):
            continue
        font = fontToolsOpenFont(unicode(f))
        for table in font['cmap'].tables:
            if not (table.platformID == 3 and table.platEncID in [1, 10]):
                continue
            for glyphname in table.cmap:
                try:
                    glyphwidths.append(font['hmtx'][glyphname][0])
                except (__HOLE__, KeyError):
                    # can't read hmtx for glyphname, append value of zero
                    glyphwidths.append(0)
    # if all glyphs have the same width then it is easy to check
    # by casting the list to a python set.
    return len(set(glyphwidths)) == 1
|
IndexError
|
dataset/ETHPy150Open googlefonts/fontbakery/bakery_cli/scripts/genmetadata.py/check_monospace
|
8,339
|
def _load_storage(self):
    """Loads the storage backend.
    This will attempt to load the SSH storage backend. If there is an
    error in loading the backend, it will be logged, and an
    ImproperlyConfigured exception will be raised.
    """
    try:
        path = getattr(settings, 'RBSSH_STORAGE_BACKEND',
                       self.DEFAULT_STORAGE)
    except __HOLE__:
        # We may not be running in the Django environment.
        path = self.DEFAULT_STORAGE
    i = path.rfind('.')
    module, class_name = path[:i], path[i + 1:]
    try:
        mod = __import__(module, {}, {}, [class_name])
    except ImportError as e:
        msg = 'Error importing SSH storage backend %s: "%s"' % (module, e)
        logging.critical(msg)
        raise ImproperlyConfigured(msg)
    try:
        self.storage = getattr(mod, class_name)(namespace=self.namespace)
    except Exception as e:
        msg = 'Error instantiating SSH storage backend %s: "%s"' % \
              (module, e)
        logging.critical(msg)
        raise
|
ImportError
|
dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/ssh/client.py/SSHClient._load_storage
|
8,340
|
def get_user_key(self):
    """Returns the keypair of the user running Review Board.
    This will be an instance of :py:mod:`paramiko.PKey`, representing
    a DSS or RSA key, as long as one exists. Otherwise, it may return None.
    """
    key = None
    fp = None
    try:
        key = self.storage.read_user_key()
    except paramiko.SSHException as e:
        logging.error('SSH: Unknown error accessing user key: %s' % e)
    except paramiko.PasswordRequiredException as e:
        logging.error('SSH: Unable to access password protected '
                      'key file: %s' % e)
    except __HOLE__ as e:
        logging.error('SSH: Error reading user key: %s' % e)
    if fp:
        fp.close()
    return key
|
IOError
|
dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/ssh/client.py/SSHClient.get_user_key
|
8,341
|
def is_key_authorized(self, key):
    """Returns whether or not a public key is currently authorized."""
    public_key = key.get_base64()
    try:
        lines = self.storage.read_authorized_keys()
        for line in lines:
            try:
                authorized_key = line.split()[1]
            except (ValueError, __HOLE__):
                continue
            if authorized_key == public_key:
                return True
    except IOError:
        pass
    return False
|
IndexError
|
dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/ssh/client.py/SSHClient.is_key_authorized
|
8,342
|
def _write_user_key(self, key):
    """Convenience function to write a user key and check for errors.
    Any errors caused as a result of writing a user key will be logged.
    """
    try:
        self.storage.write_user_key(key)
    except UnsupportedSSHKeyError as e:
        logging.error('Failed to write unknown key type %s' % type(key))
        raise
    except __HOLE__ as e:
        logging.error('Failed to write SSH user key: %s' % e)
        raise
    except Exception as e:
        logging.error('Unknown error writing SSH user key: %s' % e,
                      exc_info=1)
        raise
|
IOError
|
dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/ssh/client.py/SSHClient._write_user_key
|
8,343
|
def _ipython_active():
    """return true if in an active IPython session"""
    try:
        import IPython
    except ImportError:
        return False
    try:
        if IPython.__version__ >= "0.12":
            return __IPYTHON__
        return __IPYTHON__active
    except __HOLE__:
        return False
|
NameError
|
dataset/ETHPy150Open manahl/mdf/mdf/viewer/__init__.py/_ipython_active
|
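`__IPYTHON__` is a global that IPython injects into the interpreter; outside IPython the bare reference raises NameError, which is exactly what the except arm above absorbs:
```python
try:
    __IPYTHON__                # only defined when IPython injected it
    in_ipython = True
except NameError:
    in_ipython = False
print(in_ipython)
```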
8,344
|
def get_value(process, name):
    result = getattr(process, name)
    try:
        return result()
    except __HOLE__:
        return result
|
TypeError
|
dataset/ETHPy150Open BrightcoveOS/Diamond/src/collectors/processresources/processresources.py/get_value
|
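get_value() works because calling a non-callable raises TypeError, letting one accessor serve both methods and plain attributes. A sketch with a hypothetical process object:
```python
class Proc(object):              # hypothetical psutil-style process
    pid = 42
    def name(self):
        return 'worker'

for attr in ('name', 'pid'):
    result = getattr(Proc(), attr)
    try:
        print(result())          # bound method: call it
    except TypeError:
        print(result)            # plain attribute: the call raised TypeError
```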
8,345
|
def test_flake8():
    """Test source code for pyFlakes and PEP8 conformance"""
    flake8style = flake8.engine.StyleGuide(max_line_length=120)
    report = flake8style.options.report
    report.start()
    this_dir = os.path.dirname(os.path.abspath(__file__))
    try:
        input_dir = flake8style.input_dir
    except __HOLE__:
        input_dir = flake8style._styleguide.input_dir
    input_dir(os.path.join(this_dir, '..', 'catkin_tools'))
    report.stop()
    assert report.total_errors == 0, \
        ("Found '{0}' code style errors (and warnings)."
         .format(report.total_errors))
|
AttributeError
|
dataset/ETHPy150Open catkin/catkin_tools/tests/test_code_format.py/test_flake8
|
8,346
|
def load(self):
    self._data = {}
    try:
        self._data = json.load(open(self.path, "rU"))
        self._dirty = False
    except __HOLE__:
        logger.warning("Unable to load configuration at '{0}'. No file found.".format(self.path))
    except ValueError as e:
        logger.error("Unable to load configuration at '{0}'. Invalid JSON caused by: {1}".format(self.path, e))
    except Exception as e:
        logger.exception("Unable to load configuration at '{0}'.".format(self.path))
|
IOError
|
dataset/ETHPy150Open koenbok/Cactus/cactus/config/file.py/ConfigFile.load
|
8,347
|
def add(self, review, reason):
    reason = 'amo:review:spam:%s' % reason
    try:
        reasonset = cache.get('amo:review:spam:reasons', set())
    except __HOLE__:
        reasonset = set()
    try:
        idset = cache.get(reason, set())
    except KeyError:
        idset = set()
    reasonset.add(reason)
    cache.set('amo:review:spam:reasons', reasonset)
    idset.add(review.id)
    cache.set(reason, idset)
    return True
|
KeyError
|
dataset/ETHPy150Open mozilla/addons-server/src/olympia/reviews/models.py/Spam.add
|
8,348
|
def __new__(cls, *args, **kwargs):
    args, kwargs = cls._process_args(*args, **kwargs)
    # First argument is the object we're going to cache on
    cache_obj = args[0]
    # These are now the arguments to the subclass constructor
    args = args[1:]
    key = cls._cache_key(*args, **kwargs)
    def make_obj():
        obj = super(ObjectCached, cls).__new__(cls)
        obj._initialized = False
        # obj.__init__ will be called twice when constructing
        # something not in the cache. The first time here, with
        # the canonicalised args, the second time directly in the
        # subclass. But that one should hit the cache and return
        # straight away.
        obj.__init__(*args, **kwargs)
        return obj
    # Don't bother looking in caches if we're not meant to cache
    # this object.
    if key is None:
        return make_obj()
    # Does the caching object know about the caches?
    try:
        cache = cache_obj._cache
    except AttributeError:
        raise RuntimeError("Provided caching object does not have a '_cache' attribute.")
    # OK, we have a cache, let's go ahead and try and find our
    # object in it.
    try:
        return cache[key]
    except __HOLE__:
        obj = make_obj()
        cache[key] = obj
        return obj
|
KeyError
|
dataset/ETHPy150Open OP2/PyOP2/pyop2/caching.py/ObjectCached.__new__
|
8,349
|
def __new__(cls, *args, **kwargs):
    args, kwargs = cls._process_args(*args, **kwargs)
    key = cls._cache_key(*args, **kwargs)
    def make_obj():
        obj = super(Cached, cls).__new__(cls)
        obj._key = key
        obj._initialized = False
        # obj.__init__ will be called twice when constructing
        # something not in the cache. The first time here, with
        # the canonicalised args, the second time directly in the
        # subclass. But that one should hit the cache and return
        # straight away.
        obj.__init__(*args, **kwargs)
        return obj
    # Don't bother looking in caches if we're not meant to cache
    # this object.
    if key is None:
        return make_obj()
    try:
        return cls._cache_lookup(key)
    except (KeyError, __HOLE__):
        obj = make_obj()
        cls._cache_store(key, obj)
        return obj
|
IOError
|
dataset/ETHPy150Open OP2/PyOP2/pyop2/caching.py/Cached.__new__
|
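The lookup/miss/store shape of `Cached.__new__` in miniature, with a plain dict standing in for `_cache_lookup`/`_cache_store` (the names here are illustrative only):
```python
_cache = {}

def get_or_build(key, factory):
    try:
        return _cache[key]          # cache hit
    except KeyError:
        obj = factory()             # miss: construct once
        _cache[key] = obj
        return obj

a = get_or_build('k', object)
b = get_or_build('k', object)
print(a is b)  # True: the second call hit the cache
```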
8,350
|
def __init__(self, verbose_name=None, bases=(models.Model,),
             user_related_name='+', table_name=None, inherit=False):
    self.user_set_verbose_name = verbose_name
    self.user_related_name = user_related_name
    self.table_name = table_name
    self.inherit = inherit
    try:
        if isinstance(bases, six.string_types):
            raise TypeError
        self.bases = tuple(bases)
    except __HOLE__:
        raise TypeError("The `bases` option must be a list or a tuple.")
|
TypeError
|
dataset/ETHPy150Open treyhunner/django-simple-history/simple_history/models.py/HistoricalRecords.__init__
|
8,351
|
def finalize(self, sender, **kwargs):
    try:
        hint_class = self.cls
    except __HOLE__:  # called via `register`
        pass
    else:
        if hint_class is not sender:  # set in concrete
            if not (self.inherit and issubclass(sender, hint_class)):  # set in abstract
                return
    if hasattr(sender._meta, 'simple_history_manager_attribute'):
        raise exceptions.MultipleRegistrationsError('{}.{} registered multiple times for history tracking.'.format(
            sender._meta.app_label,
            sender._meta.object_name,
        ))
    history_model = self.create_history_model(sender)
    module = importlib.import_module(self.module)
    setattr(module, history_model.__name__, history_model)
    # The HistoricalRecords object will be discarded,
    # so the signal handlers can't use weak references.
    models.signals.post_save.connect(self.post_save, sender=sender,
                                     weak=False)
    models.signals.post_delete.connect(self.post_delete, sender=sender,
                                       weak=False)
    descriptor = HistoryDescriptor(history_model)
    setattr(sender, self.manager_name, descriptor)
    sender._meta.simple_history_manager_attribute = self.manager_name
|
AttributeError
|
dataset/ETHPy150Open treyhunner/django-simple-history/simple_history/models.py/HistoricalRecords.finalize
|
8,352
|
def create_history_model(self, model):
    """
    Creates a historical model to associate with the model provided.
    """
    attrs = {'__module__': self.module}
    app_module = '%s.models' % model._meta.app_label
    if model.__module__ != self.module:
        # registered under different app
        attrs['__module__'] = self.module
    elif app_module != self.module:
        try:
            # Abuse an internal API because the app registry is loading.
            app = apps.app_configs[model._meta.app_label]
        except __HOLE__:  # Django < 1.7
            models_module = get_app(model._meta.app_label).__name__
        else:
            models_module = app.name
        attrs['__module__'] = models_module
    fields = self.copy_fields(model)
    attrs.update(fields)
    attrs.update(self.get_extra_fields(model, fields))
    # type in python2 wants str as a first argument
    attrs.update(Meta=type(str('Meta'), (), self.get_meta_options(model)))
    if self.table_name is not None:
        attrs['Meta'].db_table = self.table_name
    name = 'Historical%s' % model._meta.object_name
    registered_models[model._meta.db_table] = model
    return python_2_unicode_compatible(
        type(str(name), self.bases, attrs))
|
NameError
|
dataset/ETHPy150Open treyhunner/django-simple-history/simple_history/models.py/HistoricalRecords.create_history_model
|
8,353
|
def copy_fields(self, model):
    """
    Creates copies of the model's original fields, returning
    a dictionary mapping field name to copied field object.
    """
    fields = {}
    for field in model._meta.fields:
        field = copy.copy(field)
        try:
            field.remote_field = copy.copy(field.remote_field)
        except __HOLE__:
            field.rel = copy.copy(field.rel)
        if isinstance(field, OrderWrt):
            # OrderWrt is a proxy field, switch to a plain IntegerField
            field.__class__ = models.IntegerField
        if isinstance(field, models.ForeignKey):
            old_field = field
            field_arguments = {'db_constraint': False}
            if (getattr(old_field, 'one_to_one', False) or
                    isinstance(old_field, models.OneToOneField)):
                FieldType = models.ForeignKey
            else:
                FieldType = type(old_field)
            if getattr(old_field, 'to_fields', []):
                field_arguments['to_field'] = old_field.to_fields[0]
            if getattr(old_field, 'db_column', None):
                field_arguments['db_column'] = old_field.db_column
            field = FieldType(
                old_field.rel.to,
                related_name='+',
                null=True,
                blank=True,
                primary_key=False,
                db_index=True,
                serialize=True,
                unique=False,
                on_delete=models.DO_NOTHING,
                **field_arguments
            )
            field.name = old_field.name
        else:
            transform_field(field)
        fields[field.name] = field
    return fields
|
AttributeError
|
dataset/ETHPy150Open treyhunner/django-simple-history/simple_history/models.py/HistoricalRecords.copy_fields
|
8,354
|
def get_history_user(self, instance):
    """Get the modifying user from instance or middleware."""
    try:
        return instance._history_user
    except AttributeError:
        try:
            if self.thread.request.user.is_authenticated():
                return self.thread.request.user
            return None
        except __HOLE__:
            return None
|
AttributeError
|
dataset/ETHPy150Open treyhunner/django-simple-history/simple_history/models.py/HistoricalRecords.get_history_user
|
8,355
|
def get_wordlist(language, word_source):
    """ Takes in a language and a word source and returns a matching wordlist,
    if it exists.
    Valid languages: ['english']
    Valid word sources: ['bip39', 'wiktionary', 'google']
    """
    try:
        wordlist_string = eval(language + '_words_' + word_source)
    except __HOLE__:
        raise Exception("No wordlist could be found for the word source and language provided.")
    wordlist = wordlist_string.split(',')
    return wordlist
|
NameError
|
dataset/ETHPy150Open blockstack/pybitcoin/pybitcoin/passphrases/passphrase.py/get_wordlist
|
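`eval()` of a name that was never defined raises NameError, which is what the handler above converts into a friendlier message. A tiny sketch (the wordlist variable is a hypothetical stand-in):
```python
english_words_bip39 = 'abandon,ability,able'   # hypothetical module-level wordlist
try:
    wordlist_string = eval('english' + '_words_' + 'klingon')  # undefined name
except NameError:
    print('No wordlist could be found for the word source and language provided.')
```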
8,356
|
@classmethod
def setupClass(cls):
    global plt
    try:
        import matplotlib as mpl
        mpl.use('PS', warn=False)
        import matplotlib.pyplot as plt
    except __HOLE__:
        raise SkipTest('matplotlib not available.')
    except RuntimeError:
        raise SkipTest('matplotlib not available.')
|
ImportError
|
dataset/ETHPy150Open gkno/gkno_launcher/src/networkx/drawing/tests/test_pylab.py/TestPylab.setupClass
|
8,357
|
def load_item(item):
    with open(os.path.join(ITEM_DIR, item), 'r') as e:
        try:
            logging.debug('Caching item: %s' % item)
            return json.loads(''.join(e.readlines()))
        except __HOLE__, e:
            raise Exception('Failed to load item: %s' % e)
|
ValueError
|
dataset/ETHPy150Open flags/Reactor-3/items.py/load_item
|
8,358
|
def read_file(filename, mode="rb"):
    """Returns the given file's contents.
    :param str filename: path to file
    :param str mode: open mode (see `open`)
    :returns: absolute path of filename and its contents
    :rtype: tuple
    :raises argparse.ArgumentTypeError: File does not exist or is not readable.
    """
    try:
        filename = os.path.abspath(filename)
        return filename, open(filename, mode).read()
    except __HOLE__ as exc:
        raise argparse.ArgumentTypeError(exc.strerror)
|
IOError
|
dataset/ETHPy150Open letsencrypt/letsencrypt/certbot/cli.py/read_file
|
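read_file() is shaped for argparse's `type=` hook: raising ArgumentTypeError there makes argparse exit with a clean usage error instead of a traceback. A self-contained sketch (the flag name and stub are illustrative):
```python
import argparse
import os

def read_file_stub(filename, mode="rb"):   # stand-in for read_file above
    try:
        filename = os.path.abspath(filename)
        return filename, open(filename, mode).read()
    except IOError as exc:
        raise argparse.ArgumentTypeError(exc.strerror)

parser = argparse.ArgumentParser()
parser.add_argument('--csr', type=read_file_stub)  # hypothetical flag
# parser.parse_args(['--csr', 'missing.pem']) exits with a usage error
```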
8,359
|
def prescan_for_flag(self, flag, possible_arguments):
    """Checks cli input for flags.
    Check for a flag, which accepts a fixed set of possible arguments, in
    the command line; we will use this information to configure argparse's
    help correctly. Return the flag's argument, if it has one that matches
    the sequence @possible_arguments; otherwise return whether the flag is
    present.
    """
    if flag not in self.args:
        return False
    pos = self.args.index(flag)
    try:
        nxt = self.args[pos + 1]
        if nxt in possible_arguments:
            return nxt
    except __HOLE__:
        pass
    return True
|
IndexError
|
dataset/ETHPy150Open letsencrypt/letsencrypt/certbot/cli.py/HelpfulArgumentParser.prescan_for_flag
|
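The lookahead above leans on list indexing past the end raising IndexError, meaning the flag is last and no argument follows it:
```python
args = ['--cert-path']
pos = args.index('--cert-path')
try:
    nxt = args[pos + 1]
except IndexError:
    nxt = True        # flag present, nothing after it
print(nxt)
```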
8,360
|
def layouts_info():
    ''' Return a list of information about each available layout.
    Information (title, slug, and preview) about each layout
    is loaded from the `MANIFEST.json` file in the layout dir (if it exists),
    otherwise default information is returned.
    '''
    layouts_list = []
    # Get all available layouts
    for layout in list_layouts():
        # Default infos
        current_layout = {'slug': layout['slug'],
                          'title': layout['slug'].capitalize(),
                          'preview': os.path.join(settings.STATIC_URL,
                                                  settings.LAYOUTS_DEFAULT_PATH,
                                                  'icon-layouts.png')}
        # Reading manifest file
        # and pickup infos (for updating default values)
        try:
            manifest_file = os.path.join(layout['path'], 'MANIFEST.json')
            with open(manifest_file, 'r') as f:
                try:
                    manifest = simplejson.loads(f.read())
                    if 'preview' in manifest.keys():
                        manifest['preview'] = '%slayouts/%s/%s' % (settings.STATIC_URL,
                                                                   layout['slug'],
                                                                   manifest['preview'])
                except __HOLE__ as e:
                    sys.stderr.write('JSON syntax error in %s\n%s\n\n' % (manifest_file, str(e)))
                    continue
            current_layout.update(**manifest)
        except IOError:
            pass
        layouts_list.append(current_layout)
    layouts_list = sorted(layouts_list, key=lambda layout: layout['title'].lower())
    return layouts_list
|
ValueError
|
dataset/ETHPy150Open ionyse/ionyweb/ionyweb/loaders/manifest.py/layouts_info
|
8,361
|
def themes_info(slug=None):
    ''' Loads the MANIFEST for each available theme
    >>> themes_info()
    {u'natim': {'preview': '/_static/themes/icon-themes.png', 'title': u'natim'},
     u'jungleland': {'website': u'http://www.styleshout.com/', 'description': u'...',
     'title': u'Jungle Land', 'author': u'styleshout', 'date': u'01/09/2009',
     'preview': u'/_static/themes/jungleland/jungleland.jpg'}}
    '''
    # Get theme with slug or all themes list
    themes = []
    if slug is not None:
        theme_slug = get_theme(slug)
        if theme_slug:
            themes = [theme_slug]
        else:
            return None
    if not themes:
        themes = list_themes()
    themes_list = []
    for theme in themes:
        current_theme = {'slug': theme['slug'],
                         'title': theme['slug'].capitalize(),
                         'preview': ['%sthemes/icon-themes.png' % settings.STATIC_URL]}
        try:
            manifest_file = os.path.join(theme['path'], 'MANIFEST.json')
            with open(manifest_file, 'r') as f:
                try:
                    manifest = simplejson.loads(f.read())
                    # Set infos about preview files
                    if 'preview' in manifest.keys():
                        if isinstance(manifest['preview'], unicode) or isinstance(manifest['preview'], str):
                            previews = [manifest['preview']]
                        else:
                            previews = list(manifest['preview'])
                        manifest['preview'] = []
                        for preview in previews:
                            manifest['preview'].append(os.path.join(settings.STATIC_URL,
                                                                    settings.THEMES_DEFAULT_PATH,
                                                                    theme['slug'],
                                                                    preview))
                    # set correct Path for each style
                    if 'styles' in manifest.keys():
                        for i in xrange(len(manifest['styles'])):
                            manifest['styles'][i]['preview'] = os.path.join(settings.STATIC_URL,
                                                                            settings.THEMES_DEFAULT_PATH,
                                                                            theme['slug'],
                                                                            manifest['styles'][i]['preview'])
                    # Set infos about templates files
                    if 'templates' in manifest.keys():
                        templates_list = manifest['templates']
                        for template in templates_list:
                            template['preview'] = os.path.join(settings.STATIC_URL,
                                                               settings.THEMES_DEFAULT_PATH,
                                                               theme['slug'],
                                                               template['preview'])
                except ValueError as e:
                    sys.stderr.write('JSON syntax error in %s\n%s\n\n' % (manifest_file, str(e)))
                    continue
            current_theme.update(**manifest)
        except __HOLE__:
            pass
        themes_list.append(current_theme)
    return themes_list
|
IOError
|
dataset/ETHPy150Open ionyse/ionyweb/ionyweb/loaders/manifest.py/themes_info
|
8,362
|
def get_stats(self, params):
    metrics = {'status': {}}
    if not self.connect(params):
        return metrics
    rows = self.get_db_global_status()
    for row in rows:
        try:
            metrics['status'][row['Variable_name']] = float(row['Value'])
        except:
            pass
    if self.config['master']:
        metrics['master'] = {}
        try:
            rows = self.get_db_master_status()
            for row_master in rows:
                for key, value in row_master.items():
                    if key in self._IGNORE_KEYS:
                        continue
                    try:
                        metrics['master'][key] = float(row_master[key])
                    except:
                        pass
        except:
            self.log.error('MySQLCollector: Couldnt get master status')
            pass
    if self.config['slave']:
        metrics['slave'] = {}
        try:
            rows = self.get_db_slave_status()
            for row_slave in rows:
                for key, value in row_slave.items():
                    if key in self._IGNORE_KEYS:
                        continue
                    try:
                        metrics['slave'][key] = float(row_slave[key])
                    except:
                        pass
        except:
            self.log.error('MySQLCollector: Couldnt get slave status')
            pass
    if self.config['innodb_rseg']:
        metrics['innodb_rseg'] = {}
        try:
            rows = self.get_db_innodb_rollback_segment()
            for row_rseg in rows:
                metrics['innodb_rseg']['Innodb_rseg_curr_size'] = float(row_rseg['curr_size'])
                metrics['innodb_rseg']['Innodb_rseg_max_size'] = float(row_rseg['max_size'])
        except:
            self.log.error('MySQLCollector: Couldnt get InnoDB rollback segment stats')
            pass
    if self.config['processlist']:
        metrics['processlist'] = {}
        try:
            rows = self.get_db_processlist()
            for idx, row_processlist in enumerate(rows):
                cmd = str(row_processlist['Command'])
                if cmd in self._IGNORE_KEYS:
                    continue
                query = str(row_processlist['Info'])
                user = str(row_processlist['User'])
                running_time = 0
                if row_processlist['Time'] is not None:
                    running_time = float(row_processlist['Time'])
                if cmd == "Sleep":
                    if running_time > self.config['idle_threshold']:
                        metrics['processlist'][idx] = {
                            'metric_name': 'MySQL_idle_threads',
                            'metric_value': running_time,
                            'dimensions': {
                                'user': str(user),
                            }
                        }
                elif user not in self.config['ignore_users'] and running_time > self.config['query_threshold']:
                    metrics['processlist'][idx] = {
                        'metric_name': 'MySQL_long_queries',
                        'metric_value': running_time,
                        'dimensions': {
                            'user': str(user),
                            'query': str(query),
                        }
                    }
        except:
            self.log.error('MySQLCollector: Couldnt get processlist')
            pass
    if self.config['temp_tables']:
        file_match = re.compile(self.config['temp_tables'])
        metrics['temp_tables'] = {}
        try:
            tmpdir = self.config.get('tmpdir', False)
            if tmpdir:
                for idx, tmp_file in enumerate(os.listdir(tmpdir)):
                    if file_match.match(tmp_file):
                        metrics['temp_tables'][idx] = {
                            'metric_name': 'MySQL_temptable_size',
                            'metric_value': float(os.stat(os.path.join(tmpdir, tmp_file)).st_size),
                        }
            else:
                self.log.error("MySQLCollector: tempdir not defined in config")
        except:
            self.log.error('MySQLCollector: Couldnt get temp_table sizes')
            pass
    if self.config['innodb']:
        metrics['innodb'] = {}
        innodb_status_timer = time.time()
        try:
            rows = self.get_db_innodb_status()
            innodb_status_output = rows[0]
            todo = self.innodb_status_keys.keys()
            for line in innodb_status_output['Status'].split('\n'):
                for key in todo:
                    match = self.innodb_status_keys[key].match(line)
                    if match is not None:
                        todo.remove(key)
                        match_index = 1
                        for key_index in key.split(','):
                            try:
                                value = float(match.group(match_index))
                                # store value
                                if key_index in metrics:
                                    self.log.debug("MySQLCollector: %s"
                                                   + " already defined, "
                                                   + " ignoring new value",
                                                   key_index)
                                else:
                                    metrics['innodb'][key_index] = value
                                match_index += 1
                            except __HOLE__:
                                self.log.debug("MySQLCollector: Cannot find"
                                               + " value in innodb status "
                                               + "for %s", key_index)
            for key in todo:
                self.log.debug("MySQLCollector: %s regexp not matched in"
                               + " innodb status", key)
        except Exception, innodb_status_error:
            self.log.error('MySQLCollector: Couldnt get engine innodb'
                           + ' status, check user permissions: %s',
                           innodb_status_error)
        Innodb_status_process_time = time.time() - innodb_status_timer
        self.log.debug("MySQLCollector: innodb status process time: %f",
                       Innodb_status_process_time)
        subkey = "Innodb_status_process_time"
        metrics['innodb'][subkey] = Innodb_status_process_time
    self.disconnect()
    return metrics
|
IndexError
|
dataset/ETHPy150Open Yelp/fullerite/src/diamond/collectors/mysqlstat/mysqlstat.py/MySQLCollector.get_stats
|
8,363
|
def collect(self):
    if MySQLdb is None:
        self.log.error('Unable to import MySQLdb')
        return False
    for host in self.config['hosts']:
        matches = re.search(
            '^([^:]*):([^@]*)@([^:]*):?([^/]*)/([^/]*)/?(.*)', host)
        if not matches:
            self.log.error(
                'Connection string not in required format, skipping: %s',
                host)
            continue
        params = {}
        params['host'] = matches.group(3)
        try:
            params['port'] = int(matches.group(4))
        except __HOLE__:
            params['port'] = 3306
        params['db'] = matches.group(5)
        params['user'] = matches.group(1)
        params['passwd'] = matches.group(2)
        nickname = matches.group(6)
        if len(nickname):
            nickname += '.'
        if params['db'] == 'None':
            del params['db']
        try:
            metrics = self.get_stats(params=params)
        except Exception, e:
            try:
                self.disconnect()
            except MySQLdb.ProgrammingError as ee:
                self.log.error('Collection errored for %s %s', nickname, ee)
                pass
            self.log.error('Collection failed for %s %s', nickname, e)
            continue
        # Warn if publish contains an unknown variable
        if 'publish' in self.config and metrics['status']:
            for k in self.config['publish'].split():
                if k not in metrics['status']:
                    self.log.error("No such key '%s' available, issue"
                                   + " 'show global status' for a full"
                                   + " list", k)
        self._publish_stats(nickname, metrics)
|
ValueError
|
dataset/ETHPy150Open Yelp/fullerite/src/diamond/collectors/mysqlstat/mysqlstat.py/MySQLCollector.collect
|
8,364
|
def _safe_cls_name(cls):
    try:
        cls_name = '.'.join((cls.__module__, cls.__name__))
    except __HOLE__:
        cls_name = getattr(cls, '__name__', None)
        if cls_name is None:
            cls_name = repr(cls)
    return cls_name
|
AttributeError
|
dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/sqlalchemy/orm/exc.py/_safe_cls_name
|
8,365
|
@util.dependencies("sqlalchemy.orm.base")
def _default_unmapped(base, cls):
    try:
        mappers = base.manager_of_class(cls).mappers
    except NO_STATE:
        mappers = {}
    except __HOLE__:
        mappers = {}
    name = _safe_cls_name(cls)
    if not mappers:
        return "Class '%s' is not mapped" % name
|
TypeError
|
dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/sqlalchemy/orm/exc.py/_default_unmapped
|
8,366
|
def ipython(self):
    """Start any version of IPython"""
    for ip in (self._ipython, self._ipython_pre_100, self._ipython_pre_011):
        try:
            ip()
        except __HOLE__:
            pass
        else:
            return
    # no IPython, raise ImportError
    raise ImportError("No IPython")
|
ImportError
|
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Django-1.6.10/django/core/management/commands/shell.py/Command.ipython
|
8,367
|
def run_shell(self, shell=None):
    available_shells = [shell] if shell else self.shells
    for shell in available_shells:
        try:
            return getattr(self, shell)()
        except __HOLE__:
            pass
    raise ImportError
|
ImportError
|
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Django-1.6.10/django/core/management/commands/shell.py/Command.run_shell
|
8,368
|
def handle_noargs(self, **options):
    # XXX: (Temporary) workaround for ticket #1796: force early loading of all
    # models from installed apps.
    from django.db.models.loading import get_models
    get_models()
    use_plain = options.get('plain', False)
    no_startup = options.get('no_startup', False)
    interface = options.get('interface', None)
    try:
        if use_plain:
            # Don't bother loading IPython, because the user wants plain Python.
            raise ImportError
        self.run_shell(shell=interface)
    except ImportError:
        import code
        # Set up a dictionary to serve as the environment for the shell, so
        # that tab completion works on objects that are imported at runtime.
        # See ticket 5082.
        imported_objects = {}
        try:  # Try activating rlcompleter, because it's handy.
            import readline
        except __HOLE__:
            pass
        else:
            # We don't have to wrap the following import in a 'try', because
            # we already know 'readline' was imported successfully.
            import rlcompleter
            readline.set_completer(rlcompleter.Completer(imported_objects).complete)
            readline.parse_and_bind("tab:complete")
        # We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system
        # conventions and get $PYTHONSTARTUP first then .pythonrc.py.
        if not no_startup:
            for pythonrc in (os.environ.get("PYTHONSTARTUP"), '~/.pythonrc.py'):
                if not pythonrc:
                    continue
                pythonrc = os.path.expanduser(pythonrc)
                if not os.path.isfile(pythonrc):
                    continue
                try:
                    with open(pythonrc) as handle:
                        exec(compile(handle.read(), pythonrc, 'exec'), imported_objects)
                except NameError:
                    pass
        code.interact(local=imported_objects)
|
ImportError
|
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Django-1.6.10/django/core/management/commands/shell.py/Command.handle_noargs
|
8,369
|
def isnan(value):
    try:
        from math import isnan
        return isnan(value)
    except __HOLE__:
        return isinstance(value, float) and value != value
|
ImportError
|
dataset/ETHPy150Open Miserlou/django-easy-split/easy_split/stats.py/isnan
|
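The fallback uses the fact that NaN is the only float unequal to itself, so `value != value` is a dependency-free NaN test:
```python
nan = float('nan')
print(nan != nan)   # True: NaN compares unequal to itself
print(1.0 != 1.0)   # False for every other float
```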
8,370
|
def unpack(self, buf):
    dpkt.Packet.unpack(self, buf)
    self.auth = self.data[:self.len]
    buf = self.data[self.len:]
    import ip
    try:
        self.data = ip.IP.get_proto(self.nxt)(buf)
        setattr(self, self.data.__class__.__name__.lower(), self.data)
    except (__HOLE__, dpkt.UnpackError):
        self.data = buf
|
KeyError
|
dataset/ETHPy150Open dragondjf/QMarkdowner/dpkt/ah.py/AH.unpack
|
8,371
|
@bp.route('/<int:project_id>/builds/<id>/')
def build(project_id, id):
    project = get_project(project_id, for_management=False)
    if id == 'latest':
        build = project.get_latest_build(ref=request.args.get('ref'))
        if not build:
            return redirect(url_for('.settings', id=project_id))
    else:
        try:
            build_id = int(id)
        except __HOLE__:
            abort(404)
        build = project.builds.filter_by(id=build_id).first_or_404()
    job = build.jobs.first()  # TODO Show first not finished job
    return render_template(
        'projects/job.html',
        is_build_latest=(id == 'latest'),
        project=project,
        build=build,
        job=job)
|
ValueError
|
dataset/ETHPy150Open aromanovich/kozmic-ci/kozmic/projects/views.py/build
|
8,372
|
def load(branchname):
    data = json.load(open(os.path.expanduser(config.SAVE_FILE)))
    repo_name = get_repo_name()
    try:
        return data['%s:%s' % (repo_name, branchname)]
    except __HOLE__:
        # possibly one of the old ones
        return data[branchname]
|
KeyError
|
dataset/ETHPy150Open peterbe/bgg/bgg/lib/makediff.py/load
|
8,373
|
def pub_ListResources(self, credentials, options):
    """Return information about available resources or resources allocated to a slice
    List the resources at this aggregate in an RSpec: may be all resources,
    only those available for reservation, or only those already reserved for the given slice.
    """
    try:
        CredVerifier.checkValid(credentials, [])
        # Parse options
        compressed = options.get("geni_compressed", False)
        urn = options.get("geni_slice_urn", None)
        spec_version = options.get("geni_rspec_version")
        supported_spec = {'version': '3', 'type': 'GENI'}
        if spec_version:
            if spec_version != supported_spec:
                msg = "RSpec type/version not supported"
                propertyList = self.buildPropertyList(GENI_ERROR_CODE.BADVERSION, output=msg)
                return propertyList
        else:
            msg = "Required option geni_rspec_version missing"
            propertyList = self.buildPropertyList(GENI_ERROR_CODE.BADARGS, output=msg)
            return propertyList
        if urn:
            CredVerifier.checkValid(credentials, "getsliceresources", urn)
            self.recordAction("listresources", credentials, urn)
            sliver_urn = GeniDB.getSliverURN(urn)
            if sliver_urn is None:
                raise Exception("Sliver for slice URN (%s) does not exist" % (urn))
            else:
                rspec = GeniDB.getManifest(sliver_urn)
        else:
            self.recordAction("listresources", credentials)
            rspec = foam.geni.lib.getAdvertisement()
        if compressed:
            zrspec = zlib.compress(rspec)
            rspec = base64.b64encode(zrspec)
        propertyList = self.buildPropertyList(GENI_ERROR_CODE.SUCCESS, value=rspec)
    except ExpatError:
        msg = "Error parsing credential strings"
        propertyList = self.buildPropertyList(GENI_ERROR_CODE.BADARGS, output=msg)
        self._log.error(msg)
    except UnknownSlice as x:
        # Raised by GeniDB.getSliverURN()
        msg = "Attempt to list resources on sliver for unknown slice %s" % (urn)
        propertyList = self.buildPropertyList(GENI_ERROR_CODE.ERROR, output=msg)
        x.log(self._log, msg, logging.INFO)
    except xmlrpclib.Fault as x:
        # Something thrown via GCF, we'll presume it was something related to credentials
        msg = "GCF credential check failure: <%s>" % (x)
        propertyList = self.buildPropertyList(GENI_ERROR_CODE.ERROR, output=msg)
        self._log.info(msg)
        self._log.debug(x, exc_info=True)
    except __HOLE__ as x:
        # New GCF problem with user creds that have no gid_caller, probably
        msg = "GCF credential check failure: <%s>" % (x)
        propertyList = self.buildPropertyList(GENI_ERROR_CODE.ERROR, output=msg)
        self._log.info(msg)
        self._log.debug(x, exc_info=True)
    except Exception as e:
        msg = "Exception: %s" % str(e)
        propertyList = self.buildPropertyList(GENI_ERROR_CODE.ERROR, output=msg)
        self._log.exception(msg)
    finally:
        return propertyList
|
AttributeError
|
dataset/ETHPy150Open fp7-ofelia/ocf/ofam/src/src/foam/api/gapi2.py/AMAPIv2.pub_ListResources
|
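Note on the row above: when geni_compressed is set, the RSpec is zlib-compressed and then base64-encoded so the binary payload survives XML-RPC transport. A self-contained round trip of that encoding (Python 3 spelling with explicit bytes; the original is Python 2):

import base64
import zlib

rspec = '<rspec>' + 'x' * 1000 + '</rspec>'
wire = base64.b64encode(zlib.compress(rspec.encode('utf-8')))
back = zlib.decompress(base64.b64decode(wire)).decode('utf-8')
assert back == rspec and len(wire) < len(rspec)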
8,374
|
def addReader(self, reader):
"""
Implement L{IReactorFDSet.addReader}.
"""
fd = reader.fileno()
if fd not in self._reads:
try:
self._updateRegistration(fd, KQ_FILTER_READ, KQ_EV_ADD)
except __HOLE__:
pass
finally:
self._selectables[fd] = reader
self._reads.add(fd)
|
OSError
|
dataset/ETHPy150Open twisted/twisted/twisted/internet/kqreactor.py/KQueueReactor.addReader
|
8,375
|
def addWriter(self, writer):
"""
Implement L{IReactorFDSet.addWriter}.
"""
fd = writer.fileno()
if fd not in self._writes:
try:
self._updateRegistration(fd, KQ_FILTER_WRITE, KQ_EV_ADD)
except __HOLE__:
pass
finally:
self._selectables[fd] = writer
self._writes.add(fd)
|
OSError
|
dataset/ETHPy150Open twisted/twisted/twisted/internet/kqreactor.py/KQueueReactor.addWriter
|
8,376
|
def removeReader(self, reader):
"""
Implement L{IReactorFDSet.removeReader}.
"""
wasLost = False
try:
fd = reader.fileno()
except:
fd = -1
if fd == -1:
for fd, fdes in self._selectables.items():
if reader is fdes:
wasLost = True
break
else:
return
if fd in self._reads:
self._reads.remove(fd)
if fd not in self._writes:
del self._selectables[fd]
if not wasLost:
try:
self._updateRegistration(fd, KQ_FILTER_READ, KQ_EV_DELETE)
except __HOLE__:
pass
|
OSError
|
dataset/ETHPy150Open twisted/twisted/twisted/internet/kqreactor.py/KQueueReactor.removeReader
|
8,377
|
def removeWriter(self, writer):
"""
Implement L{IReactorFDSet.removeWriter}.
"""
wasLost = False
try:
fd = writer.fileno()
except:
fd = -1
if fd == -1:
for fd, fdes in self._selectables.items():
if writer is fdes:
wasLost = True
break
else:
return
if fd in self._writes:
self._writes.remove(fd)
if fd not in self._reads:
del self._selectables[fd]
if not wasLost:
try:
self._updateRegistration(fd, KQ_FILTER_WRITE, KQ_EV_DELETE)
except __HOLE__:
pass
|
OSError
|
dataset/ETHPy150Open twisted/twisted/twisted/internet/kqreactor.py/KQueueReactor.removeWriter
|
8,378
|
def doKEvent(self, timeout):
"""
Poll the kqueue for new events.
"""
if timeout is None:
timeout = 1
try:
events = self._kq.control([], len(self._selectables), timeout)
except __HOLE__ as e:
# Since this command blocks for potentially a while, it's possible
# EINTR can be raised for various reasons (for example, if the user
# hits ^C).
if e.errno == errno.EINTR:
return
else:
raise
_drdw = self._doWriteOrRead
for event in events:
fd = event.ident
try:
selectable = self._selectables[fd]
except KeyError:
# Handles the infrequent case where one selectable's
# handler disconnects another.
continue
else:
log.callWithLogger(selectable, _drdw, selectable, fd, event)
|
OSError
|
dataset/ETHPy150Open twisted/twisted/twisted/internet/kqreactor.py/KQueueReactor.doKEvent
|
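Note on the row above: doKEvent treats EINTR as a benign wake-up, so a blocking kernel wait interrupted by a signal (e.g. ^C) returns no events instead of raising. A sketch of the same idiom around the portable select() call rather than kqueue; since PEP 475 (Python 3.5) the interpreter retries EINTR automatically, so the guard matters mainly on older versions:

import errno
import select

def wait_readable(fds, timeout):
    # Return readable fds, treating an interrupted syscall as "nothing yet".
    try:
        readable, _, _ = select.select(fds, [], [], timeout)
        return readable
    except OSError as e:  # Python 3; on Python 2 this was select.error
        if e.errno == errno.EINTR:
            return []
        raise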
8,379
|
def getAppBundleID(path):
"""Returns CFBundleIdentifier if available for application at path."""
infopath = os.path.join(path, 'Contents', 'Info.plist')
if os.path.exists(infopath):
try:
plist = FoundationPlist.readPlist(infopath)
if 'CFBundleIdentifier' in plist:
return plist['CFBundleIdentifier']
except (__HOLE__,
FoundationPlist.NSPropertyListSerializationException):
pass
return None
|
AttributeError
|
dataset/ETHPy150Open munki/munki/code/client/munkilib/updatecheck.py/getAppBundleID
|
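Note on the row above: FoundationPlist is munki's own plist reader. With only the standard library, plistlib (Python 3.4+) can read the same Info.plist; a sketch of the equivalent lookup, not munki's code:

import os
import plistlib

def get_bundle_id(bundle_path):
    infopath = os.path.join(bundle_path, 'Contents', 'Info.plist')
    try:
        with open(infopath, 'rb') as f:
            return plistlib.load(f).get('CFBundleIdentifier')
    except (OSError, plistlib.InvalidFileException):
        return None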
8,380
|
def isItemInInstallInfo(manifestitem_pl, thelist, vers=''):
"""Determines if an item is in a manifest plist.
Returns True if the manifest item has already
been processed (it's in the list) and, optionally,
the version is the same or greater.
"""
for item in thelist:
try:
if item['name'] == manifestitem_pl['name']:
if not vers:
return True
#if the version already installed or processed to be
#installed is the same or greater, then we're good.
if item.get('installed') and (compareVersions(
item.get('installed_version'), vers) in (1, 2)):
return True
if (compareVersions(
item.get('version_to_install'), vers) in (1, 2)):
return True
except __HOLE__:
# item is missing 'name', so doesn't match
pass
return False
|
KeyError
|
dataset/ETHPy150Open munki/munki/code/client/munkilib/updatecheck.py/isItemInInstallInfo
|
8,381
|
def updateAvailableLicenseSeats(installinfo):
'''Records # of available seats for each optional install'''
license_info_url = munkicommon.pref('LicenseInfoURL')
if not license_info_url:
# nothing to do!
return
if not installinfo.get('optional_installs'):
# nothing to do!
return
license_info = {}
items_to_check = [item['name']
for item in installinfo['optional_installs']
if item.get('licensed_seat_info_available')
and not item['installed']]
# complicated logic here to 'batch' process our GET requests but
# keep them under 256 characters each
start_index = 0
# Use ampersand when the license_info_url contains a ?
q_char = "?"
if "?" in license_info_url:
q_char = "&"
while start_index < len(items_to_check):
end_index = len(items_to_check)
while True:
query_items = ['name=' + quote_plus(item)
for item in items_to_check[start_index:end_index]]
querystring = q_char + '&'.join(query_items)
url = license_info_url + querystring
if len(url) < 256:
break
# drop an item and see if we're under 256 characters
end_index = end_index - 1
munkicommon.display_debug1('Fetching licensed seat data from %s', url)
try:
license_data = getDataFromURL(url)
munkicommon.display_debug1('Got: %s', license_data)
license_dict = FoundationPlist.readPlistFromString(
license_data)
except (fetch.MunkiDownloadError, fetch.GurlDownloadError), err:
# problem fetching from URL
munkicommon.display_error('Error from %s: %s', url, err)
except FoundationPlist.FoundationPlistException:
# no data or bad data from URL
munkicommon.display_error(
'Bad license data from %s: %s', url, license_data)
else:
# merge data from license_dict into license_info
license_info.update(license_dict)
start_index = end_index
# use license_info to update our remaining seats
for item in installinfo['optional_installs']:
if item['name'] in items_to_check:
munkicommon.display_debug2(
'Looking for license info for %s', item['name'])
# record available seats for this item
seats_available = False
seat_info = license_info.get(item['name'], 0)
try:
seats_available = int(seat_info) > 0
munkicommon.display_debug1(
'Recording available seats for %s: %s',
item['name'], seats_available)
except __HOLE__:
munkicommon.display_warning(
'Bad license data for %s: %s', item['name'], seat_info)
item['licensed_seats_available'] = seats_available
|
ValueError
|
dataset/ETHPy150Open munki/munki/code/client/munkilib/updatecheck.py/updateAvailableLicenseSeats
|
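Note on the row above: the batching loop grows each GET request one item at a time and backs off from the end until the whole URL fits under 256 characters. A standalone sketch of that windowing logic (names are illustrative); the end == start + 1 guard is added here because a single oversized name would otherwise stall the loop:

def batch_urls(base_url, names, limit=256):
    start = 0
    while start < len(names):
        end = len(names)
        while True:
            query = '&'.join('name=' + n for n in names[start:end])
            url = base_url + '?' + query
            if len(url) < limit or end == start + 1:
                break
            end -= 1  # drop the last item and retry
        yield url
        start = end

batches = list(batch_urls('http://example.com/lic', ['a' * 50] * 10))
assert all(len(u) < 256 for u in batches)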
8,382
|
def processInstall(manifestitem, cataloglist, installinfo,
is_managed_update=False):
"""Processes a manifest item for install. Determines if it needs to be
installed, and if so, if any items it is dependent on need to
be installed first. Installation detail is added to
installinfo['managed_installs']
Calls itself recursively as it processes dependencies.
Returns a boolean; when processing dependencies, a false return
will stop the installation of a dependent item
"""
if munkicommon.munkistatusoutput:
# reset progress indicator and detail field
munkistatus.percent('-1')
munkistatus.detail('')
manifestitemname = os.path.split(manifestitem)[1]
munkicommon.display_debug1(
'* Processing manifest item %s for install', manifestitemname)
(manifestitemname_withoutversion, includedversion) = nameAndVersion(
manifestitemname)
# have we processed this already?
if manifestitemname in installinfo['processed_installs']:
munkicommon.display_debug1(
'%s has already been processed for install.', manifestitemname)
return True
elif (manifestitemname_withoutversion in
installinfo['processed_uninstalls']):
munkicommon.display_warning(
'Will not process %s for install because it has already '
'been processed for uninstall!', manifestitemname)
return False
item_pl = getItemDetail(manifestitem, cataloglist)
if not item_pl:
munkicommon.display_warning(
'Could not process item %s for install. '
'No pkginfo found in catalogs: %s ',
manifestitem, ', '.join(cataloglist))
return False
elif is_managed_update:
# we're processing this as a managed update, so don't
# add it to the processed_installs list
pass
else:
# we found it, so add it to our list of processed installs
# so we don't process it again in the future
munkicommon.display_debug2('Adding %s to list of processed installs'
% manifestitemname)
installinfo['processed_installs'].append(manifestitemname)
if isItemInInstallInfo(item_pl, installinfo['managed_installs'],
vers=item_pl.get('version')):
# has this item already been added to the list of things to install?
munkicommon.display_debug1(
'%s is or will be installed.', manifestitemname)
return True
# check dependencies
dependenciesMet = True
# there are two kinds of dependencies/relationships.
#
# 'requires' are prerequisites:
# package A requires package B be installed first.
# if package A is removed, package B is unaffected.
# requires can be a one to many relationship.
#
# The second type of relationship is 'update_for'.
# This signifies that the current package should be considered an update
# for the packages listed in the 'update_for' array. When processing a
# package, we look through the catalogs for other packages that declare
# they are updates for the current package and install them if needed.
# This can be a one-to-many relationship - one package can be an update
# for several other packages; for example, 'PhotoshopCS4update-11.0.1'
# could be an update for PhotoshopCS4 and for AdobeCS4DesignSuite.
#
# When removing an item, any updates for that item are removed as well.
if 'requires' in item_pl:
dependencies = item_pl['requires']
# fix things if 'requires' was specified as a string
# instead of an array of strings
if isinstance(dependencies, basestring):
dependencies = [dependencies]
for item in dependencies:
munkicommon.display_detail(
'%s-%s requires %s. Getting info on %s...'
% (item_pl.get('name', manifestitemname),
item_pl.get('version', ''), item, item))
success = processInstall(item, cataloglist, installinfo,
is_managed_update=is_managed_update)
if not success:
dependenciesMet = False
iteminfo = {}
iteminfo['name'] = item_pl.get('name', '')
iteminfo['display_name'] = item_pl.get('display_name', iteminfo['name'])
iteminfo['description'] = item_pl.get('description', '')
if not dependenciesMet:
munkicommon.display_warning('Didn\'t attempt to install %s '
'because could not resolve all '
'dependencies.', manifestitemname)
# add information to managed_installs so we have some feedback
# to display in MSC.app
iteminfo['installed'] = False
iteminfo['note'] = ('Can\'t install %s because could not resolve all '
'dependencies.' % iteminfo['display_name'])
installinfo['managed_installs'].append(iteminfo)
return False
installed_state = installedState(item_pl)
if installed_state == 0:
munkicommon.display_detail('Need to install %s', manifestitemname)
iteminfo['installer_item_size'] = item_pl.get(
'installer_item_size', 0)
iteminfo['installed_size'] = item_pl.get(
'installed_size', iteminfo['installer_item_size'])
try:
# Get a timestamp, then download the installer item.
start = datetime.datetime.now()
if item_pl.get('installer_type', 0) == 'nopkg':
# Packageless install
download_speed = 0
filename = 'packageless_install'
else:
if download_installeritem(item_pl, installinfo):
# Record the download speed to the InstallResults output.
end = datetime.datetime.now()
download_seconds = (end - start).seconds
try:
if iteminfo['installer_item_size'] < 1024:
# ignore downloads under 1 MB or speeds will
# be skewed.
download_speed = 0
else:
# installer_item_size is KBytes, so divide
# by seconds.
download_speed = int(
iteminfo['installer_item_size'] /
download_seconds)
except (__HOLE__, ValueError, ZeroDivisionError):
download_speed = 0
else:
# Item was already in cache; set download_speed to 0.
download_speed = 0
filename = getInstallerItemBasename(
item_pl['installer_item_location'])
iteminfo['download_kbytes_per_sec'] = download_speed
if download_speed:
munkicommon.display_detail(
'%s downloaded at %d KB/s', filename, download_speed)
# required keys
iteminfo['installer_item'] = filename
iteminfo['installed'] = False
iteminfo['version_to_install'] = item_pl.get('version', 'UNKNOWN')
# we will ignore the unattended_install key if the item needs a
# restart or logout...
if (item_pl.get('unattended_install') or
item_pl.get('forced_install')):
if item_pl.get('RestartAction', 'None') != 'None':
munkicommon.display_warning(
'Ignoring unattended_install key for %s '
'because RestartAction is %s.',
item_pl['name'], item_pl.get('RestartAction'))
else:
iteminfo['unattended_install'] = True
# optional keys
optional_keys = ['suppress_bundle_relocation',
'installer_choices_xml',
'installer_environment',
'adobe_install_info',
'RestartAction',
'installer_type',
'adobe_package_name',
'package_path',
'blocking_applications',
'installs',
'requires',
'update_for',
'payloads',
'preinstall_script',
'postinstall_script',
'items_to_copy', # used w/ copy_from_dmg
'copy_local', # used w/ AdobeCS5 Updaters
'force_install_after_date',
'apple_item',
'category',
'developer',
'icon_name',
'PayloadIdentifier',
'icon_hash',
'OnDemand']
for key in optional_keys:
if key in item_pl:
iteminfo[key] = item_pl[key]
if not 'apple_item' in iteminfo:
# admin did not explicitly mark this item; let's determine if
# it's from Apple
if isAppleItem(item_pl):
munkicommon.log(
'Marking %s as apple_item - this will block '
'Apple SUS updates' % iteminfo['name'])
iteminfo['apple_item'] = True
installinfo['managed_installs'].append(iteminfo)
update_list = []
# (manifestitemname_withoutversion, includedversion) =
# nameAndVersion(manifestitemname)
if includedversion:
# a specific version was specified in the manifest
# so look only for updates for this specific version
update_list = lookForUpdatesForVersion(
manifestitemname_withoutversion,
includedversion, cataloglist)
else:
# didn't specify a specific version, so
# now look for all updates for this item
update_list = lookForUpdates(manifestitemname_withoutversion,
cataloglist)
# now append any updates specifically
# for the version to be installed
update_list.extend(
lookForUpdatesForVersion(
manifestitemname_withoutversion,
iteminfo['version_to_install'],
cataloglist))
for update_item in update_list:
# call processInstall recursively so we get the
# latest version and dependencies
dummy_result = processInstall(
update_item, cataloglist, installinfo,
is_managed_update=is_managed_update)
return True
except fetch.PackageVerificationError:
munkicommon.display_warning(
'Can\'t install %s because the integrity check failed.',
manifestitem)
iteminfo['installed'] = False
iteminfo['note'] = 'Integrity check failed'
installinfo['managed_installs'].append(iteminfo)
return False
except fetch.GurlDownloadError, errmsg:
munkicommon.display_warning(
'Download of %s failed: %s', manifestitem, errmsg)
iteminfo['installed'] = False
iteminfo['note'] = 'Download failed (%s)' % errmsg
installinfo['managed_installs'].append(iteminfo)
return False
except fetch.MunkiDownloadError, errmsg:
munkicommon.display_warning(
'Can\'t install %s because: %s', manifestitemname, errmsg)
iteminfo['installed'] = False
iteminfo['note'] = '%s' % errmsg
installinfo['managed_installs'].append(iteminfo)
return False
else:
iteminfo['installed'] = True
# record installed size for reporting
iteminfo['installed_size'] = item_pl.get(
'installed_size', item_pl.get('installer_item_size', 0))
if installed_state == 1:
# just use the version from the pkginfo
iteminfo['installed_version'] = item_pl['version']
else:
# might be newer; attempt to figure out the version
installed_version = getInstalledVersion(item_pl)
if installed_version == "UNKNOWN":
installed_version = '(newer than %s)' % item_pl['version']
iteminfo['installed_version'] = installed_version
installinfo['managed_installs'].append(iteminfo)
# remove included version number if any
(name, includedversion) = nameAndVersion(manifestitemname)
munkicommon.display_detail('%s version %s (or newer) is already '
'installed.', name, item_pl['version'])
update_list = []
if not includedversion:
# no specific version is specified;
# the item is already installed;
# now look for updates for this item
update_list = lookForUpdates(name, cataloglist)
# and also any for this specific version
installed_version = iteminfo['installed_version']
if not installed_version.startswith('(newer than '):
update_list.extend(
lookForUpdatesForVersion(
name, installed_version, cataloglist))
elif compareVersions(
includedversion, iteminfo['installed_version']) == 1:
# manifest specifies a specific version
# if that's what's installed, look for any updates
# specific to this version
update_list = lookForUpdatesForVersion(
manifestitemname_withoutversion, includedversion, cataloglist)
# if we have any updates, process them
for update_item in update_list:
# call processInstall recursively so we get updates
# and any dependencies
dummy_result = processInstall(
update_item, cataloglist, installinfo,
is_managed_update=is_managed_update)
return True
|
TypeError
|
dataset/ETHPy150Open munki/munki/code/client/munkilib/updatecheck.py/processInstall
|
8,383
|
def processManifestForKey(manifest, manifest_key, installinfo,
parentcatalogs=None):
"""Processes keys in manifests to build the lists of items to install and
remove.
Can be recursive if manifests include other manifests.
Probably doesn't handle circular manifest references well.
manifest can be a path to a manifest file or a dictionary object.
"""
if isinstance(manifest, basestring):
munkicommon.display_debug1(
"** Processing manifest %s for %s" %
(os.path.basename(manifest), manifest_key))
manifestdata = getManifestData(manifest)
else:
manifestdata = manifest
manifest = 'embedded manifest'
cataloglist = manifestdata.get('catalogs')
if cataloglist:
getCatalogs(cataloglist)
elif parentcatalogs:
cataloglist = parentcatalogs
if not cataloglist:
munkicommon.display_warning('Manifest %s has no catalogs', manifest)
return
nestedmanifests = manifestdata.get('included_manifests')
if nestedmanifests:
for item in nestedmanifests:
try:
nestedmanifestpath = getmanifest(item)
except ManifestException:
nestedmanifestpath = None
if munkicommon.stopRequested():
return {}
if nestedmanifestpath:
processManifestForKey(nestedmanifestpath, manifest_key,
installinfo, cataloglist)
conditionalitems = manifestdata.get('conditional_items')
if conditionalitems:
munkicommon.display_debug1(
'** Processing conditional_items in %s', manifest)
# conditionalitems should be an array of dicts
# each dict has a predicate; the rest consists of the
# same keys as a manifest
for item in conditionalitems:
try:
predicate = item['condition']
except (__HOLE__, KeyError):
munkicommon.display_warning(
'Missing predicate for conditional_item %s', item)
continue
except BaseException:
munkicommon.display_warning(
'Conditional item is malformed: %s', item)
continue
INFO_OBJECT['catalogs'] = cataloglist
if predicateEvaluatesAsTrue(predicate):
conditionalmanifest = item
processManifestForKey(conditionalmanifest, manifest_key,
installinfo, cataloglist)
items = manifestdata.get(manifest_key)
if items:
for item in items:
if munkicommon.stopRequested():
return {}
if manifest_key == 'managed_installs':
dummy_result = processInstall(
item, cataloglist, installinfo)
elif manifest_key == 'managed_updates':
processManagedUpdate(item, cataloglist, installinfo)
elif manifest_key == 'optional_installs':
processOptionalInstall(item, cataloglist, installinfo)
elif manifest_key == 'managed_uninstalls':
dummy_result = processRemoval(
item, cataloglist, installinfo)
|
AttributeError
|
dataset/ETHPy150Open munki/munki/code/client/munkilib/updatecheck.py/processManifestForKey
|
8,384
|
def getManifestData(manifestpath):
'''Reads a manifest file, returns a dictionary-like object.'''
plist = {}
try:
plist = FoundationPlist.readPlist(manifestpath)
except FoundationPlist.NSPropertyListSerializationException:
munkicommon.display_error('Could not read plist: %s', manifestpath)
if os.path.exists(manifestpath):
try:
os.unlink(manifestpath)
except __HOLE__, err:
munkicommon.display_error(
'Failed to delete plist: %s', unicode(err))
else:
munkicommon.display_error('plist does not exist.')
return plist
|
OSError
|
dataset/ETHPy150Open munki/munki/code/client/munkilib/updatecheck.py/getManifestData
|
8,385
|
def getManifestValueForKey(manifestpath, keyname):
"""Returns a value for keyname in manifestpath"""
plist = getManifestData(manifestpath)
try:
return plist.get(keyname, None)
except __HOLE__, err:
munkicommon.display_error(
'Failed to get manifest value for key: %s (%s)',
manifestpath, keyname)
munkicommon.display_error(
'Manifest is likely corrupt: %s', unicode(err))
return None
# global to hold our catalog DBs
|
AttributeError
|
dataset/ETHPy150Open munki/munki/code/client/munkilib/updatecheck.py/getManifestValueForKey
|
8,386
|
def getCatalogs(cataloglist):
"""Retrieves the catalogs from the server and populates our catalogs
dictionary.
"""
#global CATALOG
catalogbaseurl = munkicommon.pref('CatalogURL') or \
munkicommon.pref('SoftwareRepoURL') + '/catalogs/'
if not catalogbaseurl.endswith('?') and not catalogbaseurl.endswith('/'):
catalogbaseurl = catalogbaseurl + '/'
munkicommon.display_debug2('Catalog base URL is: %s', catalogbaseurl)
catalog_dir = os.path.join(munkicommon.pref('ManagedInstallDir'),
'catalogs')
for catalogname in cataloglist:
if not catalogname in CATALOG:
catalogurl = catalogbaseurl + urllib2.quote(
catalogname.encode('UTF-8'))
catalogpath = os.path.join(catalog_dir, catalogname)
munkicommon.display_detail('Getting catalog %s...', catalogname)
message = 'Retrieving catalog "%s"...' % catalogname
try:
dummy_value = getResourceIfChangedAtomically(
catalogurl, catalogpath, message=message)
except fetch.MunkiDownloadError, err:
munkicommon.display_error(
'Could not retrieve catalog %s from server: %s',
catalogname, err)
else:
try:
catalogdata = FoundationPlist.readPlist(catalogpath)
except FoundationPlist.NSPropertyListSerializationException:
munkicommon.display_error(
'Retrieved catalog %s is invalid.', catalogname)
try:
os.unlink(catalogpath)
except (OSError, __HOLE__):
pass
else:
CATALOG[catalogname] = makeCatalogDB(catalogdata)
|
IOError
|
dataset/ETHPy150Open munki/munki/code/client/munkilib/updatecheck.py/getCatalogs
|
8,387
|
def getmanifest(partialurl, suppress_errors=False):
"""Gets a manifest from the server.
Returns:
string local path to the downloaded manifest.
"""
#global MANIFESTS
manifestbaseurl = (munkicommon.pref('ManifestURL') or
munkicommon.pref('SoftwareRepoURL') + '/manifests/')
if (not manifestbaseurl.endswith('?') and
not manifestbaseurl.endswith('/')):
manifestbaseurl = manifestbaseurl + '/'
manifest_dir = os.path.join(munkicommon.pref('ManagedInstallDir'),
'manifests')
if (partialurl.startswith('http://') or
partialurl.startswith('https://') or
partialurl.startswith('file:/')):
# then it's really a request for the client's primary manifest
manifestdisplayname = os.path.basename(partialurl)
manifesturl = partialurl
partialurl = 'client_manifest'
manifestname = 'client_manifest.plist'
else:
# request for nested manifest
manifestdisplayname = partialurl
manifestname = partialurl
manifesturl = manifestbaseurl + urllib2.quote(partialurl)
if manifestname in MANIFESTS:
return MANIFESTS[manifestname]
munkicommon.display_debug2('Manifest base URL is: %s', manifestbaseurl)
munkicommon.display_detail('Getting manifest %s...', manifestdisplayname)
manifestpath = os.path.join(manifest_dir, manifestname)
# Create the folder the manifest shall be stored in
destinationdir = os.path.dirname(manifestpath)
try:
os.makedirs(destinationdir)
except __HOLE__ as e:
# OSError will be raised if destinationdir exists, ignore this case
if not os.path.isdir(destinationdir):
if not suppress_errors:
munkicommon.display_error(
'Could not create folder to store manifest %s: %s',
manifestdisplayname, e
)
return None
message = 'Retrieving list of software for this machine...'
try:
dummy_value = getResourceIfChangedAtomically(
manifesturl, manifestpath, message=message)
except fetch.MunkiDownloadError, err:
if not suppress_errors:
munkicommon.display_error(
'Could not retrieve manifest %s from the server: %s',
manifestdisplayname, err)
return None
try:
# read plist to see if it is valid
dummy_data = FoundationPlist.readPlist(manifestpath)
except FoundationPlist.NSPropertyListSerializationException:
errormsg = 'manifest returned for %s is invalid.' % manifestdisplayname
munkicommon.display_error(errormsg)
try:
os.unlink(manifestpath)
except (OSError, IOError):
pass
raise ManifestException(errormsg)
else:
# plist is valid
MANIFESTS[manifestname] = manifestpath
return manifestpath
|
OSError
|
dataset/ETHPy150Open munki/munki/code/client/munkilib/updatecheck.py/getmanifest
|
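Note on the row above: the makedirs call tolerates a pre-existing directory by catching OSError and re-checking isdir, the standard Python 2 idiom. On Python 3 the same intent is a one-liner (a sketch, not munki's code):

import os

def ensure_dir(path):
    # Python 3 replacement for the try/except OSError + isdir dance above
    os.makedirs(path, exist_ok=True)

ensure_dir('/tmp/manifests/nested')  # no error when it already exists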
8,388
|
def cleanUpManifests():
"""Removes any manifest files that are no longer in use by this client"""
manifest_dir = os.path.join(
munkicommon.pref('ManagedInstallDir'), 'manifests')
exceptions = [
"SelfServeManifest"
]
for (dirpath, dirnames, filenames) in os.walk(manifest_dir, topdown=False):
for name in filenames:
if name in exceptions:
continue
abs_path = os.path.join(dirpath, name)
rel_path = abs_path[len(manifest_dir):].lstrip("/")
if rel_path not in MANIFESTS.keys():
os.unlink(abs_path)
# Try to remove the directory
# (rmdir will fail if directory is not empty)
try:
if dirpath != manifest_dir:
os.rmdir(dirpath)
except __HOLE__:
pass
|
OSError
|
dataset/ETHPy150Open munki/munki/code/client/munkilib/updatecheck.py/cleanUpManifests
|
8,389
|
def download_icons(item_list):
'''Attempts to download icons (actually png files) for items in
item_list'''
icon_list = []
icon_known_exts = ['.bmp', '.gif', '.icns', '.jpg', '.jpeg', '.png', '.psd',
'.tga', '.tif', '.tiff', '.yuv']
icon_base_url = (munkicommon.pref('IconURL') or
munkicommon.pref('SoftwareRepoURL') + '/icons/')
icon_base_url = icon_base_url.rstrip('/') + '/'
icon_dir = os.path.join(munkicommon.pref('ManagedInstallDir'), 'icons')
munkicommon.display_debug2('Icon base URL is: %s', icon_base_url)
for item in item_list:
icon_name = item.get('icon_name') or item['name']
pkginfo_icon_hash = item.get('icon_hash')
if not os.path.splitext(icon_name)[1] in icon_known_exts:
icon_name += '.png'
icon_list.append(icon_name)
icon_url = icon_base_url + urllib2.quote(icon_name.encode('UTF-8'))
icon_path = os.path.join(icon_dir, icon_name)
if os.path.isfile(icon_path):
xattr_hash = fetch.getxattr(icon_path, fetch.XATTR_SHA)
if not xattr_hash:
xattr_hash = munkicommon.getsha256hash(icon_path)
fetch.writeCachedChecksum(icon_path, xattr_hash)
else:
xattr_hash = 'nonexistent'
icon_subdir = os.path.dirname(icon_path)
if not os.path.isdir(icon_subdir):
try:
os.makedirs(icon_subdir, 0755)
except OSError, err:
munkicommon.display_error(
'Could not create %s' % icon_subdir)
return
if pkginfo_icon_hash != xattr_hash:
item_name = item.get('display_name') or item['name']
message = 'Getting icon %s for %s...' % (icon_name, item_name)
try:
dummy_value = getResourceIfChangedAtomically(
icon_url, icon_path, message=message)
except fetch.MunkiDownloadError, err:
munkicommon.display_debug1(
'Could not retrieve icon %s from the server: %s',
icon_name, err)
else:
if os.path.isfile(icon_path):
fetch.writeCachedChecksum(icon_path)
# remove no-longer needed icons from the local directory
for (dirpath, dummy_dirnames, filenames) in os.walk(
icon_dir, topdown=False):
for filename in filenames:
icon_path = os.path.join(dirpath, filename)
rel_path = icon_path[len(icon_dir):].lstrip('/')
if rel_path not in icon_list:
try:
os.unlink(icon_path)
except (IOError, OSError), err:
pass
if len(munkicommon.listdir(dirpath)) == 0:
# did we empty out this directory (or is it already empty)?
# if so, remove it
try:
os.rmdir(dirpath)
except (IOError, __HOLE__), err:
pass
|
OSError
|
dataset/ETHPy150Open munki/munki/code/client/munkilib/updatecheck.py/download_icons
|
8,390
|
def download_client_resources():
"""Download client customization resources (if any)."""
# Munki's preferences can specify an explicit name
# under ClientResourcesFilename
# if that doesn't exist, use the primary manifest name as the
# filename. If that fails, try site_default.zip
filenames = []
resources_name = munkicommon.pref('ClientResourcesFilename')
if resources_name:
if os.path.splitext(resources_name)[1] != '.zip':
resources_name += '.zip'
filenames.append(resources_name)
else:
filenames.append(munkicommon.report['ManifestName'] + '.zip')
filenames.append('site_default.zip')
resource_base_url = (
munkicommon.pref('ClientResourceURL') or
munkicommon.pref('SoftwareRepoURL') + '/client_resources/')
resource_base_url = resource_base_url.rstrip('/') + '/'
resource_dir = os.path.join(
munkicommon.pref('ManagedInstallDir'), 'client_resources')
munkicommon.display_debug2(
'Client resources base URL is: %s', resource_base_url)
# make sure local resource directory exists
if not os.path.isdir(resource_dir):
try:
os.makedirs(resource_dir, 0755)
except __HOLE__, err:
munkicommon.display_error(
'Could not create %s' % resource_dir)
return
resource_archive_path = os.path.join(resource_dir, 'custom.zip')
message = 'Getting client resources...'
downloaded_resource_path = None
for filename in filenames:
resource_url = resource_base_url + filename
try:
dummy_value = getResourceIfChangedAtomically(
resource_url, resource_archive_path, message=message)
downloaded_resource_path = resource_archive_path
break
except fetch.MunkiDownloadError, err:
munkicommon.display_debug1(
'Could not retrieve client resources with name %s: %s',
filename, err)
if downloaded_resource_path is None:
# make sure we don't have an old custom.zip hanging around
if os.path.exists(resource_archive_path):
try:
os.unlink(resource_archive_path)
except (OSError, IOError), err:
munkicommon.display_error(
'Could not remove stale %s: %s', resource_archive_path, err)
|
OSError
|
dataset/ETHPy150Open munki/munki/code/client/munkilib/updatecheck.py/download_client_resources
|
8,391
|
def check(client_id='', localmanifestpath=None):
"""Checks for available new or updated managed software, downloading
installer items if needed. Returns 1 if there are available updates,
0 if there are no available updates, and -1 if there were errors."""
global MACHINE
munkicommon.getMachineFacts()
MACHINE = munkicommon.getMachineFacts()
munkicommon.report['MachineInfo'] = MACHINE
global CONDITIONS
munkicommon.getConditions()
CONDITIONS = munkicommon.getConditions()
keychain_obj = keychain.MunkiKeychain()
ManagedInstallDir = munkicommon.pref('ManagedInstallDir')
if munkicommon.munkistatusoutput:
munkistatus.activate()
munkicommon.log('### Beginning managed software check ###')
munkicommon.display_status_major('Checking for available updates...')
if localmanifestpath:
mainmanifestpath = localmanifestpath
else:
mainmanifestpath = getPrimaryManifest(client_id)
if munkicommon.stopRequested():
return 0
installinfo = {}
if mainmanifestpath:
# initialize our installinfo record
installinfo['processed_installs'] = []
installinfo['processed_uninstalls'] = []
installinfo['managed_updates'] = []
installinfo['optional_installs'] = []
installinfo['managed_installs'] = []
installinfo['removals'] = []
# set up INFO_OBJECT for conditional item comparisons
makePredicateInfoObject()
munkicommon.report['Conditions'] = INFO_OBJECT
munkicommon.display_detail('**Checking for installs**')
processManifestForKey(mainmanifestpath, 'managed_installs',
installinfo)
if munkicommon.stopRequested():
return 0
if munkicommon.munkistatusoutput:
# reset progress indicator and detail field
munkistatus.message('Checking for additional changes...')
munkistatus.percent('-1')
munkistatus.detail('')
# now generate a list of items to be uninstalled
munkicommon.display_detail('**Checking for removals**')
processManifestForKey(mainmanifestpath, 'managed_uninstalls',
installinfo)
if munkicommon.stopRequested():
return 0
# now check for implicit removals
# use catalogs from main manifest
cataloglist = getManifestValueForKey(mainmanifestpath, 'catalogs')
autoremovalitems = getAutoRemovalItems(installinfo, cataloglist)
if autoremovalitems:
munkicommon.display_detail('**Checking for implicit removals**')
for item in autoremovalitems:
if munkicommon.stopRequested():
return 0
dummy_result = processRemoval(item, cataloglist, installinfo)
# look for additional updates
munkicommon.display_detail('**Checking for managed updates**')
processManifestForKey(mainmanifestpath, 'managed_updates',
installinfo)
if munkicommon.stopRequested():
return 0
# build list of optional installs
processManifestForKey(mainmanifestpath, 'optional_installs',
installinfo)
if munkicommon.stopRequested():
return 0
# verify available license seats for optional installs
if installinfo.get('optional_installs'):
updateAvailableLicenseSeats(installinfo)
# process LocalOnlyManifest installs
localonlymanifestname = munkicommon.pref('LocalOnlyManifest')
if localonlymanifestname:
localonlymanifest = os.path.join(
ManagedInstallDir, 'manifests', localonlymanifestname)
# if the manifest already exists, the name is being reused
if localonlymanifestname in MANIFESTS:
munkicommon.display_error(
"LocalOnlyManifest %s has the same name as an existing " \
"manifest, skipping...", localonlymanifestname
)
else:
MANIFESTS[localonlymanifestname] = localonlymanifest
if os.path.exists(localonlymanifest):
# use catalogs from main manifest for local only manifest
cataloglist = getManifestValueForKey(
mainmanifestpath, 'catalogs')
munkicommon.display_detail(
'**Processing local-only choices**'
)
localonlyinstalls = getManifestValueForKey(
localonlymanifest, 'managed_installs') or []
for item in localonlyinstalls:
dummy_result = processInstall(
item,
cataloglist,
installinfo
)
localonlyuninstalls = getManifestValueForKey(
localonlymanifest, 'managed_uninstalls') or []
for item in localonlyuninstalls:
dummy_result = processRemoval(
item,
cataloglist,
installinfo
)
# now process any self-serve choices
usermanifest = '/Users/Shared/.SelfServeManifest'
selfservemanifest = os.path.join(
ManagedInstallDir, 'manifests', 'SelfServeManifest')
if os.path.exists(usermanifest):
# copy user-generated SelfServeManifest to our
# ManagedInstallDir
try:
plist = FoundationPlist.readPlist(usermanifest)
if plist:
FoundationPlist.writePlist(plist, selfservemanifest)
# now remove the user-generated manifest
try:
os.unlink(usermanifest)
except OSError:
pass
except FoundationPlist.FoundationPlistException:
# problem reading the usermanifest
# better remove it
munkicommon.display_error('Could not read %s', usermanifest)
try:
os.unlink(usermanifest)
except OSError:
pass
if os.path.exists(selfservemanifest):
# use catalogs from main manifest for self-serve manifest
cataloglist = getManifestValueForKey(
mainmanifestpath, 'catalogs')
munkicommon.display_detail('**Processing self-serve choices**')
selfserveinstalls = getManifestValueForKey(selfservemanifest,
'managed_installs')
# build list of items in the optional_installs list
# that have not exceeded available seats
available_optional_installs = [
item['name']
for item in installinfo.get('optional_installs', [])
if (not 'licensed_seats_available' in item
or item['licensed_seats_available'])]
if selfserveinstalls:
# filter the list, removing any items not in the current list
# of available self-serve installs
selfserveinstalls = [item for item in selfserveinstalls
if item in available_optional_installs]
for item in selfserveinstalls:
dummy_result = processInstall(
item, cataloglist, installinfo)
# we don't need to filter uninstalls
selfserveuninstalls = getManifestValueForKey(
selfservemanifest, 'managed_uninstalls') or []
for item in selfserveuninstalls:
dummy_result = processRemoval(item, cataloglist, installinfo)
# update optional_installs with install/removal info
for item in installinfo['optional_installs']:
if (not item.get('installed') and
isItemInInstallInfo(
item, installinfo['managed_installs'])):
item['will_be_installed'] = True
elif (item.get('installed') and
isItemInInstallInfo(item, installinfo['removals'])):
item['will_be_removed'] = True
# filter managed_installs to get items already installed
installed_items = [item.get('name', '')
for item in installinfo['managed_installs']
if item.get('installed')]
# filter managed_installs to get problem items:
# not installed, but no installer item
problem_items = [item
for item in installinfo['managed_installs']
if item.get('installed') == False and
not item.get('installer_item')]
# filter removals to get items already removed
# (or never installed)
removed_items = [item.get('name', '')
for item in installinfo['removals']
if item.get('installed') == False]
if os.path.exists(selfservemanifest):
# for any item in the managed_uninstalls in the self-serve
# manifest that is not installed, we should remove it from
# the list
try:
plist = FoundationPlist.readPlist(selfservemanifest)
except FoundationPlist.FoundationPlistException:
pass
else:
plist['managed_uninstalls'] = [
item for item in plist.get('managed_uninstalls', [])
if item not in removed_items]
try:
FoundationPlist.writePlist(plist, selfservemanifest)
except FoundationPlist.FoundationPlistException:
pass
# record detail before we throw it away...
munkicommon.report['ManagedInstalls'] = installinfo['managed_installs']
munkicommon.report['InstalledItems'] = installed_items
munkicommon.report['ProblemInstalls'] = problem_items
munkicommon.report['RemovedItems'] = removed_items
munkicommon.report['managed_installs_list'] = installinfo[
'processed_installs']
munkicommon.report['managed_uninstalls_list'] = installinfo[
'processed_uninstalls']
munkicommon.report['managed_updates_list'] = installinfo[
'managed_updates']
# filter managed_installs and removals lists
# so they have only items that need action
installinfo['managed_installs'] = [
item for item in installinfo['managed_installs']
if item.get('installer_item')]
installinfo['removals'] = [
item for item in installinfo['removals']
if item.get('installed')]
# also record problem items so MSC.app can provide feedback
installinfo['problem_items'] = problem_items
# download display icons for optional installs
# and active installs/removals
item_list = list(installinfo.get('optional_installs', []))
item_list.extend(installinfo['managed_installs'])
item_list.extend(installinfo['removals'])
download_icons(item_list)
# get any custom client resources
download_client_resources()
# record the filtered lists
munkicommon.report['ItemsToInstall'] = installinfo['managed_installs']
munkicommon.report['ItemsToRemove'] = installinfo['removals']
# clean up catalogs directory
cleanUpCatalogs()
# clean up manifests directory
cleanUpManifests()
# clean up cache dir
# remove any item in the cache that isn't scheduled
# to be used for an install or removal
# this could happen if an item is downloaded on one
# updatecheck run, but later removed from the manifest
# before it is installed or removed - so the cached item
# is no longer needed.
cache_list = [item['installer_item']
for item in installinfo.get('managed_installs', [])]
cache_list.extend([item['uninstaller_item']
for item in installinfo.get('removals', [])
if item.get('uninstaller_item')])
cachedir = os.path.join(ManagedInstallDir, 'Cache')
for item in munkicommon.listdir(cachedir):
if item.endswith('.download'):
# we have a partial download here
# remove the '.download' from the end of the filename
fullitem = os.path.splitext(item)[0]
if os.path.exists(os.path.join(cachedir, fullitem)):
# we have a partial and a full download
# for the same item. (This shouldn't happen.)
# remove the partial download.
os.unlink(os.path.join(cachedir, item))
elif problem_items == []:
# problem items is our list of items
# that need to be installed but are missing
# the installer_item; these might be partial
# downloads. So if we have no problem items, it's
# OK to get rid of any partial downloads hanging
# around.
os.unlink(os.path.join(cachedir, item))
elif item not in cache_list:
munkicommon.display_detail('Removing %s from cache', item)
os.unlink(os.path.join(cachedir, item))
# write out install list so our installer
# can use it to install things in the right order
installinfochanged = True
installinfopath = os.path.join(ManagedInstallDir, 'InstallInfo.plist')
if os.path.exists(installinfopath):
try:
oldinstallinfo = FoundationPlist.readPlist(installinfopath)
except FoundationPlist.NSPropertyListSerializationException:
oldinstallinfo = None
munkicommon.display_error(
'Could not read InstallInfo.plist. Deleting...')
try:
os.unlink(installinfopath)
except __HOLE__, e:
munkicommon.display_error(
'Failed to delete InstallInfo.plist: %s', str(e))
if oldinstallinfo == installinfo:
installinfochanged = False
munkicommon.display_detail('No change in InstallInfo.')
if installinfochanged:
FoundationPlist.writePlist(installinfo,
os.path.join(ManagedInstallDir,
'InstallInfo.plist'))
else:
# couldn't get a primary manifest. Check to see if we have a valid
# install/remove list from an earlier run.
munkicommon.display_error(
'Could not retrieve managed install primary manifest.')
installinfopath = os.path.join(ManagedInstallDir, 'InstallInfo.plist')
if os.path.exists(installinfopath):
try:
installinfo = FoundationPlist.readPlist(installinfopath)
except FoundationPlist.NSPropertyListSerializationException:
installinfo = {}
munkicommon.report['ItemsToInstall'] = \
installinfo.get('managed_installs', [])
munkicommon.report['ItemsToRemove'] = \
installinfo.get('removals', [])
munkicommon.savereport()
munkicommon.log('### End managed software check ###')
installcount = len(installinfo.get('managed_installs', []))
removalcount = len(installinfo.get('removals', []))
if installcount or removalcount:
return 1
else:
return 0
|
OSError
|
dataset/ETHPy150Open munki/munki/code/client/munkilib/updatecheck.py/check
|
8,392
|
def getDataFromURL(url):
'''Returns data from url as string. We use the existing
getResourceIfChangedAtomically function so any custom
authentication/authorization headers are reused'''
urldata = os.path.join(munkicommon.tmpdir(), 'urldata')
if os.path.exists(urldata):
try:
os.unlink(urldata)
except (IOError, OSError), err:
munkicommon.display_warning('Error in getDataFromURL: %s', err)
dummy_result = getResourceIfChangedAtomically(url, urldata)
try:
fdesc = open(urldata)
data = fdesc.read()
fdesc.close()
os.unlink(urldata)
return data
except (IOError, __HOLE__), err:
munkicommon.display_warning('Error in getDataFromURL: %s', err)
return ''
|
OSError
|
dataset/ETHPy150Open munki/munki/code/client/munkilib/updatecheck.py/getDataFromURL
|
8,393
|
def handle(self, *fixture_labels, **options):
using = options.get('database')
connection = connections[using]
self.style = no_style()
if not len(fixture_labels):
self.stderr.write(
self.style.ERROR("No database fixture specified. Please provide the path of at least one fixture in the command line.\n")
)
return
verbosity = int(options.get('verbosity'))
show_traceback = options.get('traceback')
# commit is a stealth option - it isn't really useful as
# a command line option, but it can be useful when invoking
# loaddata from within another script.
# If commit=True, loaddata will use its own transaction;
# if commit=False, the data load SQL will become part of
# the transaction in place when loaddata was invoked.
commit = options.get('commit', True)
# Keep a count of the installed objects and fixtures
fixture_count = 0
loaded_object_count = 0
fixture_object_count = 0
models = set()
humanize = lambda dirname: "'%s'" % dirname if dirname else 'absolute path'
# Get a cursor (even though we don't need one yet). This has
# the side effect of initializing the test database (if
# it isn't already initialized).
cursor = connection.cursor()
# Start transaction management. All fixtures are installed in a
# single transaction to ensure that all references are resolved.
if commit:
transaction.commit_unless_managed(using=using)
transaction.enter_transaction_management(using=using)
transaction.managed(True, using=using)
class SingleZipReader(zipfile.ZipFile):
def __init__(self, *args, **kwargs):
zipfile.ZipFile.__init__(self, *args, **kwargs)
if settings.DEBUG:
assert len(self.namelist()) == 1, "Zip-compressed fixtures must contain only one file."
def read(self):
return zipfile.ZipFile.read(self, self.namelist()[0])
compression_types = {
None: open,
'gz': gzip.GzipFile,
'zip': SingleZipReader
}
if has_bz2:
compression_types['bz2'] = bz2.BZ2File
app_module_paths = []
for app in get_apps():
if hasattr(app, '__path__'):
# It's a 'models/' subpackage
for path in app.__path__:
app_module_paths.append(path)
else:
# It's a models.py module
app_module_paths.append(app.__file__)
app_fixtures = [os.path.join(os.path.dirname(path), 'fixtures') for path in app_module_paths]
try:
with connection.constraint_checks_disabled():
for fixture_label in fixture_labels:
parts = fixture_label.split('.')
if len(parts) > 1 and parts[-1] in compression_types:
compression_formats = [parts[-1]]
parts = parts[:-1]
else:
compression_formats = compression_types.keys()
if len(parts) == 1:
fixture_name = parts[0]
formats = serializers.get_public_serializer_formats()
else:
fixture_name, format = '.'.join(parts[:-1]), parts[-1]
if format in serializers.get_public_serializer_formats():
formats = [format]
else:
formats = []
if formats:
if verbosity >= 2:
self.stdout.write("Loading '%s' fixtures...\n" % fixture_name)
else:
self.stderr.write(
self.style.ERROR("Problem installing fixture '%s': %s is not a known serialization format.\n" %
(fixture_name, format)))
if commit:
transaction.rollback(using=using)
transaction.leave_transaction_management(using=using)
return
if os.path.isabs(fixture_name):
fixture_dirs = [fixture_name]
else:
fixture_dirs = app_fixtures + list(settings.FIXTURE_DIRS) + ['']
for fixture_dir in fixture_dirs:
if verbosity >= 2:
self.stdout.write("Checking %s for fixtures...\n" % humanize(fixture_dir))
label_found = False
for combo in product([using, None], formats, compression_formats):
database, format, compression_format = combo
file_name = '.'.join(
p for p in [
fixture_name, database, format, compression_format
]
if p
)
if verbosity >= 3:
self.stdout.write("Trying %s for %s fixture '%s'...\n" % \
(humanize(fixture_dir), file_name, fixture_name))
full_path = os.path.join(fixture_dir, file_name)
open_method = compression_types[compression_format]
try:
fixture = open_method(full_path, 'r')
except IOError:
if verbosity >= 2:
self.stdout.write("No %s fixture '%s' in %s.\n" % \
(format, fixture_name, humanize(fixture_dir)))
else:
try:
if label_found:
self.stderr.write(self.style.ERROR("Multiple fixtures named '%s' in %s. Aborting.\n" %
(fixture_name, humanize(fixture_dir))))
if commit:
transaction.rollback(using=using)
transaction.leave_transaction_management(using=using)
return
fixture_count += 1
objects_in_fixture = 0
loaded_objects_in_fixture = 0
if verbosity >= 2:
self.stdout.write("Installing %s fixture '%s' from %s.\n" % \
(format, fixture_name, humanize(fixture_dir)))
objects = serializers.deserialize(format, fixture, using=using)
for obj in objects:
objects_in_fixture += 1
if router.allow_syncdb(using, obj.object.__class__):
loaded_objects_in_fixture += 1
models.add(obj.object.__class__)
try:
obj.save(using=using)
except (DatabaseError, IntegrityError), e:
msg = "Could not load %(app_label)s.%(object_name)s(pk=%(pk)s): %(error_msg)s" % {
'app_label': obj.object._meta.app_label,
'object_name': obj.object._meta.object_name,
'pk': obj.object.pk,
'error_msg': e
}
raise e.__class__, e.__class__(msg), sys.exc_info()[2]
loaded_object_count += loaded_objects_in_fixture
fixture_object_count += objects_in_fixture
label_found = True
finally:
fixture.close()
# If the fixture we loaded contains 0 objects, assume that an
# error was encountered during fixture loading.
if objects_in_fixture == 0:
self.stderr.write(
self.style.ERROR("No fixture data found for '%s'. (File format may be invalid.)\n" %
(fixture_name)))
if commit:
transaction.rollback(using=using)
transaction.leave_transaction_management(using=using)
return
# Since we disabled constraint checks, we must manually check for
# any invalid keys that might have been added
table_names = [model._meta.db_table for model in models]
connection.check_constraints(table_names=table_names)
except (SystemExit, __HOLE__):
raise
except Exception:
if commit:
transaction.rollback(using=using)
transaction.leave_transaction_management(using=using)
if show_traceback:
traceback.print_exc()
else:
self.stderr.write(
self.style.ERROR("Problem installing fixture '%s': %s\n" %
(full_path, ''.join(traceback.format_exception(sys.exc_type,
sys.exc_value, sys.exc_traceback)))))
return
# If we found even one object in a fixture, we need to reset the
# database sequences.
if loaded_object_count > 0:
sequence_sql = connection.ops.sequence_reset_sql(self.style, models)
if sequence_sql:
if verbosity >= 2:
self.stdout.write("Resetting sequences\n")
for line in sequence_sql:
cursor.execute(line)
if commit:
transaction.commit(using=using)
transaction.leave_transaction_management(using=using)
if verbosity >= 1:
if fixture_object_count == loaded_object_count:
self.stdout.write("Installed %d object(s) from %d fixture(s)\n" % (
loaded_object_count, fixture_count))
else:
self.stdout.write("Installed %d object(s) (of %d) from %d fixture(s)\n" % (
loaded_object_count, fixture_object_count, fixture_count))
# Close the DB connection. This is required as a workaround for an
# edge case in MySQL: if the same connection is used to
# create tables, load data, and query, the query can return
# incorrect results. See Django #7572, MySQL #37735.
if commit:
connection.close()
|
KeyboardInterrupt
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/django/core/management/commands/loaddata.py/Command.handle
|
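Note on the row above: the three-argument raise near the end (raise e.__class__, e.__class__(msg), sys.exc_info()[2]) is Python 2 syntax for re-raising a wrapped error while keeping the original traceback. A sketch of the Python 3 equivalent, not Django's code:

import sys

def reraise_with_message(exc, msg):
    # Python 3 spelling of: raise cls, cls(msg), traceback
    raise exc.__class__(msg).with_traceback(sys.exc_info()[2])

try:
    try:
        {}['missing']
    except KeyError as e:
        reraise_with_message(e, 'Could not load row: %s' % e)
except KeyError as wrapped:
    print(wrapped)  # carries the new message and the original traceback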
8,394
|
def _fill_to(self, index):
if self._last >= index:
return True
while self._last < index:
try:
n = next(self._origin)
except __HOLE__:
return False
self._last += 1
self._collection.append(n)
return True
|
StopIteration
|
dataset/ETHPy150Open kachayev/fn.py/fn/stream.py/Stream._fill_to
|
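Note on the row above: _fill_to pulls items from the origin iterator on demand and memoizes them until the requested index exists, returning False once the iterator is exhausted. A standalone sketch of the same memoized-fill idea (a toy LazyList, not fn.py's full Stream):

class LazyList(object):
    def __init__(self, iterable):
        self._origin = iter(iterable)
        self._collection = []

    def _fill_to(self, index):
        while len(self._collection) <= index:
            try:
                self._collection.append(next(self._origin))
            except StopIteration:
                return False
        return True

    def __getitem__(self, index):
        if not self._fill_to(index):
            raise IndexError(index)
        return self._collection[index]

lazy = LazyList(x * x for x in range(5))
assert lazy[3] == 9  # pulls items 0..3 once; later reads hit the cache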
8,395
|
def _run_with_timeout(p, timeout, kill_signal, kill_tree=True):
"""Return False if we timed out, True else."""
def alarm_handler(signum, frame):
raise _Alarm
if timeout == 0: # this is mostly useful for testing
return False
signal.signal(signal.SIGALRM, alarm_handler)
signal.alarm(timeout)
try:
p.communicate()
signal.alarm(0)
return True
except _Alarm:
pids = [p.pid]
if kill_tree:
pids.extend(_get_process_children(p.pid))
for pid in pids:
# process might have died before getting to this line
# so wrap to avoid OSError: no such process
try:
os.kill(pid, kill_signal)
except __HOLE__:
pass
return False
|
OSError
|
dataset/ETHPy150Open Khan/alertlib/timeout.py/_run_with_timeout
|
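Note on the row above: the helper arms SIGALRM so an alarm can interrupt p.communicate(), then kills the process (and optionally its children found via _get_process_children). On Python 3.3+ subprocess has the timeout built in; a sketch of that simpler route, which like a bare p.kill() does not kill child processes:

import subprocess

p = subprocess.Popen(['sleep', '60'])
try:
    p.communicate(timeout=2)     # raises if still running after 2 seconds
except subprocess.TimeoutExpired:
    p.kill()                     # kills only p, not its subtree
    p.communicate()              # reap it to avoid a zombie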
8,396
|
def replace_url(self, url):
if url.startswith('data:'):
# Don't even bother sending data: through urlparse(),
# who knows how well it'll deal with a lot of data.
return
# Ignore any urls which are not relative
parsed = urlparse.urlparse(url)
if parsed.scheme or parsed.netloc or parsed.path.startswith('/'):
return
# Since this runs BEFORE cssrewrite, we can thus assume that urls
# will be relative to the file location.
#
# Notes:
# - Django might need to override this filter for staticfiles if it
# it should be possible to resolve cross-references between
# different directories.
# - For Flask-Assets blueprints, the logic might need to be:
# 1) Take source_path, convert into correct url via absurl().
# 2) Join with the URL to be replaced.
# 3) Convert url back to the filesystem path to which the url
# would map (the hard part?).
#
filename = os.path.join(os.path.dirname(self.source_path), url)
try:
if os.stat(filename).st_size <= (self.max_size or 2048):
with open(filename, 'rb') as f:
data = b64encode(f.read())
return 'data:%s;base64,%s' % (
mimetypes.guess_type(filename)[0], data.decode())
except (__HOLE__, IOError):
# Ignore the file not existing.
# TODO: When we have a logging system, this could produce a warning
return
|
OSError
|
dataset/ETHPy150Open miracle2k/webassets/src/webassets/filter/datauri.py/CSSDataUri.replace_url
|
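Note on the row above: files at or below max_size are inlined as data: URIs — guess the MIME type, base64-encode the bytes, and splice both into the URL. A self-contained sketch of building one (to_data_uri is a hypothetical helper, not webassets API):

import mimetypes
from base64 import b64encode

def to_data_uri(filename, data):
    mime = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
    return 'data:%s;base64,%s' % (mime, b64encode(data).decode('ascii'))

print(to_data_uri('dot.png', b'\x89PNG'))  # -> data:image/png;base64,iVBORw==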
8,397
|
def __init__(self, params):
timeout = params.get('timeout', params.get('TIMEOUT', 300))
if timeout is not None:
try:
timeout = int(timeout)
except (ValueError, TypeError):
timeout = 300
self.default_timeout = timeout
options = params.get('OPTIONS', {})
max_entries = params.get('max_entries', options.get('MAX_ENTRIES', 300))
try:
self._max_entries = int(max_entries)
except (ValueError, TypeError):
self._max_entries = 300
cull_frequency = params.get('cull_frequency', options.get('CULL_FREQUENCY', 3))
try:
self._cull_frequency = int(cull_frequency)
except (__HOLE__, TypeError):
self._cull_frequency = 3
self.key_prefix = params.get('KEY_PREFIX', '')
self.version = params.get('VERSION', 1)
self.key_func = get_key_func(params.get('KEY_FUNCTION'))
|
ValueError
|
dataset/ETHPy150Open django/django/django/core/cache/backends/base.py/BaseCache.__init__
|
8,398
|
def semilinear(x):
""" This function ensures that the values of the array are always positive. It is
x+1 for x >= 0 and exp(x) for x < 0. """
try:
# assume x is a numpy array
shape = x.shape
x.flatten()
x = x.tolist()
except __HOLE__:
# no, it wasn't: build shape from length of list
shape = (1, len(x))
def f(val):
if val < 0:
# exponential function for x<0
return safeExp(val)
else:
# linear function for x>=0
return val + 1.0
return array(list(map(f, x))).reshape(shape)
|
AttributeError
|
dataset/ETHPy150Open pybrain/pybrain/pybrain/tools/functions.py/semilinear
|
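Note on the row above: semilinear is exp(x) for x < 0 and x+1 for x >= 0, continuous at 0 (both branches give 1) and strictly positive everywhere; note the original's x.flatten() result is discarded, since flatten returns a copy. With numpy the per-element loop can be vectorized (a sketch assuming plain np.exp rather than pybrain's clipped safeExp):

import numpy as np

def semilinear_vec(x):
    x = np.asarray(x, dtype=float)
    # np.where evaluates both branches, so very large x warns on exp overflow,
    # which safeExp's clipping avoids in the original.
    return np.where(x < 0, np.exp(x), x + 1.0)

print(semilinear_vec([-1.0, 0.0, 2.0]))  # [0.36787944 1.         3.        ]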
8,399
|
def semilinearPrime(x):
""" This function is the first derivative of the semilinear function (above).
It is needed for the backward pass of the module. """
try:
# assume x is a numpy array
shape = x.shape
x.flatten()
x = x.tolist()
except __HOLE__:
# no, it wasn't: build shape from length of list
shape = (1, len(x))
def f(val):
if val < 0:
# exponential function for x<0
return safeExp(val)
else:
# linear function for x>=0
return 1.0
return array(list(map(f, x))).reshape(shape)
|
AttributeError
|
dataset/ETHPy150Open pybrain/pybrain/pybrain/tools/functions.py/semilinearPrime
|