| Unnamed: 0 (int64, 0-10k) | function (string, length 79-138k) | label (string, 20 classes) | info (string, length 42-261) |
|---|---|---|---|
9,600
|
def load_tool(result):
    """
    Load the module with the tool-specific code.
    """
    def load_tool_module(tool_module):
        if not tool_module:
            logging.warning('Cannot extract values from log files for benchmark results %s '
                            '(missing attribute "toolmodule" on tag "result").',
                            Util.prettylist(result.attributes['name']))
            return None
        try:
            logging.debug('Loading %s', tool_module)
            return __import__(tool_module, fromlist=['Tool']).Tool()
        except ImportError as ie:
            logging.warning(
                'Missing module "%s", cannot extract values from log files (ImportError: %s).',
                tool_module, ie)
        except __HOLE__:
            logging.warning(
                'The module "%s" does not define the necessary class Tool, '
                'cannot extract values from log files.',
                tool_module)
        return None

    tool_module = result.attributes['toolmodule'][0] if 'toolmodule' in result.attributes else None
    if tool_module in loaded_tools:
        return loaded_tools[tool_module]
    else:
        result = load_tool_module(tool_module)
        loaded_tools[tool_module] = result
        return result
|
AttributeError
|
dataset/ETHPy150Open sosy-lab/benchexec/benchexec/tablegenerator/__init__.py/load_tool
|
9,601
|
def parse_results_file(resultFile, run_set_id=None, ignore_errors=False):
    '''
    This function parses a XML file with the results of the execution of a run set.
    It returns the "result" XML tag.
    @param resultFile: The file name of the XML file with the results.
    @param run_set_id: An optional identifier of this set of results.
    '''
    logging.info(' %s', resultFile)
    url = Util.make_url(resultFile)

    parse = ElementTree.ElementTree().parse
    try:
        with Util.open_url_seekable(url, mode='rb') as f:
            try:
                try:
                    resultElem = parse(gzip.GzipFile(fileobj=f))
                except IOError:
                    f.seek(0)
                    try:
                        resultElem = parse(bz2.BZ2File(f))
                    except __HOLE__:
                        # Python 3.2 does not support giving a file-like object to BZ2File
                        resultElem = parse(io.BytesIO(bz2.decompress(f.read())))
            except IOError:
                f.seek(0)
                resultElem = parse(f)
    except IOError as e:
        logging.error('Could not read result file %s: %s', resultFile, e)
        exit(1)
    except ElementTree.ParseError as e:
        logging.error('Result file %s is invalid: %s', resultFile, e)
        exit(1)

    if resultElem.tag not in ['result', 'test']:
        logging.error("XML file with benchmark results seems to be invalid.\n"
                      "The root element of the file is not named 'result' or 'test'.\n"
                      "If you want to run a table-definition file,\n"
                      "you should use the option '-x' or '--xml'.")
        exit(1)

    if ignore_errors and 'error' in resultElem.attrib:
        logging.warning('Ignoring file "%s" because of error: %s',
                        resultFile,
                        resultElem.attrib['error'])
        return None

    if run_set_id is not None:
        for sourcefile in _get_run_tags_from_xml(resultElem):
            sourcefile.set('runset', run_set_id)

    insert_logfile_names(resultFile, resultElem)
    return resultElem
|
TypeError
|
dataset/ETHPy150Open sosy-lab/benchexec/benchexec/tablegenerator/__init__.py/parse_results_file
|
9,602
|
@staticmethod
def create_from_xml(sourcefileTag, get_value_from_logfile, listOfColumns, correct_only,
                    log_zip_cache):
    '''
    This function collects the values from one run.
    Only columns that should be part of the table are collected.
    '''
    def read_logfile_lines(log_file):
        if not log_file:
            return []
        log_file_url = Util.make_url(log_file)
        url_parts = urllib.parse.urlparse(log_file_url, allow_fragments=False)
        log_zip_path = os.path.dirname(url_parts.path) + ".zip"
        log_zip_url = urllib.parse.urlunparse((url_parts.scheme, url_parts.netloc,
            log_zip_path, url_parts.params, url_parts.query, url_parts.fragment))
        path_in_zip = urllib.parse.unquote(
            os.path.relpath(url_parts.path, os.path.dirname(log_zip_path)))

        try:
            with Util.open_url_seekable(log_file_url, 'rt') as logfile:
                return logfile.readlines()
        except IOError as unused_e1:
            try:
                if log_zip_url not in log_zip_cache:
                    log_zip_cache[log_zip_url] = zipfile.ZipFile(
                        Util.open_url_seekable(log_zip_url, 'rb'))
                log_zip = log_zip_cache[log_zip_url]
                try:
                    with io.TextIOWrapper(log_zip.open(path_in_zip)) as logfile:
                        return logfile.readlines()
                except __HOLE__:
                    logging.warning("Could not find logfile '%s' in archive '%s'.",
                                    log_file, log_zip_url)
                    return []
            except IOError as unused_e2:
                logging.warning("Could not find logfile '%s' nor log archive '%s'.",
                                log_file, log_zip_url)
                return []

    status = Util.get_column_value(sourcefileTag, 'status', '')
    category = Util.get_column_value(sourcefileTag, 'category', result.CATEGORY_MISSING)
    score = result.score_for_task(sourcefileTag.get('name'),
                                  sourcefileTag.get('properties', '').split(),
                                  category,
                                  status)
    logfileLines = None

    values = []

    for column in listOfColumns:  # for all columns that should be shown
        value = None  # default value
        if column.title.lower() == 'score':
            value = str(score)
        elif column.title.lower() == 'status':
            value = status
        elif not correct_only or category == result.CATEGORY_CORRECT:
            if not column.pattern or column.href:
                # collect values from XML
                value = Util.get_column_value(sourcefileTag, column.title)
            else:  # collect values from logfile
                if logfileLines is None:  # cache content
                    logfileLines = read_logfile_lines(sourcefileTag.get('logfile'))
                value = get_value_from_logfile(logfileLines, column.pattern)

        values.append(value)

    return RunResult(get_task_id(sourcefileTag), status, category, score, sourcefileTag.get('logfile'), listOfColumns, values)
|
KeyError
|
dataset/ETHPy150Open sosy-lab/benchexec/benchexec/tablegenerator/__init__.py/RunResult.create_from_xml
|
9,603
|
def get_table_head(runSetResults, commonFileNamePrefix):
    # This list contains the number of columns each run set has
    # (the width of a run set in the final table)
    # It is used for calculating the column spans of the header cells.
    runSetWidths = [len(runSetResult.columns) for runSetResult in runSetResults]

    for runSetResult in runSetResults:
        # Ugly because this overwrites the entries in the map,
        # but we don't need them anymore and this is the easiest way
        for key in runSetResult.attributes:
            values = runSetResult.attributes[key]
            if key == 'turbo':
                turbo_values = list(set(values))
                if len(turbo_values) > 1:
                    turbo = 'mixed'
                elif turbo_values[0] == 'true':
                    turbo = 'enabled'
                elif turbo_values[0] == 'false':
                    turbo = 'disabled'
                else:
                    turbo = None
                runSetResult.attributes['turbo'] = ', Turbo Boost: {}'.format(turbo) if turbo else ''
            elif key == 'timelimit':
                def fix_unit_display(value):
                    if len(value) >= 2 and value[-1] == 's' and value[-2] != ' ':
                        return value[:-1] + ' s'
                    return value
                runSetResult.attributes[key] = Util.prettylist(map(fix_unit_display, values))
            elif key == 'memlimit' or key == 'ram':
                def round_to_MB(value):
                    try:
                        return "{:.0f} MB".format(int(value)/_BYTE_FACTOR/_BYTE_FACTOR)
                    except __HOLE__:
                        return value
                runSetResult.attributes[key] = Util.prettylist(map(round_to_MB, values))
            elif key == 'freq':
                def round_to_MHz(value):
                    try:
                        return "{:.0f} MHz".format(int(value)/1000/1000)
                    except ValueError:
                        return value
                runSetResult.attributes[key] = Util.prettylist(map(round_to_MHz, values))
            else:
                runSetResult.attributes[key] = Util.prettylist(values)

    def get_row(rowName, format_string, collapse=False, onlyIf=None, default='Unknown'):
        def format_cell(attributes):
            if onlyIf and not onlyIf in attributes:
                formatStr = default
            else:
                formatStr = format_string
            return formatStr.format(**attributes)

        values = [format_cell(runSetResult.attributes) for runSetResult in runSetResults]
        if not any(values):
            return None  # skip row without values completely
        valuesAndWidths = list(Util.collapse_equal_values(values, runSetWidths)) \
                          if collapse else list(zip(values, runSetWidths))
        return tempita.bunch(id=rowName.lower().split(' ')[0],
                             name=rowName,
                             content=valuesAndWidths)

    titles = [column.format_title() for runSetResult in runSetResults for column in runSetResult.columns]
    runSetWidths1 = [1]*sum(runSetWidths)
    titleRow = tempita.bunch(id='columnTitles', name=commonFileNamePrefix,
                             content=list(zip(titles, runSetWidths1)))

    return {'tool':     get_row('Tool', '{tool} {version}', collapse=True),
            'limit':    get_row('Limits', 'timelimit: {timelimit}, memlimit: {memlimit}, CPU core limit: {cpuCores}', collapse=True),
            'host':     get_row('Host', '{host}', collapse=True, onlyIf='host'),
            'os':       get_row('OS', '{os}', collapse=True, onlyIf='os'),
            'system':   get_row('System', 'CPU: {cpu}, cores: {cores}, frequency: {freq}{turbo}; RAM: {ram}', collapse=True, onlyIf='cpu'),
            'date':     get_row('Date of execution', '{date}', collapse=True),
            'runset':   get_row('Run set', '{niceName}'),
            'branch':   get_row('Branch', '{branch}'),
            'options':  get_row('Options', '{options}'),
            'property': get_row('Propertyfile', '{propertyfiles}', collapse=True, onlyIf='propertyfiles', default=''),
            'title':    titleRow}
|
ValueError
|
dataset/ETHPy150Open sosy-lab/benchexec/benchexec/tablegenerator/__init__.py/get_table_head
|
9,604
|
def write_table_in_format(template_format, outfile, template_values, show_table):
    # read template
    Template = tempita.HTMLTemplate if template_format == 'html' else tempita.Template
    template_file = TEMPLATE_FILE_NAME.format(format=template_format)
    try:
        template_content = __loader__.get_data(template_file).decode(TEMPLATE_ENCODING)
    except __HOLE__:
        with open(template_file, mode='r') as f:
            template_content = f.read()
    template = Template(template_content, namespace=TEMPLATE_NAMESPACE)

    result = template.substitute(**template_values)

    # write file
    if not outfile:
        print(result, end='')
    else:
        with open(outfile, 'w') as file:
            file.write(result)
        if show_table:
            try:
                with open(os.devnull, 'w') as devnull:
                    subprocess.Popen(['xdg-open', outfile],
                                     stdout=devnull, stderr=devnull)
            except OSError:
                pass
|
NameError
|
dataset/ETHPy150Open sosy-lab/benchexec/benchexec/tablegenerator/__init__.py/write_table_in_format
|
9,605
|
def main(args=None):
    if sys.version_info < (3,):
        sys.exit('table-generator needs Python 3 to run.')
    signal.signal(signal.SIGINT, sigint_handler)

    arg_parser = create_argument_parser()
    options = arg_parser.parse_args((args or sys.argv)[1:])

    logging.basicConfig(format="%(levelname)s: %(message)s",
                        level=logging.WARNING if options.quiet else logging.INFO)

    global parallel
    import concurrent.futures
    cpu_count = 1
    try:
        cpu_count = os.cpu_count() or 1
    except __HOLE__:
        pass
    # Use up to cpu_count*2 workers because some tasks are I/O bound.
    parallel = concurrent.futures.ProcessPoolExecutor(max_workers=cpu_count*2)

    name = options.output_name
    outputPath = options.outputPath
    if outputPath == '-':
        # write to stdout
        outputFilePattern = '-'
        outputPath = '.'
    else:
        outputFilePattern = "{name}.{type}.{ext}"

    if options.xmltablefile:
        if options.tables:
            arg_parser.error("Invalid additional arguments '{}'.".format(" ".join(options.tables)))
        runSetResults = parse_table_definition_file(options.xmltablefile, options)
        if not name:
            name = basename_without_ending(options.xmltablefile)
        if not outputPath:
            outputPath = os.path.dirname(options.xmltablefile)
    else:
        if options.tables:
            inputFiles = options.tables
        else:
            searchDir = outputPath or DEFAULT_OUTPUT_PATH
            logging.info("Searching result files in '%s'...", searchDir)
            inputFiles = [os.path.join(searchDir, '*.results*.xml')]

        inputFiles = Util.extend_file_list(inputFiles)  # expand wildcards
        runSetResults = parallel.map(load_result,
                                     inputFiles, itertools.repeat(options))

        if len(inputFiles) == 1:
            if not name:
                name = basename_without_ending(inputFiles[0])
            if not outputFilePattern == '-':
                outputFilePattern = "{name}.{ext}"
        else:
            if not name:
                name = NAME_START + "." + time.strftime("%Y-%m-%d_%H%M", time.localtime())

        if inputFiles and not outputPath:
            path = os.path.dirname(inputFiles[0])
            if not "://" in path and all(path == os.path.dirname(file) for file in inputFiles):
                outputPath = path
            else:
                outputPath = DEFAULT_OUTPUT_PATH

    if not outputPath:
        outputPath = '.'

    runSetResults = [r for r in runSetResults if r is not None]
    if not runSetResults:
        logging.error('No benchmark results found.')
        exit(1)

    logging.info('Merging results...')
    if options.common:
        find_common_tasks(runSetResults)
    else:
        # merge list of run sets, so that all run sets contain the same tasks
        merge_tasks(runSetResults)

    rows = get_rows(runSetResults)
    if not rows:
        logging.warning('No results found, no tables produced.')
        exit()
    rowsDiff = filter_rows_with_differences(rows) if options.write_diff_table else []

    logging.info('Generating table...')
    if not os.path.isdir(outputPath) and not outputFilePattern == '-':
        os.makedirs(outputPath)
    futures = create_tables(name, runSetResults, rows, rowsDiff, outputPath, outputFilePattern, options)

    if options.dump_counts:  # print some stats for Buildbot
        print("REGRESSIONS {}".format(get_regression_count(rows, options.ignoreFlappingTimeouts)))

        countsList = get_counts(rows)
        print("STATS")
        for counts in countsList:
            print(" ".join(str(e) for e in counts))

    for f in futures:
        f.result()  # to get any exceptions that may have occurred

    logging.info('done')

    parallel.shutdown(wait=True)
|
AttributeError
|
dataset/ETHPy150Open sosy-lab/benchexec/benchexec/tablegenerator/__init__.py/main
|
9,606
|
def day_selected(self, event):
    """
    Event for when calendar is selected, update/create date string.
    """
    date = event.GetDate()
    # WX sometimes has year == 0 temporarily when doing state changes.
    if date.IsValid() and date.GetYear() != 0:
        year = date.GetYear()
        # wx 2.8.8 has 0-indexed months.
        month = date.GetMonth() + 1
        day = date.GetDay()
        try:
            self.value = datetime.date(year, month, day)
        except __HOLE__:
            print 'Invalid date:', year, month, day
            raise
    return
|
ValueError
|
dataset/ETHPy150Open enthought/traitsui/traitsui/wx/date_editor.py/SimpleEditor.day_selected
|
9,607
|
def selected_list_changed(self, evt=None):
    """ Update the date colors of the days in the widgets. """
    for cal in self.cal_ctrls:
        cur_month = cal.GetDate().GetMonth() + 1
        cur_year = cal.GetDate().GetYear()
        selected_days = self.selected_days
        # When multi_select is False wrap in a list to pass the for-loop.
        if self.multi_select == False:
            if selected_days == None:
                selected_days = []
            else:
                selected_days = [selected_days]
        # Reset all the days to the correct colors.
        for day in range(1, 32):
            try:
                paint_day = datetime.date(cur_year, cur_month, day)
                if not self.allow_future and paint_day > self.today:
                    attr = wx.calendar.CalendarDateAttr(colText=UNAVAILABLE_FG)
                    cal.SetAttr(day, attr)
                elif paint_day in selected_days:
                    attr = wx.calendar.CalendarDateAttr(colText=SELECTED_FG)
                    cal.SetAttr(day, attr)
                else:
                    cal.ResetAttr(day)
            except __HOLE__:
                # Blindly creating Date objects sometimes produces invalid.
                pass
        cal.highlight_changed()
    return
|
ValueError
|
dataset/ETHPy150Open enthought/traitsui/traitsui/wx/date_editor.py/MultiCalendarCtrl.selected_list_changed
|
9,608
|
def _weekday_clicked(self, evt):
    """ A day on the weekday bar has been clicked. Select all days. """
    evt.Skip()
    weekday = evt.GetWeekDay()
    cal = evt.GetEventObject()
    month = cal.GetDate().GetMonth() + 1
    year = cal.GetDate().GetYear()
    days = []
    # Messy math to compute the dates of each weekday in the month.
    # Python uses Monday=0, while wx uses Sunday=0.
    month_start_weekday = (datetime.date(year, month, 1).weekday() + 1) % 7
    weekday_offset = (weekday - month_start_weekday) % 7
    for day in range(weekday_offset, 31, 7):
        try:
            day = datetime.date(year, month, day + 1)
            if self.allow_future or day <= self.today:
                days.append(day)
        except __HOLE__:
            pass
    self.add_days_to_selection(days)
    self.selected_list_changed()
    return
|
ValueError
|
dataset/ETHPy150Open enthought/traitsui/traitsui/wx/date_editor.py/MultiCalendarCtrl._weekday_clicked
|
9,609
|
def runTest(self):
    try:
        actual = openid.urinorm.urinorm(self.case)
    except __HOLE__, why:
        self.assertEqual(self.expected, 'fail', why)
    else:
        self.assertEqual(actual, self.expected)
|
ValueError
|
dataset/ETHPy150Open adieu/python-openid/openid/test/test_urinorm.py/UrinormTest.runTest
|
9,610
|
def jsonrpc_server_call(target, jsonrpc_request, json_decoder=None):
    """Execute the given JSON-RPC request (as JSON-encoded string) on the given
    target object and return the JSON-RPC response, as a dict
    """
    if json_decoder is None:
        json_decoder = ScrapyJSONDecoder()

    try:
        req = json_decoder.decode(jsonrpc_request)
    except Exception as e:
        return jsonrpc_error(None, jsonrpc_errors.PARSE_ERROR, 'Parse error', \
            traceback.format_exc())

    try:
        id, methname = req['id'], req['method']
    except __HOLE__:
        return jsonrpc_error(None, jsonrpc_errors.INVALID_REQUEST, 'Invalid Request')

    try:
        method = getattr(target, methname)
    except AttributeError:
        return jsonrpc_error(id, jsonrpc_errors.METHOD_NOT_FOUND, 'Method not found')

    params = req.get('params', [])
    a, kw = ([], params) if isinstance(params, dict) else (params, {})
    kw = dict([(str(k), v) for k, v in kw.items()])  # convert kw keys to str
    try:
        return jsonrpc_result(id, method(*a, **kw))
    except Exception as e:
        return jsonrpc_error(id, jsonrpc_errors.INTERNAL_ERROR, str(e), \
            traceback.format_exc())
|
KeyError
|
dataset/ETHPy150Open wcong/ants/ants/utils/jsonrpc.py/jsonrpc_server_call
|
9,611
|
def load_module(module_name):
    """ Dynamically load a module

    TODO:
        Throw a custom error and deal with better.

    Returns:
        module: an imported module name
    """
    try:
        module = resolve_name(module_name)
    except __HOLE__:
        raise error.NotFound(msg=module_name)

    return module
|
ImportError
|
dataset/ETHPy150Open panoptes/POCS/panoptes/utils/modules.py/load_module
|
9,612
|
def run_command(command, input=None):
    if not isinstance(command, list):
        command = shlex.split(command)

    if not input:
        input = None
    elif isinstance(input, unicode_type):
        input = input.encode('utf-8')
    elif not isinstance(input, binary_type):
        input = input.read()

    try:
        pipe = subprocess.Popen(command,
                                shell=False,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                )
    except __HOLE__:
        return (None, None, -1)

    (output, stderr) = pipe.communicate(input=input)
    (output, stderr) = (c.decode('utf-8', errors='ignore') for c in (output, stderr))

    return (output, stderr, pipe.returncode)
|
OSError
|
dataset/ETHPy150Open opencollab/debile/debile/utils/commands.py/run_command
|
9,613
|
def view_debater(request, debater_id):
    debater_id = int(debater_id)
    try:
        debater = Debater.objects.get(pk=debater_id)
    except Debater.DoesNotExist:
        return render_to_response('error.html',
                                  {'error_type': "View Debater",
                                   'error_name': str(debater_id),
                                   'error_info': "No such debater"},
                                  context_instance=RequestContext(request))

    if request.method == 'POST':
        form = DebaterForm(request.POST, instance=debater)
        if form.is_valid():
            try:
                form.save()
            except __HOLE__:
                return render_to_response('error.html',
                                          {'error_type': "Debater",
                                           'error_name': "[" + form.cleaned_data['name'] + "]",
                                           'error_info': "Debater name cannot be validated, most likely a non-existent debater"},
                                          context_instance=RequestContext(request))
            return render_to_response('thanks.html',
                                      {'data_type': "Debater",
                                       'data_name': "[" + form.cleaned_data['name'] + "]"},
                                      context_instance=RequestContext(request))
    else:
        rounds = RoundStats.objects.filter(debater=debater)
        rounds = sorted(list(rounds), key=lambda x: x.round.round_number)
        form = DebaterForm(instance=debater)
        # Really only should be one, TODO: change to get when we have tests
        teams = Team.objects.filter(debaters=debater)
        links = [('/debater/' + str(debater_id) + '/delete/', 'Delete', True)]
        for team in teams:
            links.append(('/team/' + str(team.id) + '/', "View %s" % team.name, False))
        return render_to_response('data_entry.html',
                                  {'form': form,
                                   'debater_obj': debater,
                                   'links': links,
                                   'debater_rounds': rounds,
                                   'title': "Viewing Debater: %s" % (debater.name)},
                                  context_instance=RequestContext(request))
|
ValueError
|
dataset/ETHPy150Open jolynch/mit-tab/mittab/apps/tab/debater_views.py/view_debater
|
9,614
|
def enter_debater(request):
    if request.method == 'POST':
        form = DebaterForm(request.POST)
        if form.is_valid():
            try:
                form.save()
            except __HOLE__:
                return render_to_response('error.html',
                                          {'error_type': "Debater",
                                           'error_name': "[" + form.cleaned_data['name'] + "]",
                                           'error_info': "Debater name cannot be validated, most likely a duplicate debater"},
                                          context_instance=RequestContext(request))
            return render_to_response('thanks.html',
                                      {'data_type': "Debater",
                                       'data_name': "[" + form.cleaned_data['name'] + "]",
                                       'data_modification': "CREATED",
                                       'enter_again': True},
                                      context_instance=RequestContext(request))
    else:
        form = DebaterForm()
    return render_to_response('data_entry.html',
                              {'form': form,
                               'title': "Create Debater:"},
                              context_instance=RequestContext(request))
|
ValueError
|
dataset/ETHPy150Open jolynch/mit-tab/mittab/apps/tab/debater_views.py/enter_debater
|
9,615
|
def _is_valid_key(self, key):
    """Validate a proposed dictionary key

    Parameters
    ----------
    key : object

    Returns
    -------
    boolean
    """
    # The key should be a tuple
    if not isinstance(key, tuple):
        return False

    # The tuple should be a triplet
    if len(key) != 3:
        return False

    # The triplet should be a pair of axelrod.Player subclasses and an
    # integer
    try:
        if not (
            issubclass(key[0], Player) and
            issubclass(key[1], Player) and
            isinstance(key[2], int)
        ):
            return False
    except __HOLE__:
        return False

    # Each Player class should be deterministic
    if key[0].classifier['stochastic'] or key[1].classifier['stochastic']:
        return False

    return True
|
TypeError
|
dataset/ETHPy150Open Axelrod-Python/Axelrod/axelrod/deterministic_cache.py/DeterministicCache._is_valid_key
|
9,616
|
def test_is_in_failure(self):
    try:
        assert_that(4).is_in(1, 2, 3)
        fail('should have raised error')
    except __HOLE__ as ex:
        assert_that(str(ex)).is_equal_to('Expected <4> to be in (1, 2, 3), but was not.')
|
AssertionError
|
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_in.py/TestIn.test_is_in_failure
|
9,617
|
def test_is_in_missing_arg_failure(self):
    try:
        assert_that(1).is_in()
        fail('should have raised error')
    except __HOLE__ as ex:
        assert_that(str(ex)).is_equal_to('one or more args must be given')
|
ValueError
|
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_in.py/TestIn.test_is_in_missing_arg_failure
|
9,618
|
def test_is_not_in_failure(self):
    try:
        assert_that(1).is_not_in(1, 2, 3)
        fail('should have raised error')
    except __HOLE__ as ex:
        assert_that(str(ex)).is_equal_to('Expected <1> to not be in (1, 2, 3), but was.')
|
AssertionError
|
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_in.py/TestIn.test_is_not_in_failure
|
9,619
|
def test_is_not_in_missing_arg_failure(self):
    try:
        assert_that(1).is_not_in()
        fail('should have raised error')
    except __HOLE__ as ex:
        assert_that(str(ex)).is_equal_to('one or more args must be given')
|
ValueError
|
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_in.py/TestIn.test_is_not_in_missing_arg_failure
|
9,620
|
def get_config_loader(path, request=None):
    i = path.rfind('.')
    module, attr = path[:i], path[i + 1:]
    try:
        mod = import_module(module)
    except ImportError, e:
        raise ImproperlyConfigured(
            'Error importing SAML config loader %s: "%s"' % (path, e))
    except __HOLE__, e:
        raise ImproperlyConfigured(
            'Error importing SAML config loader. Is SAML_CONFIG_LOADER '
            'a correctly string with a callable path?'
        )
    try:
        config_loader = getattr(mod, attr)
    except AttributeError:
        raise ImproperlyConfigured(
            'Module "%s" does not define a "%s" config loader' %
            (module, attr)
        )
    if not hasattr(config_loader, '__call__'):
        raise ImproperlyConfigured(
            "SAML config loader must be a callable object.")
    return config_loader
|
ValueError
|
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/djangosaml2-0.13.0/djangosaml2/conf.py/get_config_loader
|
9,621
|
def save_changelog_del(sender, instance, using, **kwargs):
    if sender in settings.MODELS and using == settings.LOCAL:
        cl = ChangeLog.objects.create(object=instance, action=DELETION)
        try:
            k = repr(instance.natural_key())
            DeleteKey.objects.create(changelog=cl, key=k)
        except __HOLE__:
            pass
|
AttributeError
|
dataset/ETHPy150Open zlorf/django-synchro/synchro/handlers.py/save_changelog_del
|
9,622
|
def __init__(self, srs_input=''):
    """
    Creates a GDAL OSR Spatial Reference object from the given input.
    The input may be string of OGC Well Known Text (WKT), an integer
    EPSG code, a PROJ.4 string, and/or a projection "well known" shorthand
    string (one of 'WGS84', 'WGS72', 'NAD27', 'NAD83').
    """
    srs_type = 'user'

    if isinstance(srs_input, six.string_types):
        # Encoding to ASCII if unicode passed in.
        if isinstance(srs_input, six.text_type):
            srs_input = srs_input.encode('ascii')
        try:
            # If SRID is a string, e.g., '4326', then make acceptable
            # as user input.
            srid = int(srs_input)
            srs_input = 'EPSG:%d' % srid
        except __HOLE__:
            pass
    elif isinstance(srs_input, six.integer_types):
        # EPSG integer code was input.
        srs_type = 'epsg'
    elif isinstance(srs_input, self.ptr_type):
        srs = srs_input
        srs_type = 'ogr'
    else:
        raise TypeError('Invalid SRS type "%s"' % srs_type)

    if srs_type == 'ogr':
        # Input is already an SRS pointer.
        srs = srs_input
    else:
        # Creating a new SRS pointer, using the string buffer.
        buf = c_char_p(b'')
        srs = capi.new_srs(buf)

    # If the pointer is NULL, throw an exception.
    if not srs:
        raise SRSException('Could not create spatial reference from: %s' % srs_input)
    else:
        self.ptr = srs

    # Importing from either the user input string or an integer SRID.
    if srs_type == 'user':
        self.import_user_input(srs_input)
    elif srs_type == 'epsg':
        self.import_epsg(srs_input)
|
ValueError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/contrib/gis/gdal/srs.py/SpatialReference.__init__
|
9,623
|
@property
def srid(self):
    "Returns the SRID of top-level authority, or None if undefined."
    try:
        return int(self.attr_value('AUTHORITY', 1))
    except (__HOLE__, ValueError):
        return None

#### Unit Properties ####
|
TypeError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/contrib/gis/gdal/srs.py/SpatialReference.srid
|
9,624
|
def get_species_list(self):
    """Return a formatted list of all species."""
    c = self.assets.db.cursor()
    c.execute("select distinct name from assets where type = 'species' order by name")
    names = [x[0] for x in c.fetchall()]
    formatted = []
    for s in names:
        if s == "dummy":
            continue
        try:
            formatted.append(s[0].upper() + s[1:])
        except __HOLE__:
            formatted.append(s)
            logging.exception("Unable to format species: %s", s)
    return formatted
|
IndexError
|
dataset/ETHPy150Open wizzomafizzo/starcheat/starcheat/assets/species.py/Species.get_species_list
|
9,625
|
def get_appearance_data(self, name, gender, key):
    species = self.get_species(name)
    # there is another json extension here where strings that have a , on
    # the end are treated as 1 item lists. there are also some species with
    # missing keys
    try:
        results = self.get_gender_data(species, gender)[key]
    except __HOLE__:
        return []
    if type(results) is str:
        return (results,)
    else:
        return results
|
KeyError
|
dataset/ETHPy150Open wizzomafizzo/starcheat/starcheat/assets/species.py/Species.get_appearance_data
|
9,626
|
def get_preview_image(self, name, gender):
    """Return raw image data for species placeholder pic.

    I don't think this is actually used anywhere in game. Some mods don't
    include it."""
    species = self.get_species(name.lower())
    try:
        try:
            key = self.get_gender_data(species, gender)["characterImage"]
        except __HOLE__:
            return None
        return self.assets.read(key, species[0][1], image=True)
    except FileNotFoundError:
        # corrupt save, no race set
        logging.warning("No race set on player")
        return None
|
TypeError
|
dataset/ETHPy150Open wizzomafizzo/starcheat/starcheat/assets/species.py/Species.get_preview_image
|
9,627
|
def render_player(self, player, armor=True):
    """Return an Image of a fully rendered player from a save."""
    name = player.get_race()
    gender = player.get_gender()
    species = self.get_species(name.lower())

    if species is None:
        return Image.open(BytesIO(self.assets.items().missing_icon()))

    asset_loc = species[0][1]

    # crop the spritesheets and replace colours
    def grab_sprite(sheet_path, rect, directives):
        sheet = self.assets.read(sheet_path, asset_loc, True)
        img = Image.open(BytesIO(sheet)).convert("RGBA").crop(rect)
        if directives != "":
            img = replace_colors(img, unpack_color_directives(directives))
        return img

    default_rect = (43, 0, 86, 43)

    # TODO: should use the .bbox to figure this out
    personality = player.get_personality()
    personality_offset = int(re.search("\d$", personality).group(0)) * 43
    body_rect = (personality_offset, 0, personality_offset + 43, 43)

    body_img = grab_sprite("/humanoid/%s/%sbody.png" % (name, gender),
                           body_rect,
                           player.get_body_directives())
    frontarm_img = grab_sprite("/humanoid/%s/frontarm.png" % name,
                               body_rect,
                               player.get_body_directives())
    backarm_img = grab_sprite("/humanoid/%s/backarm.png" % name,
                              body_rect,
                              player.get_body_directives())
    head_img = grab_sprite("/humanoid/%s/%shead.png" % (name, gender),
                           default_rect,
                           player.get_body_directives())

    hair = player.get_hair()
    hair_img = None
    if hair[0] != "":
        hair_img = self.get_hair_image(
            name, hair[0],
            hair[1], gender,
            player.get_hair_directives()
        )

    facial_hair = player.get_facial_hair()
    facial_hair_img = None
    if facial_hair[0] != "":
        facial_hair_img = self.get_hair_image(
            name, facial_hair[0],
            facial_hair[1], gender,
            player.get_facial_hair_directives()
        )

    facial_mask = player.get_facial_mask()
    facial_mask_img = None
    if facial_mask[0] != "":
        facial_mask_img = self.get_hair_image(
            name, facial_mask[0],
            facial_mask[1], gender,
            player.get_facial_mask_directives()
        )

    head_slot = player.get_visible("head")
    chest_slot = player.get_visible("chest")
    legs_slot = player.get_visible("legs")
    back_slot = player.get_visible("back")
    do_head = armor and head_slot is not None

    # new blank canvas!
    base_size = 43
    base = Image.new("RGBA", (base_size, base_size))

    # the order of these is important!
    # back arm
    base.paste(backarm_img)
    if armor and chest_slot is not None:
        base = self.render_chest(player, base, chest_slot, "bsleeve")

    # backpack
    if armor and back_slot is not None:
        base = self.render_part(player, base, "back", back_slot)

    # then the head
    base.paste(head_img, mask=head_img)

    # TODO: support mask on head items
    if hair_img is not None:
        try:
            base.paste(hair_img, mask=hair_img)
        except __HOLE__:
            logging.exception("Bad hair image: %s, %s", hair[0], hair[1])

    # body
    base.paste(body_img, mask=body_img)
    if armor and legs_slot is not None:
        base = self.render_part(player, base, "legs", legs_slot)
    if armor and chest_slot is not None:
        base = self.render_chest(player, base, chest_slot, "body")

    # front arm
    base.paste(frontarm_img, mask=frontarm_img)
    if armor and chest_slot is not None:
        base = self.render_chest(player, base, chest_slot, "fsleeve")

    # facial mask if set
    if facial_mask_img is not None:
        try:
            base.paste(facial_mask_img, mask=facial_mask_img)
        except ValueError:
            logging.exception("Bad facial mask image: %s, %s",
                              facial_mask[0], facial_mask[1])

    # facial hair if set
    if facial_hair_img is not None:
        try:
            base.paste(facial_hair_img, mask=facial_hair_img)
        except ValueError:
            logging.exception("Bad facial hair image: %s, %s",
                              facial_hair[0], facial_hair[1])

    if do_head:
        base = self.render_part(player, base, "head", head_slot)

    return base.resize((base_size*3, base_size*3))
|
ValueError
|
dataset/ETHPy150Open wizzomafizzo/starcheat/starcheat/assets/species.py/Species.render_player
|
9,628
|
def get_hair_image(self, name, hair_type, hair_group, gender, directives):
    # TODO: bbox is from .frame file, need a way to read them still
    species = self.get_species(name.lower())
    image_path = "/humanoid/%s/%s/%s.png" % (name, hair_type, hair_group)
    try:
        image = self.assets.read(image_path, species[0][1], image=True)
        image = Image.open(BytesIO(image)).convert("RGBA").crop((43, 0, 86, 43))
        return replace_colors(image, unpack_color_directives(directives))
    except __HOLE__:
        logging.exception("Missing hair image: %s", image_path)
        return
|
OSError
|
dataset/ETHPy150Open wizzomafizzo/starcheat/starcheat/assets/species.py/Species.get_hair_image
|
9,629
|
def get_field(self):
    try:
        field = self.REQUEST['sort']
    except (KeyError, ValueError, __HOLE__):
        field = ''
    return (self.direction == 'desc' and '-' or '') + field
|
TypeError
|
dataset/ETHPy150Open directeur/django-sorting/django_sorting/middleware.py/get_field
|
9,630
|
def get_direction(self):
    try:
        return self.REQUEST['dir']
    except (KeyError, ValueError, __HOLE__):
        return 'desc'
|
TypeError
|
dataset/ETHPy150Open directeur/django-sorting/django_sorting/middleware.py/get_direction
|
9,631
|
def persist(self):
    '''
    Persist the modified schedule into <<configdir>>/minion.d/_schedule.conf
    '''
    config_dir = self.opts.get('conf_dir', None)
    if config_dir is None and 'conf_file' in self.opts:
        config_dir = os.path.dirname(self.opts['conf_file'])
    if config_dir is None:
        config_dir = salt.syspaths.CONFIG_DIR

    minion_d_dir = os.path.join(
        config_dir,
        os.path.dirname(self.opts.get('default_include',
                                      salt.config.DEFAULT_MINION_OPTS['default_include'])))

    if not os.path.isdir(minion_d_dir):
        os.makedirs(minion_d_dir)

    schedule_conf = os.path.join(minion_d_dir, '_schedule.conf')
    log.debug('Persisting schedule')
    try:
        with salt.utils.fopen(schedule_conf, 'wb+') as fp_:
            fp_.write(
                salt.utils.to_bytes(
                    yaml.dump({'schedule': self.opts['schedule']})
                )
            )
    except (IOError, __HOLE__):
        log.error('Failed to persist the updated schedule',
                  exc_info_on_loglevel=logging.DEBUG)
|
OSError
|
dataset/ETHPy150Open saltstack/salt/salt/utils/schedule.py/Schedule.persist
|
9,632
|
def handle_func(self, multiprocessing_enabled, func, data):
    '''
    Execute this method in a multiprocess or thread
    '''
    if salt.utils.is_windows():
        # Since function references can't be pickled and pickling
        # is required when spawning new processes on Windows, regenerate
        # the functions and returners.
        self.functions = salt.loader.minion_mods(self.opts)
        self.returners = salt.loader.returners(self.opts, self.functions)
    ret = {'id': self.opts.get('id', 'master'),
           'fun': func,
           'schedule': data['name'],
           'jid': salt.utils.jid.gen_jid()}

    if 'metadata' in data:
        if isinstance(data['metadata'], dict):
            ret['metadata'] = data['metadata']
            ret['metadata']['_TOS'] = self.time_offset
            ret['metadata']['_TS'] = time.ctime()
            ret['metadata']['_TT'] = time.strftime('%Y %B %d %a %H %m', time.gmtime())
        else:
            log.warning('schedule: The metadata parameter must be '
                        'specified as a dictionary. Ignoring.')

    salt.utils.appendproctitle('{0} {1}'.format(self.__class__.__name__, ret['jid']))

    proc_fn = os.path.join(
        salt.minion.get_proc_dir(self.opts['cachedir']),
        ret['jid']
    )

    # Check to see if there are other jobs with this
    # signature running. If there are more than maxrunning
    # jobs present then don't start another.
    # If jid_include is False for this job we can ignore all this
    # NOTE--jid_include defaults to True, thus if it is missing from the data
    # dict we treat it like it was there and is True
    if 'jid_include' not in data or data['jid_include']:
        jobcount = 0
        for basefilename in os.listdir(salt.minion.get_proc_dir(self.opts['cachedir'])):
            fn_ = os.path.join(salt.minion.get_proc_dir(self.opts['cachedir']), basefilename)
            if not os.path.exists(fn_):
                log.debug('schedule.handle_func: {0} was processed '
                          'in another thread, skipping.'.format(
                              basefilename))
                continue
            with salt.utils.fopen(fn_, 'rb') as fp_:
                job = salt.payload.Serial(self.opts).load(fp_)
                if job:
                    if 'schedule' in job:
                        log.debug('schedule.handle_func: Checking job against '
                                  'fun {0}: {1}'.format(ret['fun'], job))
                        if ret['schedule'] == job['schedule'] and os_is_running(job['pid']):
                            jobcount += 1
                            log.debug(
                                'schedule.handle_func: Incrementing jobcount, now '
                                '{0}, maxrunning is {1}'.format(
                                    jobcount, data['maxrunning']))
                            if jobcount >= data['maxrunning']:
                                log.debug(
                                    'schedule.handle_func: The scheduled job {0} '
                                    'was not started, {1} already running'.format(
                                        ret['schedule'], data['maxrunning']))
                                return False
                else:
                    try:
                        log.info('Invalid job file found. Removing.')
                        os.remove(fn_)
                    except OSError:
                        log.info('Unable to remove file: {0}.'.format(fn_))

    if multiprocessing_enabled and not salt.utils.is_windows():
        # Reconfigure multiprocessing logging after daemonizing
        log_setup.setup_multiprocessing_logging()

    # Don't *BEFORE* to go into try to don't let it triple execute the finally section.
    salt.utils.daemonize_if(self.opts)

    # TODO: Make it readable! Splt to funcs, remove nested try-except-finally sections.
    try:
        ret['pid'] = os.getpid()

        if 'jid_include' not in data or data['jid_include']:
            log.debug('schedule.handle_func: adding this job to the jobcache '
                      'with data {0}'.format(ret))
            # write this to /var/cache/salt/minion/proc
            with salt.utils.fopen(proc_fn, 'w+b') as fp_:
                fp_.write(salt.payload.Serial(self.opts).dumps(ret))

        args = tuple()
        if 'args' in data:
            args = data['args']

        kwargs = {}
        if 'kwargs' in data:
            kwargs = data['kwargs']

        if func not in self.functions:
            ret['return'] = self.functions.missing_fun_string(func)
            salt.utils.error.raise_error(
                message=self.functions.missing_fun_string(func))

        # if the func support **kwargs, lets pack in the pub data we have
        # TODO: pack the *same* pub data as a minion?
        argspec = salt.utils.args.get_function_argspec(self.functions[func])
        if argspec.keywords:
            # this function accepts **kwargs, pack in the publish data
            for key, val in six.iteritems(ret):
                kwargs['__pub_{0}'.format(key)] = val

        ret['return'] = self.functions[func](*args, **kwargs)

        data_returner = data.get('returner', None)
        if data_returner or self.schedule_returner:
            if 'returner_config' in data:
                ret['ret_config'] = data['returner_config']

            if 'returner_kwargs' in data:
                ret['ret_kwargs'] = data['returner_kwargs']

            rets = []
            for returner in [data_returner, self.schedule_returner]:
                if isinstance(returner, str):
                    rets.append(returner)
                elif isinstance(returner, list):
                    rets.extend(returner)
            # simple de-duplication with order retained
            for returner in OrderedDict.fromkeys(rets):
                ret_str = '{0}.returner'.format(returner)
                if ret_str in self.returners:
                    ret['success'] = True
                    self.returners[ret_str](ret)
                else:
                    log.info(
                        'Job {0} using invalid returner: {1}. Ignoring.'.format(
                            func, returner
                        )
                    )

        # runners do not provide retcode
        if 'retcode' in self.functions.pack['__context__']:
            ret['retcode'] = self.functions.pack['__context__']['retcode']

        ret['success'] = True
    except Exception:
        log.exception("Unhandled exception running {0}".format(ret['fun']))
        # Although catch-all exception handlers are bad, the exception here
        # is to let the exception bubble up to the top of the thread context,
        # where the thread will die silently, which is worse.
        if 'return' not in ret:
            ret['return'] = "Unhandled exception running {0}".format(ret['fun'])
        ret['success'] = False
        ret['retcode'] = 254
    finally:
        try:
            # Only attempt to return data to the master
            # if the scheduled job is running on a minion.
            if '__role' in self.opts and self.opts['__role'] == 'minion':
                if 'return_job' in data and not data['return_job']:
                    pass
                else:
                    # Send back to master so the job is included in the job list
                    mret = ret.copy()
                    mret['jid'] = 'req'
                    event = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
                    load = {'cmd': '_return', 'id': self.opts['id']}
                    for key, value in six.iteritems(mret):
                        load[key] = value
                    event.fire_event(load, '__schedule_return')

            log.debug('schedule.handle_func: Removing {0}'.format(proc_fn))
            os.unlink(proc_fn)
        except __HOLE__ as exc:
            if exc.errno == errno.EEXIST or exc.errno == errno.ENOENT:
                # EEXIST and ENOENT are OK because the file is gone and that's what
                # we wanted
                pass
            else:
                log.error("Failed to delete '{0}': {1}".format(proc_fn, exc.errno))
                # Otherwise, failing to delete this file is not something
                # we can cleanly handle.
                raise
        finally:
            if multiprocessing_enabled:
                # Let's make sure we exit the process!
                exit(salt.defaults.exitcodes.EX_GENERIC)
|
OSError
|
dataset/ETHPy150Open saltstack/salt/salt/utils/schedule.py/Schedule.handle_func
|
9,633
|
def eval(self):
    '''
    Evaluate and execute the schedule
    '''
    schedule = self.option('schedule')
    if not isinstance(schedule, dict):
        raise ValueError('Schedule must be of type dict.')
    if 'enabled' in schedule and not schedule['enabled']:
        return
    for job, data in six.iteritems(schedule):
        if job == 'enabled' or not data:
            continue
        if not isinstance(data, dict):
            log.error('Scheduled job "{0}" should have a dict value, not {1}'.format(job, type(data)))
            continue
        # Job is disabled, continue
        if 'enabled' in data and not data['enabled']:
            continue
        if 'function' in data:
            func = data['function']
        elif 'func' in data:
            func = data['func']
        elif 'fun' in data:
            func = data['fun']
        else:
            func = None
        if func not in self.functions:
            log.info(
                'Invalid function: {0} in scheduled job {1}.'.format(
                    func, job
                )
            )
        if 'name' not in data:
            data['name'] = job

        # Add up how many seconds between now and then
        when = 0
        seconds = 0
        cron = 0
        now = int(time.time())

        if 'until' in data:
            if not _WHEN_SUPPORTED:
                log.error('Missing python-dateutil.'
                          'Ignoring until.')
            else:
                until__ = dateutil_parser.parse(data['until'])
                until = int(time.mktime(until__.timetuple()))

                if until <= now:
                    log.debug('Until time has passed '
                              'skipping job: {0}.'.format(data['name']))
                    continue

        if 'after' in data:
            if not _WHEN_SUPPORTED:
                log.error('Missing python-dateutil.'
                          'Ignoring after.')
            else:
                after__ = dateutil_parser.parse(data['after'])
                after = int(time.mktime(after__.timetuple()))

                if after >= now:
                    log.debug('After time has not passed '
                              'skipping job: {0}.'.format(data['name']))
                    continue

        # Used for quick lookups when detecting invalid option combinations.
        schedule_keys = set(data.keys())

        time_elements = ('seconds', 'minutes', 'hours', 'days')
        scheduling_elements = ('when', 'cron', 'once')

        invalid_sched_combos = [set(i)
                                for i in itertools.combinations(scheduling_elements, 2)]

        if any(i <= schedule_keys for i in invalid_sched_combos):
            log.error('Unable to use "{0}" options together. Ignoring.'
                      .format('", "'.join(scheduling_elements)))
            continue

        invalid_time_combos = []
        for item in scheduling_elements:
            all_items = itertools.chain([item], time_elements)
            invalid_time_combos.append(
                set(itertools.combinations(all_items, 2)))

        if any(set(x) <= schedule_keys for x in invalid_time_combos):
            log.error('Unable to use "{0}" with "{1}" options. Ignoring'
                      .format('", "'.join(time_elements),
                              '", "'.join(scheduling_elements)))
            continue

        if True in [True for item in time_elements if item in data]:
            # Add up how many seconds between now and then
            seconds += int(data.get('seconds', 0))
            seconds += int(data.get('minutes', 0)) * 60
            seconds += int(data.get('hours', 0)) * 3600
            seconds += int(data.get('days', 0)) * 86400
        elif 'once' in data:
            once_fmt = data.get('once_fmt', '%Y-%m-%dT%H:%M:%S')

            try:
                once = datetime.datetime.strptime(data['once'], once_fmt)
                once = int(time.mktime(once.timetuple()))
            except (TypeError, ValueError):
                log.error('Date string could not be parsed: %s, %s',
                          data['once'], once_fmt)
                continue

            if now != once:
                continue
            else:
                seconds = 1
        elif 'when' in data:
            if not _WHEN_SUPPORTED:
                log.error('Missing python-dateutil.'
                          'Ignoring job {0}'.format(job))
                continue

            if isinstance(data['when'], list):
                _when = []
                for i in data['when']:
                    if ('pillar' in self.opts and 'whens' in self.opts['pillar'] and
                            i in self.opts['pillar']['whens']):
                        if not isinstance(self.opts['pillar']['whens'],
                                          dict):
                            log.error('Pillar item "whens" must be dict.'
                                      'Ignoring')
                            continue
                        __when = self.opts['pillar']['whens'][i]
                        try:
                            when__ = dateutil_parser.parse(__when)
                        except ValueError:
                            log.error('Invalid date string. Ignoring')
                            continue
                    elif ('whens' in self.opts['grains'] and
                          i in self.opts['grains']['whens']):
                        if not isinstance(self.opts['grains']['whens'],
                                          dict):
                            log.error('Grain "whens" must be dict.'
                                      'Ignoring')
                            continue
                        __when = self.opts['grains']['whens'][i]
                        try:
                            when__ = dateutil_parser.parse(__when)
                        except ValueError:
                            log.error('Invalid date string. Ignoring')
                            continue
                    else:
                        try:
                            when__ = dateutil_parser.parse(i)
                        except ValueError:
                            log.error('Invalid date string {0}.'
                                      'Ignoring job {1}.'.format(i, job))
                            continue
                    when = int(time.mktime(when__.timetuple()))
                    if when >= now:
                        _when.append(when)
                _when.sort()
                if _when:
                    # Grab the first element
                    # which is the next run time
                    when = _when[0]

                    # If we're switching to the next run in a list
                    # ensure the job can run
                    if '_when' in data and data['_when'] != when:
                        data['_when_run'] = True
                        data['_when'] = when
                    seconds = when - now

                    # scheduled time is in the past
                    if seconds < 0:
                        continue

                    if '_when_run' not in data:
                        data['_when_run'] = True

                    # Backup the run time
                    if '_when' not in data:
                        data['_when'] = when

                    # A new 'when' ensure _when_run is True
                    if when > data['_when']:
                        data['_when'] = when
                        data['_when_run'] = True
                else:
                    continue
            else:
                if ('pillar' in self.opts and 'whens' in self.opts['pillar'] and
                        data['when'] in self.opts['pillar']['whens']):
                    if not isinstance(self.opts['pillar']['whens'], dict):
                        log.error('Pillar item "whens" must be dict.'
                                  'Ignoring')
                        continue
                    _when = self.opts['pillar']['whens'][data['when']]
                    try:
                        when__ = dateutil_parser.parse(_when)
                    except ValueError:
                        log.error('Invalid date string. Ignoring')
                        continue
                elif ('whens' in self.opts['grains'] and
                      data['when'] in self.opts['grains']['whens']):
                    if not isinstance(self.opts['grains']['whens'], dict):
                        log.error('Grain "whens" must be dict. Ignoring')
                        continue
                    _when = self.opts['grains']['whens'][data['when']]
                    try:
                        when__ = dateutil_parser.parse(_when)
                    except ValueError:
                        log.error('Invalid date string. Ignoring')
                        continue
                else:
                    try:
                        when__ = dateutil_parser.parse(data['when'])
                    except ValueError:
                        log.error('Invalid date string. Ignoring')
                        continue

                when = int(time.mktime(when__.timetuple()))
                now = int(time.time())
                seconds = when - now

                # scheduled time is in the past
                if seconds < 0:
                    continue

                if '_when_run' not in data:
                    data['_when_run'] = True

                # Backup the run time
                if '_when' not in data:
                    data['_when'] = when

                # A new 'when' ensure _when_run is True
                if when > data['_when']:
                    data['_when'] = when
                    data['_when_run'] = True
        elif 'cron' in data:
            if not _CRON_SUPPORTED:
                log.error('Missing python-croniter. Ignoring job {0}'.format(job))
                continue

            now = int(time.mktime(datetime.datetime.now().timetuple()))
            try:
                cron = int(croniter.croniter(data['cron'], now).get_next())
            except (ValueError, KeyError):
                log.error('Invalid cron string. Ignoring')
                continue
            seconds = cron - now
        else:
            continue

        # Check if the seconds variable is lower than current lowest
        # loop interval needed. If it is lower than overwrite variable
        # external loops using can then check this variable for how often
        # they need to reschedule themselves
        # Not used with 'when' parameter, causes run away jobs and CPU
        # spikes.
        if 'when' not in data:
            if seconds < self.loop_interval:
                self.loop_interval = seconds
        run = False

        if 'splay' in data:
            if 'when' in data:
                log.error('Unable to use "splay" with "when" option at this time. Ignoring.')
            elif 'cron' in data:
                log.error('Unable to use "splay" with "cron" option at this time. Ignoring.')
            else:
                if '_seconds' not in data:
                    log.debug('The _seconds parameter is missing, '
                              'most likely the first run or the schedule '
                              'has been refreshed refresh.')
                    if 'seconds' in data:
                        data['_seconds'] = data['seconds']
                    else:
                        data['_seconds'] = 0

        if job in self.intervals:
            if 'when' in data:
                if seconds == 0:
                    if data['_when_run']:
                        data['_when_run'] = False
                        run = True
            elif 'cron' in data:
                if seconds == 1:
                    run = True
            else:
                if now - self.intervals[job] >= seconds:
                    run = True
        else:
            if 'when' in data:
                if seconds == 0:
                    if data['_when_run']:
                        data['_when_run'] = False
                        run = True
            elif 'cron' in data:
                if seconds == 1:
                    run = True
            else:
                # If run_on_start is True, the job will run when the Salt
                # minion start. If the value is False will run at the next
                # scheduled run. Default is True.
                if 'run_on_start' in data:
                    if data['run_on_start']:
                        run = True
                    else:
                        self.intervals[job] = int(time.time())
                else:
                    run = True

        if run:
            if 'range' in data:
                if not _RANGE_SUPPORTED:
                    log.error('Missing python-dateutil. Ignoring job {0}'.format(job))
                    continue
                else:
                    if isinstance(data['range'], dict):
                        try:
                            start = int(time.mktime(dateutil_parser.parse(data['range']['start']).timetuple()))
                        except ValueError:
                            log.error('Invalid date string for start. Ignoring job {0}.'.format(job))
                            continue
                        try:
                            end = int(time.mktime(dateutil_parser.parse(data['range']['end']).timetuple()))
                        except __HOLE__:
                            log.error('Invalid date string for end. Ignoring job {0}.'.format(job))
                            continue
                        if end > start:
                            if 'invert' in data['range'] and data['range']['invert']:
                                if now <= start or now >= end:
                                    run = True
                                else:
                                    run = False
                            else:
                                if now >= start and now <= end:
                                    run = True
                                else:
                                    run = False
                        else:
                            log.error('schedule.handle_func: Invalid range, end must be larger than start. \
                                     Ignoring job {0}.'.format(job))
                            continue
                    else:
                        log.error('schedule.handle_func: Invalid, range must be specified as a dictionary. \
                                 Ignoring job {0}.'.format(job))
                        continue

        if not run:
            continue
        else:
            if 'splay' in data:
                if 'when' in data:
                    log.error('Unable to use "splay" with "when" option at this time. Ignoring.')
                else:
                    if isinstance(data['splay'], dict):
                        if data['splay']['end'] >= data['splay']['start']:
                            splay = random.randint(data['splay']['start'], data['splay']['end'])
                        else:
                            log.error('schedule.handle_func: Invalid Splay, end must be larger than start. \
                                     Ignoring splay.')
                            splay = None
                    else:
                        splay = random.randint(0, data['splay'])

                    if splay:
                        log.debug('schedule.handle_func: Adding splay of '
                                  '{0} seconds to next run.'.format(splay))
                        if 'seconds' in data:
                            data['seconds'] = data['_seconds'] + splay
                        else:
                            data['seconds'] = 0 + splay

            log.info('Running scheduled job: {0}'.format(job))

            if 'jid_include' not in data or data['jid_include']:
                data['jid_include'] = True
                log.debug('schedule: This job was scheduled with jid_include, '
                          'adding to cache (jid_include defaults to True)')
                if 'maxrunning' in data:
                    log.debug('schedule: This job was scheduled with a max '
                              'number of {0}'.format(data['maxrunning']))
                else:
                    log.info('schedule: maxrunning parameter was not specified for '
                             'job {0}, defaulting to 1.'.format(job))
                    data['maxrunning'] = 1

            multiprocessing_enabled = self.opts.get('multiprocessing', True)
            if salt.utils.is_windows():
                # Temporarily stash our function references.
                # You can't pickle function references, and pickling is
                # required when spawning new processes on Windows.
                functions = self.functions
                self.functions = {}
                returners = self.returners
                self.returners = {}
            try:
                if multiprocessing_enabled:
                    thread_cls = SignalHandlingMultiprocessingProcess
                else:
                    thread_cls = threading.Thread
                proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, func, data))
                if multiprocessing_enabled:
                    with default_signals(signal.SIGINT, signal.SIGTERM):
                        # Reset current signals before starting the process in
                        # order not to inherit the current signal handlers
                        proc.start()
                else:
                    proc.start()
                if multiprocessing_enabled:
                    proc.join()
            finally:
                self.intervals[job] = now
            if salt.utils.is_windows():
                # Restore our function references.
                self.functions = functions
                self.returners = returners
|
ValueError
|
dataset/ETHPy150Open saltstack/salt/salt/utils/schedule.py/Schedule.eval
|
9,634
|
def clean_proc_dir(opts):
    '''
    Loop through jid files in the minion proc directory (default /var/cache/salt/minion/proc)
    and remove any that refer to processes that no longer exist
    '''
    for basefilename in os.listdir(salt.minion.get_proc_dir(opts['cachedir'])):
        fn_ = os.path.join(salt.minion.get_proc_dir(opts['cachedir']), basefilename)
        with salt.utils.fopen(fn_, 'rb') as fp_:
            job = None
            try:
                job = salt.payload.Serial(opts).load(fp_)
            except Exception:  # It's corrupted
                # Windows cannot delete an open file
                if salt.utils.is_windows():
                    fp_.close()
                try:
                    os.unlink(fn_)
                    continue
                except OSError:
                    continue
            log.debug('schedule.clean_proc_dir: checking job {0} for process '
                      'existence'.format(job))
            if job is not None and 'pid' in job:
                if salt.utils.process.os_is_running(job['pid']):
                    log.debug('schedule.clean_proc_dir: Cleaning proc dir, '
                              'pid {0} still exists.'.format(job['pid']))
                else:
                    # Windows cannot delete an open file
                    if salt.utils.is_windows():
                        fp_.close()
                    # Maybe the file is already gone
                    try:
                        os.unlink(fn_)
                    except __HOLE__:
                        pass
|
OSError
|
dataset/ETHPy150Open saltstack/salt/salt/utils/schedule.py/clean_proc_dir
|
9,635
|
def setup (**attrs):
    """The gateway to the Distutils: do everything your setup script needs
    to do, in a highly flexible and user-driven way.  Briefly: create a
    Distribution instance; find and parse config files; parse the command
    line; run each Distutils command found there, customized by the options
    supplied to 'setup()' (as keyword arguments), in config files, and on
    the command line.

    The Distribution instance might be an instance of a class supplied via
    the 'distclass' keyword argument to 'setup'; if no such class is
    supplied, then the Distribution class (in dist.py) is instantiated.
    All other arguments to 'setup' (except for 'cmdclass') are used to set
    attributes of the Distribution instance.

    The 'cmdclass' argument, if supplied, is a dictionary mapping command
    names to command classes.  Each command encountered on the command line
    will be turned into a command class, which is in turn instantiated; any
    class found in 'cmdclass' is used in place of the default, which is
    (for command 'foo_bar') class 'foo_bar' in module
    'distutils.command.foo_bar'.  The command class must provide a
    'user_options' attribute which is a list of option specifiers for
    'distutils.fancy_getopt'.  Any command-line options between the current
    and the next command are used to set attributes of the current command
    object.

    When the entire command-line has been successfully parsed, calls the
    'run()' method on each command object in turn.  This method will be
    driven entirely by the Distribution object (which each command object
    has a reference to, thanks to its constructor), and the
    command-specific options that became attributes of each command
    object.
    """

    global _setup_stop_after, _setup_distribution

    # Determine the distribution class -- either caller-supplied or
    # our Distribution (see below).
    klass = attrs.get('distclass')
    if klass:
        del attrs['distclass']
    else:
        klass = Distribution

    if not attrs.has_key('script_name'):
        attrs['script_name'] = os.path.basename(sys.argv[0])
    if not attrs.has_key('script_args'):
        attrs['script_args'] = sys.argv[1:]

    # Create the Distribution instance, using the remaining arguments
    # (ie. everything except distclass) to initialize it
    try:
        _setup_distribution = dist = klass(attrs)
    except DistutilsSetupError, msg:
        if attrs.has_key('name'):
            raise SystemExit, "error in %s setup command: %s" % \
                  (attrs['name'], msg)
        else:
            raise SystemExit, "error in setup command: %s" % msg

    if _setup_stop_after == "init":
        return dist

    # Find and parse the config file(s): they will override options from
    # the setup script, but be overridden by the command line.
    dist.parse_config_files()

    if DEBUG:
        print "options (after parsing config files):"
        dist.dump_option_dicts()

    if _setup_stop_after == "config":
        return dist

    # Parse the command line; any command-line errors are the end user's
    # fault, so turn them into SystemExit to suppress tracebacks.
    try:
        ok = dist.parse_command_line()
    except DistutilsArgError, msg:
        raise SystemExit, gen_usage(dist.script_name) + "\nerror: %s" % msg

    if DEBUG:
        print "options (after parsing command line):"
        dist.dump_option_dicts()

    if _setup_stop_after == "commandline":
        return dist

    # And finally, run all the commands found on the command line.
    if ok:
        try:
            dist.run_commands()
        except __HOLE__:
            raise SystemExit, "interrupted"
        except (IOError, os.error), exc:
            error = grok_environment_error(exc)

            if DEBUG:
                sys.stderr.write(error + "\n")
                raise
            else:
                raise SystemExit, error

        except (DistutilsError,
                CCompilerError), msg:
            if DEBUG:
                raise
            else:
                raise SystemExit, "error: " + str(msg)

    return dist

# setup ()
|
KeyboardInterrupt
|
dataset/ETHPy150Open ofermend/medicare-demo/socialite/jython/Lib/distutils/core.py/setup
|
9,636
|
def run_setup (script_name, script_args=None, stop_after="run"):
    """Run a setup script in a somewhat controlled environment, and
    return the Distribution instance that drives things.  This is useful
    if you need to find out the distribution meta-data (passed as
    keyword args from 'script' to 'setup()', or the contents of the
    config files or command-line.

    'script_name' is a file that will be run with 'execfile()';
    'sys.argv[0]' will be replaced with 'script' for the duration of the
    call.  'script_args' is a list of strings; if supplied,
    'sys.argv[1:]' will be replaced by 'script_args' for the duration of
    the call.

    'stop_after' tells 'setup()' when to stop processing; possible
    values:

      init
        stop after the Distribution instance has been created and
        populated with the keyword arguments to 'setup()'
      config
        stop after config files have been parsed (and their data
        stored in the Distribution instance)
      commandline
        stop after the command-line ('sys.argv[1:]' or 'script_args')
        have been parsed (and the data stored in the Distribution)
      run [default]
        stop after all commands have been run (the same as if 'setup()'
        had been called in the usual way

    Returns the Distribution instance, which provides all information
    used to drive the Distutils.
    """
    if stop_after not in ('init', 'config', 'commandline', 'run'):
        raise ValueError, "invalid value for 'stop_after': %r" % (stop_after,)

    global _setup_stop_after, _setup_distribution
    _setup_stop_after = stop_after

    save_argv = sys.argv
    g = {}
    l = {}
    try:
        try:
            sys.argv[0] = script_name
            if script_args is not None:
                sys.argv[1:] = script_args
            execfile(script_name, g, l)
        finally:
            sys.argv = save_argv
            _setup_stop_after = None
    except __HOLE__:
        # Hmm, should we do something if exiting with a non-zero code
        # (ie. error)?
        pass
    except:
        raise

    if _setup_distribution is None:
        raise RuntimeError, \
              ("'distutils.core.setup()' was never called -- "
               "perhaps '%s' is not a Distutils setup script?") % \
              script_name

    # I wonder if the setup script's namespace -- g and l -- would be of
    # any interest to callers?
    #print "_setup_distribution:", _setup_distribution
    return _setup_distribution

# run_setup ()
|
SystemExit
|
dataset/ETHPy150Open ofermend/medicare-demo/socialite/jython/Lib/distutils/core.py/run_setup
|
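The run_setup() helper above is convenient for inspecting a package's metadata without building anything. A minimal usage sketch, assuming a setup.py exists in the working directory (the file name and printed attributes are illustrative):

from distutils.core import run_setup

# Stop once the Distribution has been populated with the setup() keyword
# arguments; no config parsing and no commands are run.
dist = run_setup('setup.py', script_args=[], stop_after='init')
print(dist.get_name(), dist.get_version())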
9,637
|
def _find_queue_item_id(self, master_base_url, changes_bid):
"""Looks in a Jenkins master's queue for an item, and returns the ID if found.
Args:
master_base_url (str): Jenkins master URL, in scheme://host form.
changes_bid (str): The identifier for this Jenkins build, typically the JobStep ID.
Returns:
str: Queue item id if found, otherwise None.
"""
xpath = QUEUE_ID_XPATH.format(job_id=changes_bid)
try:
response = self._get_text_response(
master_base_url=master_base_url,
path='/queue/api/xml/',
params={
'xpath': xpath,
'wrapper': 'x',
},
)
except NotFound:
return None
# it's possible that we managed to create multiple jobs in certain
# situations, so let's just get the newest one
try:
match = etree.fromstring(response).iter('id').next()
except __HOLE__:
return None
return match.text
|
StopIteration
|
dataset/ETHPy150Open dropbox/changes/changes/backends/jenkins/builder.py/JenkinsBuilder._find_queue_item_id
|
9,638
|
def _find_build_no(self, master_base_url, job_name, changes_bid):
"""Looks in a Jenkins master's list of current/recent builds for one with the given CHANGES_BID,
and returns the build number if found.
Args:
master_base_url (str): Jenkins master URL, in scheme://host form.
job_name (str): Name of the Jenkins project/job to look for the build in; ex: 'generic_build'.
changes_bid (str): The identifier for this Jenkins build, typically the JobStep ID.
Returns:
str: build number of the build if found, otherwise None.
"""
xpath = BUILD_ID_XPATH.format(job_id=changes_bid)
try:
response = self._get_text_response(
master_base_url=master_base_url,
path='/job/{job_name}/api/xml/'.format(job_name=job_name),
params={
'depth': 1,
'xpath': xpath,
'wrapper': 'x',
},
)
except NotFound:
return None
# it's possible that we managed to create multiple jobs in certain
# situations, so let's just get the newest one
try:
match = etree.fromstring(response).iter('number').next()
except __HOLE__:
return None
return match.text
|
StopIteration
|
dataset/ETHPy150Open dropbox/changes/changes/backends/jenkins/builder.py/JenkinsBuilder._find_build_no
|
9,639
|
def _get_jenkins_job(self, step):
try:
job_name = step.data['job_name']
build_no = step.data['build_no']
except __HOLE__:
raise UnrecoverableException('Missing Jenkins job information')
try:
return self._get_json_response(
step.data['master'],
'/job/{}/{}'.format(job_name, build_no),
)
except NotFound:
raise UnrecoverableException('Unable to find job in Jenkins')
|
KeyError
|
dataset/ETHPy150Open dropbox/changes/changes/backends/jenkins/builder.py/JenkinsBuilder._get_jenkins_job
|
9,640
|
def mark_dead(self, server):
with self._lock:
try:
self._servers.remove(server)
self._dead.insert(0, (time.time() + self._retry_time, server))
except __HOLE__:
pass
|
ValueError
|
dataset/ETHPy150Open aparo/pyes/pyes/connection.py/ServerSet.mark_dead
|
9,641
|
def get_terminal_size(defaultx=80, defaulty=25):
"""Return size of current terminal console.
This function tries to determine the actual size of the current working
console window and returns a tuple (sizex, sizey) on success,
or the default size (defaultx, defaulty) otherwise.
Dependencies: ctypes should be installed.
Author: Alexander Belchenko (e-mail: bialix AT ukr.net)
"""
try:
import ctypes
except __HOLE__:
return defaultx, defaulty
h = ctypes.windll.kernel32.GetStdHandle(-11)
csbi = ctypes.create_string_buffer(22)
res = ctypes.windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom, maxx, maxy) = struct.unpack(
"hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
sizey = bottom - top + 1
return (sizex, sizey)
else:
return (defaultx, defaulty)
|
ImportError
|
dataset/ETHPy150Open ipython/ipython-py3k/IPython/utils/terminal.py/get_terminal_size
|
9,642
|
def filedown(environ, filename, cache=True, cache_timeout=None,
action=None, real_filename=None, x_sendfile=False,
x_header_name=None, x_filename=None, fileobj=None,
default_mimetype='application/octet-stream'):
"""
@param filename: is used for display in download
@param real_filename: is used for the real file location
@param x_urlfile: only used with x-sendfile; its value is set as the x-sendfile header
@param fileobj: if provided, then returned as file content
@type fileobj: (fobj, mtime, size)
filedown now supports web-server-controlled downloads; set
x_sendfile=True and add an x_header, for example:
nginx
('X-Accel-Redirect', '/path/to/local_url')
apache
('X-Sendfile', '/path/to/local_url')
"""
from werkzeug.http import parse_range_header
guessed_type = mimetypes.guess_type(filename)
mime_type = guessed_type[0] or default_mimetype
real_filename = real_filename or filename
#make common headers
headers = []
headers.append(('Content-Type', mime_type))
d_filename = _get_download_filename(environ, os.path.basename(filename))
if action == 'download':
headers.append(('Content-Disposition', 'attachment; %s' % d_filename))
elif action == 'inline':
headers.append(('Content-Disposition', 'inline; %s' % d_filename))
if x_sendfile:
if not x_header_name or not x_filename:
raise Exception("x_header_name or x_filename can't be empty")
headers.append((x_header_name, x_filename))
return Response('', status=200, headers=headers,
direct_passthrough=True)
else:
request = environ.get('werkzeug.request')
if request:
range = request.range
else:
range = parse_range_header(environ.get('HTTP_RANGE'))
#when a range is requested, only recognize "bytes" as the range unit
if range and range.units=="bytes":
try:
fsize = os.path.getsize(real_filename)
except __HOLE__ as e:
return Response("Not found",status=404)
mtime = datetime.utcfromtimestamp(os.path.getmtime(real_filename))
mtime_str = http_date(mtime)
if cache:
etag = _generate_etag(mtime, fsize, real_filename)
else:
etag = mtime_str
if_range = environ.get('HTTP_IF_RANGE')
if if_range:
check_if_range_ok = (if_range.strip('"')==etag)
#print "check_if_range_ok (%s) = (%s ==%s)"%(check_if_range_ok,if_range.strip('"'),etag)
else:
check_if_range_ok = True
rbegin,rend = range.ranges[0]
if check_if_range_ok and (rbegin+1)<fsize:
if rend == None:
rend = fsize-1
headers.append(('Content-Length',str(rend-rbegin+1)))
#werkzeug does not count rend the same way as rfc7233, so -1
headers.append(('Content-Range','%s %d-%d/%d' %(range.units,rbegin, rend-1, fsize)))
headers.append(('Last-Modified', mtime_str))
if cache:
headers.append(('ETag', '"%s"' % etag))
#for small files, read into memory and return directly;
#this avoids an issue with Google Chrome
if (rend-rbegin) < FileIterator.chunk_size:
s = "".join([chunk for chunk in FileIterator(real_filename,rbegin,rend)])
return Response(s,status=206, headers=headers, direct_passthrough=True)
else:
return Response(FileIterator(real_filename,rbegin,rend),
status=206, headers=headers, direct_passthrough=True)
#process fileobj
if fileobj:
f, mtime, file_size = fileobj
else:
f, mtime, file_size = _opener(real_filename)
headers.append(('Date', http_date()))
if cache:
etag = _generate_etag(mtime, file_size, real_filename)
headers += [
('ETag', '"%s"' % etag),
]
if cache_timeout:
headers += [
('Cache-Control', 'max-age=%d, public' % cache_timeout),
('Expires', http_date(time() + cache_timeout))
]
if not is_resource_modified(environ, etag, last_modified=mtime):
f.close()
return Response(status=304, headers=headers)
else:
headers.append(('Cache-Control', 'public'))
headers.extend((
('Content-Length', str(file_size)),
('Last-Modified', http_date(mtime))
))
return Response(wrap_file(environ, f), status=200, headers=headers,
direct_passthrough=True)
|
OSError
|
dataset/ETHPy150Open limodou/uliweb/uliweb/utils/filedown.py/filedown
|
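The 206 branch above implements RFC 7233 byte-range accounting, including the -1 adjustment called out in the comment. A self-contained sketch of the arithmetic (the helper name is hypothetical):

def content_range(first_byte, last_byte, total_size):
    # Both endpoints are inclusive: bytes 0-99 of a 1000-byte file is
    # 100 bytes, sent as 'Content-Range: bytes 0-99/1000'.
    length = last_byte - first_byte + 1
    return length, 'bytes %d-%d/%d' % (first_byte, last_byte, total_size)

print(content_range(0, 99, 1000))  # (100, 'bytes 0-99/1000')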
9,643
|
def get_client(self, name):
if name in self._connected_clients:
return self._connected_clients[name]
try:
aclass = next(s for s in self._supported_clients if name in
s.__name__)
sclient = aclass()
connected_client = sclient.create()
self._connected_clients[name] = connected_client
return connected_client
except __HOLE__:
LOG.warning("Requested client %s not found", name)
raise
|
StopIteration
|
dataset/ETHPy150Open openstack/kolla/tests/clients.py/OpenStackClients.get_client
|
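An alternative to catching StopIteration, as get_client() does above, is the default argument of next(); a standalone sketch with made-up class names:

supported = ['NovaClient', 'NeutronClient']
print(next((c for c in supported if 'Nova' in c), None))  # 'NovaClient'
print(next((c for c in supported if 'Heat' in c), None))  # None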
9,644
|
def start(self):
"""
Starts the IEDriver Service.
:Exceptions:
- WebDriverException : Raised either when it can't start the service
or when it can't connect to the service
"""
try:
cmd = [self.path, "--port=%d" % self.port]
if self.host is not None:
cmd.append("--host=%s" % self.host)
if self.log_level is not None:
cmd.append("--log-level=%s" % self.log_level)
if self.log_file is not None:
cmd.append("--log-file=%s" % self.log_file)
self.process = subprocess.Popen(cmd,
stdout=PIPE, stderr=PIPE)
except __HOLE__:
raise
except:
raise WebDriverException(
"IEDriver executable needs to be available in the path. "
"Please download from http://selenium-release.storage.googleapis.com/index.html "
"and read up at http://code.google.com/p/selenium/wiki/InternetExplorerDriver")
count = 0
while not utils.is_url_connectable(self.port):
count += 1
time.sleep(1)
if count == 30:
raise WebDriverException("Can not connect to the IEDriver")
|
TypeError
|
dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/selenium/webdriver/ie/service.py/Service.start
|
9,645
|
def stop(self):
"""
Tells the IEDriver to stop and cleans up the process
"""
#If it's dead, don't worry
if self.process is None:
return
#Tell the Server to die!
try:
from urllib import request as url_request
except __HOLE__:
import urllib2 as url_request
url_request.urlopen("http://127.0.0.1:%d/shutdown" % self.port)
count = 0
while utils.is_connectable(self.port):
if count == 30:
break
count += 1
time.sleep(1)
#Tell the Server to properly die in case
try:
if self.process:
self.process.kill()
self.process.wait()
except WindowsError:
# kill may not be available under windows environment
pass
|
ImportError
|
dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/selenium/webdriver/ie/service.py/Service.stop
|
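The import inside stop() above is the usual Python 2/3 compatibility idiom: try the Python 3 module path first and fall back to the legacy name on ImportError. The same pattern in isolation:

try:
    from urllib.request import urlopen  # Python 3
except ImportError:
    from urllib2 import urlopen         # Python 2 fallback

print(urlopen)  # the same callable under either interpreter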
9,646
|
def main():
parser = argparse.ArgumentParser()
parser.add_argument('key', metavar='KEY', nargs='?', default=None,
help='key associated with values to reduce')
group = parser.add_mutually_exclusive_group()
group.add_argument('--min', action='store_true', default=None)
group.add_argument('--max', action='store_true', default=None)
group.add_argument('--sum', action='store_true', default=None)
group.add_argument('--mean', action='store_true', default=None)
group.add_argument('--dist', action='store_true', default=None)
group.add_argument('--count', action='store_true', default=None)
group.add_argument('--unique', action='store_true', default=None)
group.add_argument('--join', metavar='STR', default=None, help='string to use for a join')
args = parser.parse_args()
if sys.stdin.isatty():
parser.error('no input, pipe another btc command output into this command')
l = sys.stdin.read()
if len(l.strip()) == 0:
exit(1)
try:
l = decoder.decode(l)
except __HOLE__:
error('unexpected input: %s' % l)
if not isinstance(l, list):
error('input must be a list')
elif args.key and not all(isinstance(x, dict) for x in l):
error('list items must be dictionaries when specifying a key')
out = []
for i, e in enumerate(l):
try:
out.append(e[args.key] if args.key else e)
except KeyError:
error('key not found: {}'.format(args.key))
f = None
if args.min:
f = min
elif args.max:
f = max
elif args.sum:
if not all(isinstance(x, float) or isinstance(x, int) for x in out):
error('sum requires numerical values')
f = sum
elif args.mean:
if not all(isinstance(x, float) or isinstance(x, int) for x in out):
error('mean requires numerical values')
f = lambda l: float(sum(l)) / len(l) if len(l) > 0 else float('nan')
elif args.dist:
f = lambda l: dict(collections.Counter(l).most_common())
elif args.count:
f = lambda l: len(l)
elif args.unique:
f = lambda l: list(set(l))
elif args.join is not None:
f = lambda l: args.join.join(l)
else:
f = lambda l: '\n'.join(l)
if args.unique or args.dist:
print(encoder.encode(f(out)))
else:
print(f(out))
|
ValueError
|
dataset/ETHPy150Open bittorrent/btc/btc/btc_reduce.py/main
|
9,647
|
def load(self):
if self.savefile is None:
return
if os.path.exists(self.savefile):
try:
# set dirty flag and re-save if invalid data in save file
dirty = False
data = json.load(open(self.savefile))
for item in data:
name, rec = item.items()[0]
type_ = rec.pop('type')
if name and rec:
(success, msg) = self.set_record(name, type_, rec, False)
log.msg(msg)
if not success:
dirty = True
if dirty:
self.save()
except __HOLE__:
log.msg("No JSON in save file")
|
ValueError
|
dataset/ETHPy150Open yaybu/callsign/callsign/dns.py/RuntimeAuthority.load
|
9,648
|
def read_previous_results():
"""Read results of previous run.
:return: dictionary of results if exist
"""
try:
with open(settings.LOAD_TESTS_PATHS['load_previous_tests_results'],
'r') as results_file:
results = jsonutils.load(results_file)
except (IOError, __HOLE__):
results = {}
return results
|
ValueError
|
dataset/ETHPy150Open openstack/fuel-web/nailgun/nailgun/test/performance/base.py/read_previous_results
|
9,649
|
def nova_notify_supported():
try:
import neutron.notifiers.nova # noqa since unused
return True
except __HOLE__:
return False
|
ImportError
|
dataset/ETHPy150Open openstack/neutron/neutron/cmd/sanity/checks.py/nova_notify_supported
|
9,650
|
def ofctl_arg_supported(cmd, **kwargs):
"""Verify if ovs-ofctl binary supports cmd with **kwargs.
:param cmd: ovs-ofctl command to use for test.
:param **kwargs: arguments to test with the command.
:returns: a boolean indicating whether the supplied arguments are supported.
"""
br_name = base.get_rand_device_name(prefix='br-test-')
with ovs_lib.OVSBridge(br_name) as test_br:
full_args = ["ovs-ofctl", cmd, test_br.br_name,
ovs_lib._build_flow_expr_str(kwargs, cmd.split('-')[0])]
try:
agent_utils.execute(full_args, run_as_root=True)
except __HOLE__ as e:
LOG.debug("Exception while checking supported feature via "
"command %s. Exception: %s", full_args, e)
return False
except Exception:
LOG.exception(_LE("Unexpected exception while checking supported"
" feature via command: %s"), full_args)
return False
else:
return True
|
RuntimeError
|
dataset/ETHPy150Open openstack/neutron/neutron/cmd/sanity/checks.py/ofctl_arg_supported
|
9,651
|
def dnsmasq_version_supported():
try:
cmd = ['dnsmasq', '--version']
env = {'LC_ALL': 'C'}
out = agent_utils.execute(cmd, addl_env=env)
m = re.search(r"version (\d+\.\d+)", out)
ver = float(m.group(1)) if m else 0
if ver < MINIMUM_DNSMASQ_VERSION:
return False
except (OSError, RuntimeError, __HOLE__, ValueError) as e:
LOG.debug("Exception while checking minimal dnsmasq version. "
"Exception: %s", e)
return False
return True
|
IndexError
|
dataset/ETHPy150Open openstack/neutron/neutron/cmd/sanity/checks.py/dnsmasq_version_supported
|
9,652
|
def ovsdb_native_supported():
# Running the test should ensure we are configured for OVSDB native
try:
ovs = ovs_lib.BaseOVS()
ovs.get_bridges()
return True
except __HOLE__ as ex:
LOG.error(_LE("Failed to import required modules. Ensure that the "
"python-openvswitch package is installed. Error: %s"),
ex)
except Exception:
LOG.exception(_LE("Unexpected exception occurred."))
return False
|
ImportError
|
dataset/ETHPy150Open openstack/neutron/neutron/cmd/sanity/checks.py/ovsdb_native_supported
|
9,653
|
def ovs_conntrack_supported():
br_name = base.get_rand_device_name(prefix="ovs-test-")
with ovs_lib.OVSBridge(br_name) as br:
try:
br.set_protocols(
"OpenFlow10,OpenFlow11,OpenFlow12,OpenFlow13,OpenFlow14")
except __HOLE__ as e:
LOG.debug("Exception while checking ovs conntrack support: %s", e)
return False
return ofctl_arg_supported(cmd='add-flow', ct_state='+trk', actions='drop')
|
RuntimeError
|
dataset/ETHPy150Open openstack/neutron/neutron/cmd/sanity/checks.py/ovs_conntrack_supported
|
9,654
|
def ebtables_supported():
try:
cmd = ['ebtables', '--version']
agent_utils.execute(cmd)
return True
except (__HOLE__, RuntimeError, IndexError, ValueError) as e:
LOG.debug("Exception while checking for installed ebtables. "
"Exception: %s", e)
return False
|
OSError
|
dataset/ETHPy150Open openstack/neutron/neutron/cmd/sanity/checks.py/ebtables_supported
|
9,655
|
def ipset_supported():
try:
cmd = ['ipset', '--version']
agent_utils.execute(cmd)
return True
except (OSError, RuntimeError, __HOLE__, ValueError) as e:
LOG.debug("Exception while checking for installed ipset. "
"Exception: %s", e)
return False
|
IndexError
|
dataset/ETHPy150Open openstack/neutron/neutron/cmd/sanity/checks.py/ipset_supported
|
9,656
|
def ip6tables_supported():
try:
cmd = ['ip6tables', '--version']
agent_utils.execute(cmd)
return True
except (OSError, RuntimeError, IndexError, __HOLE__) as e:
LOG.debug("Exception while checking for installed ip6tables. "
"Exception: %s", e)
return False
|
ValueError
|
dataset/ETHPy150Open openstack/neutron/neutron/cmd/sanity/checks.py/ip6tables_supported
|
9,657
|
def dibbler_version_supported():
try:
cmd = ['dibbler-client',
'help']
out = agent_utils.execute(cmd)
return '-w' in out
except (OSError, __HOLE__, IndexError, ValueError) as e:
LOG.debug("Exception while checking minimal dibbler version. "
"Exception: %s", e)
return False
|
RuntimeError
|
dataset/ETHPy150Open openstack/neutron/neutron/cmd/sanity/checks.py/dibbler_version_supported
|
9,658
|
def detect_c_compiler(self, want_cross):
evar = 'CC'
if self.is_cross_build() and want_cross:
compilers = [self.cross_info.config['binaries']['c']]
ccache = []
is_cross = True
exe_wrap = self.cross_info.config['binaries'].get('exe_wrapper', None)
elif evar in os.environ:
compilers = os.environ[evar].split()
ccache = []
is_cross = False
exe_wrap = None
else:
compilers = self.default_c
ccache = self.detect_ccache()
is_cross = False
exe_wrap = None
popen_exceptions = {}
for compiler in compilers:
try:
basename = os.path.basename(compiler).lower()
if basename == 'cl' or basename == 'cl.exe':
arg = '/?'
else:
arg = '--version'
p = subprocess.Popen([compiler, arg], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except __HOLE__ as e:
popen_exceptions[' '.join([compiler, arg])] = e
continue
(out, err) = p.communicate()
out = out.decode(errors='ignore')
err = err.decode(errors='ignore')
vmatch = re.search(Environment.version_regex, out)
if vmatch:
version = vmatch.group(0)
else:
version = 'unknown version'
if 'apple' in out and 'Free Software Foundation' in out:
return GnuCCompiler(ccache + [compiler], version, GCC_OSX, is_cross, exe_wrap)
if (out.startswith('cc') or 'gcc' in out) and \
'Free Software Foundation' in out:
lowerout = out.lower()
if 'mingw' in lowerout or 'msys' in lowerout or 'mingw' in compiler.lower():
gtype = GCC_MINGW
else:
gtype = GCC_STANDARD
return GnuCCompiler(ccache + [compiler], version, gtype, is_cross, exe_wrap)
if 'clang' in out:
if 'Apple' in out:
cltype = CLANG_OSX
else:
cltype = CLANG_STANDARD
return ClangCCompiler(ccache + [compiler], version, cltype, is_cross, exe_wrap)
if 'Microsoft' in out or 'Microsoft' in err:
# Visual Studio prints version number to stderr but
# everything else to stdout. Why? Lord only knows.
version = re.search(Environment.version_regex, err).group()
return VisualStudioCCompiler([compiler], version, is_cross, exe_wrap)
errmsg = 'Unknown compiler(s): "' + ', '.join(compilers) + '"'
if popen_exceptions:
errmsg += '\nThe following exceptions were encountered:'
for (c, e) in popen_exceptions.items():
errmsg += '\nRunning "{0}" gave "{1}"'.format(c, e)
raise EnvironmentException(errmsg)
|
OSError
|
dataset/ETHPy150Open mesonbuild/meson/mesonbuild/environment.py/Environment.detect_c_compiler
|
9,659
|
def detect_fortran_compiler(self, want_cross):
evar = 'FC'
if self.is_cross_build() and want_cross:
compilers = [self.cross_info['fortran']]
is_cross = True
exe_wrap = self.cross_info.get('exe_wrapper', None)
elif evar in os.environ:
compilers = os.environ[evar].split()
is_cross = False
exe_wrap = None
else:
compilers = self.default_fortran
is_cross = False
exe_wrap = None
popen_exceptions = {}
for compiler in compilers:
for arg in ['--version', '-V']:
try:
p = subprocess.Popen([compiler, arg],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except __HOLE__ as e:
popen_exceptions[' '.join([compiler, arg])] = e
continue
(out, err) = p.communicate()
out = out.decode(errors='ignore')
err = err.decode(errors='ignore')
version = 'unknown version'
vmatch = re.search(Environment.version_regex, out)
if vmatch:
version = vmatch.group(0)
if 'GNU Fortran' in out:
return GnuFortranCompiler([compiler], version, GCC_STANDARD, is_cross, exe_wrap)
if 'G95' in out:
return G95FortranCompiler([compiler], version, is_cross, exe_wrap)
if 'Sun Fortran' in err:
version = 'unknown version'
vmatch = re.search(Environment.version_regex, err)
if vmatch:
version = vmatch.group(0)
return SunFortranCompiler([compiler], version, is_cross, exe_wrap)
if 'ifort (IFORT)' in out:
return IntelFortranCompiler([compiler], version, is_cross, exe_wrap)
if 'PathScale EKOPath(tm)' in err:
return PathScaleFortranCompiler([compiler], version, is_cross, exe_wrap)
if 'pgf90' in out:
return PGIFortranCompiler([compiler], version, is_cross, exe_wrap)
if 'Open64 Compiler Suite' in err:
return Open64FortranCompiler([compiler], version, is_cross, exe_wrap)
if 'NAG Fortran' in err:
return NAGFortranCompiler([compiler], version, is_cross, exe_wrap)
errmsg = 'Unknown compiler(s): "' + ', '.join(compilers) + '"'
if popen_exceptions:
errmsg += '\nThe following exceptions were encountered:'
for (c, e) in popen_exceptions.items():
errmsg += '\nRunning "{0}" gave "{1}"'.format(c, e)
raise EnvironmentException(errmsg)
|
OSError
|
dataset/ETHPy150Open mesonbuild/meson/mesonbuild/environment.py/Environment.detect_fortran_compiler
|
9,660
|
def detect_cpp_compiler(self, want_cross):
evar = 'CXX'
if self.is_cross_build() and want_cross:
compilers = [self.cross_info.config['binaries']['cpp']]
ccache = []
is_cross = True
exe_wrap = self.cross_info.config['binaries'].get('exe_wrapper', None)
elif evar in os.environ:
compilers = os.environ[evar].split()
ccache = []
is_cross = False
exe_wrap = None
else:
compilers = self.default_cpp
ccache = self.detect_ccache()
is_cross = False
exe_wrap = None
popen_exceptions = {}
for compiler in compilers:
basename = os.path.basename(compiler).lower()
if basename == 'cl' or basename == 'cl.exe':
arg = '/?'
else:
arg = '--version'
try:
p = subprocess.Popen([compiler, arg],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except __HOLE__ as e:
popen_exceptions[' '.join([compiler, arg])] = e
continue
(out, err) = p.communicate()
out = out.decode(errors='ignore')
err = err.decode(errors='ignore')
vmatch = re.search(Environment.version_regex, out)
if vmatch:
version = vmatch.group(0)
else:
version = 'unknown version'
if 'apple' in out and 'Free Software Foundation' in out:
return GnuCPPCompiler(ccache + [compiler], version, GCC_OSX, is_cross, exe_wrap)
if (out.startswith('c++ ') or 'g++' in out or 'GCC' in out) and \
'Free Software Foundation' in out:
lowerout = out.lower()
if 'mingw' in lowerout or 'msys' in lowerout or 'mingw' in compiler.lower():
gtype = GCC_MINGW
else:
gtype = GCC_STANDARD
return GnuCPPCompiler(ccache + [compiler], version, gtype, is_cross, exe_wrap)
if 'clang' in out:
if 'Apple' in out:
cltype = CLANG_OSX
else:
cltype = CLANG_STANDARD
return ClangCPPCompiler(ccache + [compiler], version, cltype, is_cross, exe_wrap)
if 'Microsoft' in out or 'Microsoft' in err:
version = re.search(Environment.version_regex, err).group()
return VisualStudioCPPCompiler([compiler], version, is_cross, exe_wrap)
errmsg = 'Unknown compiler(s): "' + ', '.join(compilers) + '"'
if popen_exceptions:
errmsg += '\nThe following exceptions were encountered:'
for (c, e) in popen_exceptions.items():
errmsg += '\nRunning "{0}" gave "{1}"'.format(c, e)
raise EnvironmentException(errmsg)
|
OSError
|
dataset/ETHPy150Open mesonbuild/meson/mesonbuild/environment.py/Environment.detect_cpp_compiler
|
9,661
|
def detect_objc_compiler(self, want_cross):
if self.is_cross_build() and want_cross:
exelist = [self.cross_info['objc']]
is_cross = True
exe_wrap = self.cross_info.get('exe_wrapper', None)
else:
exelist = self.get_objc_compiler_exelist()
is_cross = False
exe_wrap = None
try:
p = subprocess.Popen(exelist + ['--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except __HOLE__:
raise EnvironmentException('Could not execute ObjC compiler "%s"' % ' '.join(exelist))
(out, err) = p.communicate()
out = out.decode(errors='ignore')
err = err.decode(errors='ignore')
vmatch = re.search(Environment.version_regex, out)
if vmatch:
version = vmatch.group(0)
else:
version = 'unknown version'
if (out.startswith('cc ') or 'gcc' in out) and \
'Free Software Foundation' in out:
return GnuObjCCompiler(exelist, version, is_cross, exe_wrap)
if out.startswith('Apple LLVM'):
return ClangObjCCompiler(exelist, version, CLANG_OSX, is_cross, exe_wrap)
if 'apple' in out and 'Free Software Foundation' in out:
return GnuObjCCompiler(exelist, version, is_cross, exe_wrap)
raise EnvironmentException('Unknown compiler "' + ' '.join(exelist) + '"')
|
OSError
|
dataset/ETHPy150Open mesonbuild/meson/mesonbuild/environment.py/Environment.detect_objc_compiler
|
9,662
|
def detect_objcpp_compiler(self, want_cross):
if self.is_cross_build() and want_cross:
exelist = [self.cross_info['objcpp']]
is_cross = True
exe_wrap = self.cross_info.get('exe_wrapper', None)
else:
exelist = self.get_objcpp_compiler_exelist()
is_cross = False
exe_wrap = None
try:
p = subprocess.Popen(exelist + ['--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except __HOLE__:
raise EnvironmentException('Could not execute ObjC++ compiler "%s"' % ' '.join(exelist))
(out, err) = p.communicate()
out = out.decode(errors='ignore')
err = err.decode(errors='ignore')
vmatch = re.search(Environment.version_regex, out)
if vmatch:
version = vmatch.group(0)
else:
version = 'unknown version'
if (out.startswith('c++ ') or out.startswith('g++')) and \
'Free Software Foundation' in out:
return GnuObjCPPCompiler(exelist, version, is_cross, exe_wrap)
if out.startswith('Apple LLVM'):
return ClangObjCPPCompiler(exelist, version, CLANG_OSX, is_cross, exe_wrap)
if 'apple' in out and 'Free Software Foundation' in out:
return GnuObjCPPCompiler(exelist, version, is_cross, exe_wrap)
raise EnvironmentException('Unknown compiler "' + ' '.join(exelist) + '"')
|
OSError
|
dataset/ETHPy150Open mesonbuild/meson/mesonbuild/environment.py/Environment.detect_objcpp_compiler
|
9,663
|
def detect_java_compiler(self):
exelist = ['javac']
try:
p = subprocess.Popen(exelist + ['-version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except __HOLE__:
raise EnvironmentException('Could not execute Java compiler "%s"' % ' '.join(exelist))
(out, err) = p.communicate()
out = out.decode(errors='ignore')
err = err.decode(errors='ignore')
vmatch = re.search(Environment.version_regex, err)
if vmatch:
version = vmatch.group(0)
else:
version = 'unknown version'
if 'javac' in err:
return JavaCompiler(exelist, version)
raise EnvironmentException('Unknown compiler "' + ' '.join(exelist) + '"')
|
OSError
|
dataset/ETHPy150Open mesonbuild/meson/mesonbuild/environment.py/Environment.detect_java_compiler
|
9,664
|
def detect_cs_compiler(self):
exelist = ['mcs']
try:
p = subprocess.Popen(exelist + ['--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except __HOLE__:
raise EnvironmentException('Could not execute C# compiler "%s"' % ' '.join(exelist))
(out, err) = p.communicate()
out = out.decode(errors='ignore')
err = err.decode(errors='ignore')
vmatch = re.search(Environment.version_regex, out)
if vmatch:
version = vmatch.group(0)
else:
version = 'unknown version'
if 'Mono' in out:
return MonoCompiler(exelist, version)
raise EnvironmentException('Unknown compiler "' + ' '.join(exelist) + '"')
|
OSError
|
dataset/ETHPy150Open mesonbuild/meson/mesonbuild/environment.py/Environment.detect_cs_compiler
|
9,665
|
def detect_vala_compiler(self):
exelist = ['valac']
try:
p = subprocess.Popen(exelist + ['--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except __HOLE__:
raise EnvironmentException('Could not execute Vala compiler "%s"' % ' '.join(exelist))
(out, _) = p.communicate()
out = out.decode(errors='ignore')
vmatch = re.search(Environment.version_regex, out)
if vmatch:
version = vmatch.group(0)
else:
version = 'unknown version'
if 'Vala' in out:
return ValaCompiler(exelist, version)
raise EnvironmentException('Unknown compiler "' + ' '.join(exelist) + '"')
|
OSError
|
dataset/ETHPy150Open mesonbuild/meson/mesonbuild/environment.py/Environment.detect_vala_compiler
|
9,666
|
def detect_rust_compiler(self):
exelist = ['rustc']
try:
p = subprocess.Popen(exelist + ['--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except __HOLE__:
raise EnvironmentException('Could not execute Rust compiler "%s"' % ' '.join(exelist))
(out, _) = p.communicate()
out = out.decode(errors='ignore')
vmatch = re.search(Environment.version_regex, out)
if vmatch:
version = vmatch.group(0)
else:
version = 'unknown version'
if 'rustc' in out:
return RustCompiler(exelist, version)
raise EnvironmentException('Unknown compiler "' + ' '.join(exelist) + '"')
|
OSError
|
dataset/ETHPy150Open mesonbuild/meson/mesonbuild/environment.py/Environment.detect_rust_compiler
|
9,667
|
def detect_swift_compiler(self):
exelist = ['swiftc']
try:
p = subprocess.Popen(exelist + ['-v'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except __HOLE__:
raise EnvironmentException('Could not execute Swift compiler "%s"' % ' '.join(exelist))
(_, err) = p.communicate()
err = err.decode(errors='ignore')
vmatch = re.search(Environment.version_regex, err)
if vmatch:
version = vmatch.group(0)
else:
version = 'unknown version'
if 'Swift' in err:
return SwiftCompiler(exelist, version)
raise EnvironmentException('Unknown compiler "' + ' '.join(exelist) + '"')
|
OSError
|
dataset/ETHPy150Open mesonbuild/meson/mesonbuild/environment.py/Environment.detect_swift_compiler
|
9,668
|
def detect_static_linker(self, compiler):
if compiler.is_cross:
linker = self.cross_info.config['binaries']['ar']
else:
evar = 'AR'
if evar in os.environ:
linker = os.environ[evar].strip()
elif isinstance(compiler, VisualStudioCCompiler):
linker= self.vs_static_linker
else:
linker = self.default_static_linker
basename = os.path.basename(linker).lower()
if basename == 'lib' or basename == 'lib.exe':
arg = '/?'
else:
arg = '--version'
try:
p = subprocess.Popen([linker, arg], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except __HOLE__:
raise EnvironmentException('Could not execute static linker "%s".' % linker)
(out, err) = p.communicate()
out = out.decode(errors='ignore')
err = err.decode(errors='ignore')
if '/OUT:' in out or '/OUT:' in err:
return VisualStudioLinker([linker])
if p.returncode == 0:
return ArLinker([linker])
if p.returncode == 1 and err.startswith('usage'): # OSX
return ArLinker([linker])
raise EnvironmentException('Unknown static linker "%s"' % linker)
|
OSError
|
dataset/ETHPy150Open mesonbuild/meson/mesonbuild/environment.py/Environment.detect_static_linker
|
9,669
|
def detect_ccache(self):
try:
has_ccache = subprocess.call(['ccache', '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except __HOLE__:
has_ccache = 1
if has_ccache == 0:
cmdlist = ['ccache']
else:
cmdlist = []
return cmdlist
|
OSError
|
dataset/ETHPy150Open mesonbuild/meson/mesonbuild/environment.py/Environment.detect_ccache
|
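detect_ccache() is the smallest instance of the probe pattern used by all the detectors above: invoke a candidate binary with a harmless flag and treat OSError (executable not found) as "unavailable". A generic sketch, with the probed command as an illustrative parameter:

import subprocess

def tool_available(cmd):
    # OSError from subprocess means the executable is not on PATH.
    try:
        subprocess.call(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        return True
    except OSError:
        return False

print(tool_available(['ccache', '--version']))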
9,670
|
@staticmethod
def load_page(key):
print("Loading %s" % key)
try:
raw = wikipedia.page(key, preload=True)
print(unidecode(raw.content[:80]))
print(unidecode(str(raw.links)[:80]))
print(unidecode(str(raw.categories)[:80]))
except __HOLE__:
print("Key error")
raw = None
except wikipedia.exceptions.DisambiguationError:
print("Disambig error!")
raw = None
except wikipedia.exceptions.PageError:
print("Page error!")
raw = None
except ReadTimeout:
# Wait a while, see if the network comes back
print("Connection error, waiting 10 minutes ...")
sleep(600)
print("trying again")
return self[key]
except ConnectionError:
# Wait a while, see if the network comes back
print("Connection error, waiting 10 minutes ...")
sleep(600)
print("trying again")
return self[key]
except ValueError:
# Wait a while, see if the network comes back
print("Connection error, waiting 10 minutes ...")
sleep(600)
print("trying again")
return self[key]
except WikipediaException:
# Wait a while, see if the network comes back
print("Connection error, waiting 10 minutes ...")
sleep(600)
print("trying again")
return load_page(key)
return raw
|
KeyError
|
dataset/ETHPy150Open Pinafore/qb/util/cached_wikipedia.py/CachedWikipedia.load_page
|
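The repeated except-sleep-retry blocks in load_page() above all express the same policy; a loop-based sketch of that idea (function and argument names are made up):

from time import sleep

def with_retries(fn, retriable, attempts=3, wait=1):
    # Retry fn() whenever it raises one of the `retriable` exception classes.
    for i in range(attempts):
        try:
            return fn()
        except retriable:
            if i == attempts - 1:
                raise
            sleep(wait)

print(with_retries(lambda: 42, (ConnectionError, ValueError)))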
9,671
|
def __getitem__(self, key):
key = key.replace("_", " ")
if key in self._cache:
return self._cache[key]
if "/" in key:
filename = "%s/%s" % (self._path, key.replace("/", "---"))
else:
filename = "%s/%s" % (self._path, key)
page = None
if os.path.exists(filename):
try:
page = pickle.load(open(filename, 'rb'))
except pickle.UnpicklingError:
page = None
except __HOLE__:
print("Error loading %s" % key)
page = None
except ImportError:
print("Error importing %s" % key)
page = None
if page is None:
if key in self._countries:
raw = [CachedWikipedia.load_page("%s%s" %
(x, self._countries[key]))
for x in kCOUNTRY_SUB]
raw.append(CachedWikipedia.load_page(key))
print("%s is a country!" % key)
else:
raw = [CachedWikipedia.load_page(key)]
raw = [x for x in raw if not x is None]
sleep(.3)
if raw:
if len(raw) > 1:
print("%i pages for %s" % (len(raw), key))
page = WikipediaPage("\n".join(unidecode(x.content) for
x in raw),
[y for y in
x.links
for x in raw],
[y for y in
x.categories
for x in raw])
print("Writing file to %s" % filename)
pickle.dump(page, open(filename, 'wb'),
protocol=pickle.HIGHEST_PROTOCOL)
else:
print("Dummy page for %s" % key)
page = WikipediaPage()
if self._write_dummy:
pickle.dump(page, open(filename, 'wb'),
protocol=pickle.HIGHEST_PROTOCOL)
self._cache[key] = page
return page
|
AttributeError
|
dataset/ETHPy150Open Pinafore/qb/util/cached_wikipedia.py/CachedWikipedia.__getitem__
|
9,672
|
def _set_slice(self, index, values):
"Assign values to a slice of the object"
try:
iter(values)
except __HOLE__:
raise TypeError('can only assign an iterable to a slice')
self._check_allowed(values)
origLen = len(self)
valueList = list(values)
start, stop, step = index.indices(origLen)
# CAREFUL: index.step and step are not the same!
# step will never be None
if index.step is None:
self._assign_simple_slice(start, stop, valueList)
else:
self._assign_extended_slice(start, stop, step, valueList)
|
TypeError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/contrib/gis/geos/mutable_list.py/ListMixin._set_slice
|
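The "CAREFUL" comment in _set_slice() above points at a real asymmetry: slice.step may be None, but the step returned by slice.indices() never is. A quick demonstration:

s = slice(None, None, None)
print(s.step)        # None
print(s.indices(5))  # (0, 5, 1) -- step normalized to 1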
9,673
|
def test_should_not_use_m2m_field_name_for_update_field_list(self):
another_user = UserModel.objects.create(name='vova')
data = {
'title': 'goodbye',
'users_liked': [self.user.pk, another_user.pk]
}
serializer = CommentSerializer(
instance=self.get_comment(), data=data, partial=True)
self.assertTrue(serializer.is_valid())
try:
serializer.save()
except __HOLE__:
self.fail(
'If m2m field used in partial update then it should not be used in update_fields list')
fresh_instance = self.get_comment()
self.assertEqual(fresh_instance.title, 'goodbye')
users_liked = set(
fresh_instance.users_liked.all().values_list('pk', flat=True))
self.assertEqual(
users_liked, set([self.user.pk, another_user.pk]))
|
ValueError
|
dataset/ETHPy150Open chibisov/drf-extensions/tests_app/tests/unit/serializers/tests.py/PartialUpdateSerializerMixinTest.test_should_not_use_m2m_field_name_for_update_field_list
|
9,674
|
def test_should_not_use_related_set_field_name_for_update_field_list(self):
another_user = UserModel.objects.create(name='vova')
another_comment = CommentModel.objects.create(
user=another_user,
title='goodbye',
text='moon',
)
data = {
'name': 'vova',
'comments': [another_comment.pk]
}
serializer = UserSerializer(instance=another_user, data=data, partial=True)
self.assertTrue(serializer.is_valid())
serializer.save()
try:
serializer.save()
except __HOLE__:
self.fail('If related set field used in partial update then it should not be used in update_fields list')
fresh_comment = CommentModel.objects.get(pk=another_comment.pk)
fresh_user = UserModel.objects.get(pk=another_user.pk)
self.assertEqual(fresh_comment.user, another_user)
self.assertEqual(fresh_user.name, 'vova')
|
ValueError
|
dataset/ETHPy150Open chibisov/drf-extensions/tests_app/tests/unit/serializers/tests.py/PartialUpdateSerializerMixinTest.test_should_not_use_related_set_field_name_for_update_field_list
|
9,675
|
def test_should_not_try_to_update_fields_that_are_not_in_model(self):
data = {
'title': 'goodbye',
'not_existing_field': 'moon'
}
serializer = CommentSerializer(instance=self.get_comment(), data=data, partial=True)
self.assertTrue(serializer.is_valid())
try:
serializer.save()
except __HOLE__:
msg = 'Should not pass values to update_fields from data, if they are not in model'
self.fail(msg)
fresh_instance = self.get_comment()
self.assertEqual(fresh_instance.title, 'goodbye')
self.assertEqual(fresh_instance.text, 'world')
|
ValueError
|
dataset/ETHPy150Open chibisov/drf-extensions/tests_app/tests/unit/serializers/tests.py/PartialUpdateSerializerMixinTest.test_should_not_try_to_update_fields_that_are_not_in_model
|
9,676
|
def test_should_not_use_update_fields_when_related_objects_are_saving(self):
data = {
'title': 'goodbye',
'user': {
'id': self.user.pk,
'name': 'oleg'
}
}
serializer = CommentSerializerWithExpandedUsersLiked(instance=self.get_comment(), data=data, partial=True)
self.assertTrue(serializer.is_valid())
try:
serializer.save()
except __HOLE__:
self.fail('If serializer has expanded related serializer, then it should not use update_fields while '
'saving related object')
fresh_instance = self.get_comment()
self.assertEqual(fresh_instance.title, 'goodbye')
if get_rest_framework_features()['save_related_serializers']:
self.assertEqual(fresh_instance.user.name, 'oleg')
|
ValueError
|
dataset/ETHPy150Open chibisov/drf-extensions/tests_app/tests/unit/serializers/tests.py/PartialUpdateSerializerMixinTest.test_should_not_use_update_fields_when_related_objects_are_saving
|
9,677
|
def test_should_not_use_pk_field_for_update_fields(self):
old_pk = self.get_comment().pk
data = {
'id': old_pk + 1,
'title': 'goodbye'
}
serializer = CommentSerializer(
instance=self.get_comment(), data=data, partial=True)
self.assertTrue(serializer.is_valid())
try:
serializer.save()
except __HOLE__:
self.fail(
'Primary key field should be excluded from update_fields list')
fresh_instance = self.get_comment()
self.assertEqual(fresh_instance.pk, old_pk)
self.assertEqual(fresh_instance.title, u'goodbye')
|
ValueError
|
dataset/ETHPy150Open chibisov/drf-extensions/tests_app/tests/unit/serializers/tests.py/PartialUpdateSerializerMixinTest.test_should_not_use_pk_field_for_update_fields
|
9,678
|
def save_instance(self, instance, fields=None, fail_message='saved',
commit=True, exclude=None, construct=True):
"""
Saves bound Form ``form``'s cleaned_data into model instance
``instance``.
If commit=True, then the changes to ``instance`` will be saved to the
database. Returns ``instance``.
If construct=False, assume ``instance`` has already been constructed
and just needs to be saved.
"""
if construct:
instance = construct_instance(self, instance, fields, exclude)
opts = instance._meta
if self.errors:
raise ValueError("the %s could not be %s because the data didn't"
" validate." % (opts.object_name, fail_message))
# wrap up the saving of m2m data as a function.
def save_m2m(instance):
cleaned_data = self.cleaned_data
for f in opts.many_to_many:
if fields and f.name not in fields:
continue
if f.name in cleaned_data:
for validator in f.validators:
validator(instance, cleaned_data[f.name])
f.save_form_data(instance, cleaned_data[f.name])
if commit:
# TODO XXX, why are we not using transactions?
# if we are committing, save the instance and the m2m data
# immediately.
if not instance.pk:
rollback = True
else:
rollback = False
instance.save()
try:
save_m2m(instance)
# ^-- pass instance so we can validate it's views
except __HOLE__:
if rollback:
self.delete_instance(instance)
# we didn't call ensure_label_domain so it's not our
# responsibility to call prune_tree
raise
else:
# we're not committing. add a method to the form to allow deferred
# saving of m2m data.
self.save_m2m = save_m2m
return instance
|
ValidationError
|
dataset/ETHPy150Open mozilla/inventory/mozdns/forms.py/BaseForm.save_instance
|
9,679
|
@defer.inlineCallbacks
def on_GET(self, request):
requester = yield self.auth.get_user_by_req(
request,
allow_guest=True,
)
is_guest = requester.is_guest
room_id = None
if is_guest:
if "room_id" not in request.args:
raise SynapseError(400, "Guest users must specify room_id param")
if "room_id" in request.args:
room_id = request.args["room_id"][0]
try:
handler = self.handlers.event_stream_handler
pagin_config = PaginationConfig.from_request(request)
timeout = EventStreamRestServlet.DEFAULT_LONGPOLL_TIME_MS
if "timeout" in request.args:
try:
timeout = int(request.args["timeout"][0])
except __HOLE__:
raise SynapseError(400, "timeout must be in milliseconds.")
as_client_event = "raw" not in request.args
chunk = yield handler.get_stream(
requester.user.to_string(),
pagin_config,
timeout=timeout,
as_client_event=as_client_event,
affect_presence=(not is_guest),
room_id=room_id,
is_guest=is_guest,
)
except:
logger.exception("Event stream failed")
raise
defer.returnValue((200, chunk))
|
ValueError
|
dataset/ETHPy150Open matrix-org/synapse/synapse/rest/client/v1/events.py/EventStreamRestServlet.on_GET
|
9,680
|
def save_segment(self, term, term_info, update=False):
"""
Writes out new index data to disk.
Takes a ``term`` string & ``term_info`` dict. It will
rewrite the segment in alphabetical order, adding in the data
where appropriate.
Optionally takes an ``update`` parameter, which is a boolean &
determines whether the provided ``term_info`` should overwrite or
update the data in the segment. Default is ``False`` (overwrite).
"""
seg_name = self.make_segment_name(term)
new_seg_file = tempfile.NamedTemporaryFile(delete=False)
written = False
if not os.path.exists(seg_name):
# If it doesn't exist, touch it.
with open(seg_name, 'w') as seg_file:
seg_file.write('')
with open(seg_name, 'r') as seg_file:
for line in seg_file:
seg_term, seg_term_info = self.parse_record(line)
if not written and seg_term > term:
# We're at the alphabetical location & need to insert.
new_line = self.make_record(term, term_info)
new_seg_file.write(new_line.encode('utf-8'))
written = True
elif seg_term == term:
if not update:
# Overwrite the line for the update.
line = self.make_record(term, term_info)
else:
# Update the existing record.
new_info = self.update_term_info(json.loads(seg_term_info), term_info)
line = self.make_record(term, new_info)
written = True
# Either we haven't reached it alphabetically or we're well-past.
# Write the line.
new_seg_file.write(line.encode('utf-8'))
if not written:
line = self.make_record(term, term_info)
new_seg_file.write(line.encode('utf-8'))
# Atomically move it into place.
new_seg_file.close()
try:
os.rename(new_seg_file.name, seg_name)
except __HOLE__:
os.remove(seg_name)
os.rename(new_seg_file.name, seg_name)
return True
|
OSError
|
dataset/ETHPy150Open toastdriven/microsearch/microsearch.py/Microsearch.save_segment
|
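The rename-remove-rename dance in save_segment() exists because os.rename() refuses to overwrite an existing target on Windows. On Python 3.3+, os.replace() overwrites atomically on both POSIX and Windows; a sketch (the target file name is illustrative):

import os
import tempfile

fd, tmp = tempfile.mkstemp()
with os.fdopen(fd, 'w') as f:
    f.write('new index data\n')
os.replace(tmp, 'segment.index')  # atomically moved into place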
9,681
|
def git_version():
""" Parse version information from the current git commit.
Parse the output of `git describe` and return the git hash and the number
of commits since the last version tag.
"""
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(
cmd, stdout=subprocess.PIPE, env=env,
).communicate()[0]
return out
try:
# We ask git to find the latest tag matching a glob expression. The
# intention is to find a release tag of the form '4.50.2'. Strictly
# speaking, the glob expression also matches tags of the form
# '4abc.5xyz.2gtluuu', but it's very difficult with glob expressions
# to distinguish between the two cases, and the likelihood of a
# problem is minimal.
out = _minimal_ext_cmd(
['git', 'describe', '--match', '[0-9]*.[0-9]*.[0-9]*', '--tags'])
except __HOLE__:
out = ''
git_description = out.strip().decode('ascii')
expr = r'.*?\-(?P<count>\d+)-g(?P<hash>[a-fA-F0-9]+)'
match = re.match(expr, git_description)
if match is None:
git_revision, git_count = 'Unknown', '0'
else:
git_revision, git_count = match.group('hash'), match.group('count')
return git_revision, git_count
|
OSError
|
dataset/ETHPy150Open enthought/envisage/setup.py/git_version
|
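To make the parsing in git_version() concrete, here is the same regex applied to a representative `git describe` output (tag, count, and hash are made up):

import re

expr = r'.*?\-(?P<count>\d+)-g(?P<hash>[a-fA-F0-9]+)'
m = re.match(expr, '4.50.2-14-gdeadbee')
print(m.group('hash'), m.group('count'))  # deadbee 14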
9,682
|
def write_version_py(filename=_VERSION_FILENAME):
""" Create a file containing the version information. """
template = """\
# This file was automatically generated from the `setup.py` script.
version = '{version}'
full_version = '{full_version}'
git_revision = '{git_revision}'
is_released = {is_released}
if not is_released:
version = full_version
"""
# Adding the git rev number needs to be done inside
# write_version_py(), otherwise the import of _version messes
# up the build under Python 3.
fullversion = VERSION
if os.path.exists('.git'):
git_rev, dev_num = git_version()
elif os.path.exists(filename):
# must be a source distribution, use existing version file
try:
from envisage._version import git_revision as git_rev
from envisage._version import full_version as full_v
except __HOLE__:
msg = ("Unable to import 'git_revision' or 'full_revision'. "
"Try removing {} and the build directory before building.")
raise ImportError(msg.format(_VERSION_FILENAME))
match = re.match(r'.*?\.dev(?P<dev_num>\d+)', full_v)
if match is None:
dev_num = '0'
else:
dev_num = match.group('dev_num')
else:
git_rev = 'Unknown'
dev_num = '0'
if not IS_RELEASED:
fullversion += '.dev{0}'.format(dev_num)
with open(filename, "wt") as fp:
fp.write(template.format(version=VERSION,
full_version=fullversion,
git_revision=git_rev,
is_released=IS_RELEASED))
|
ImportError
|
dataset/ETHPy150Open enthought/envisage/setup.py/write_version_py
|
9,683
|
@classmethod
def handle_simple(cls, name):
try:
from django.conf import settings
except __HOLE__:
prefix = ''
else:
prefix = iri_to_uri(getattr(settings, name, ''))
return prefix
|
ImportError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.3/django/templatetags/static.py/PrefixNode.handle_simple
|
9,684
|
def set_type(self, typ):
try:
del self.extension_attributes[XSI_NIL]
except KeyError:
pass
try:
self.extension_attributes[XSI_TYPE] = typ
except __HOLE__:
self._extatt[XSI_TYPE] = typ
|
AttributeError
|
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/pysaml2-2.4.0/src/saml2/saml.py/AttributeValueBase.set_type
|
9,685
|
def get_type(self):
try:
return self.extension_attributes[XSI_TYPE]
except (KeyError, __HOLE__):
try:
return self._extatt[XSI_TYPE]
except KeyError:
return ""
|
AttributeError
|
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/pysaml2-2.4.0/src/saml2/saml.py/AttributeValueBase.get_type
|
9,686
|
def clear_type(self):
try:
del self.extension_attributes[XSI_TYPE]
except KeyError:
pass
try:
del self._extatt[XSI_TYPE]
except __HOLE__:
pass
|
KeyError
|
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/pysaml2-2.4.0/src/saml2/saml.py/AttributeValueBase.clear_type
|
9,687
|
def set_text(self, val, base64encode=False):
typ = self.get_type()
if base64encode:
import base64
val = base64.encodestring(val)
self.set_type("xs:base64Binary")
else:
if isinstance(val, basestring):
if not typ:
self.set_type("xs:string")
else:
try:
assert typ == "xs:string"
except AssertionError:
if typ == "xs:int":
_ = int(val)
elif typ == "xs:boolean":
if val.lower() not in ["true", "false"]:
raise ValueError("Not a boolean")
elif typ == "xs:float":
_ = float(val)
elif typ == "xs:base64Binary":
pass
else:
ValueError("Type and value doesn't match")
elif isinstance(val, bool):
if val:
val = "true"
else:
val = "false"
if not typ:
self.set_type("xs:boolean")
else:
assert typ == "xs:boolean"
elif isinstance(val, int):
val = str(val)
if not typ:
self.set_type("xs:integer")
else:
assert typ == "xs:integer"
elif isinstance(val, float):
val = str(val)
if not typ:
self.set_type("xs:float")
else:
assert typ == "xs:float"
elif not val:
try:
self.extension_attributes[XSI_TYPE] = typ
except __HOLE__:
self._extatt[XSI_TYPE] = typ
val = ""
else:
if typ == "xs:anyType":
pass
else:
raise ValueError
SamlBase.__setattr__(self, "text", val)
return self
|
AttributeError
|
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/pysaml2-2.4.0/src/saml2/saml.py/AttributeValueBase.set_text
|
9,688
|
def harvest_element_tree(self, tree):
# Fill in the instance members from the contents of the XML tree.
for child in tree:
self._convert_element_tree_to_member(child)
for attribute, value in tree.attrib.iteritems():
self._convert_element_attribute_to_member(attribute, value)
if tree.text:
#print "set_text:", tree.text
# clear type
#self.clear_type()
self.set_text(tree.text)
if XSI_NIL in self.extension_attributes:
del self.extension_attributes[XSI_NIL]
try:
typ = self.extension_attributes[XSI_TYPE]
_verify_value_type(typ, getattr(self, "text"))
except __HOLE__:
pass
|
KeyError
|
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/pysaml2-2.4.0/src/saml2/saml.py/AttributeValueBase.harvest_element_tree
|
9,689
|
def __escape_extensible_value(self, value):
'''Escape X10 string values to remove ambiguity for special characters.'''
def _translate(char):
try:
return self.__ESCAPE_CHAR_MAP[char]
except __HOLE__:
return char
return ''.join(map(_translate, str(value)))
|
KeyError
|
dataset/ETHPy150Open kra3/py-ga-mob/pyga/requests.py/X10.__escape_extensible_value
|
9,690
|
def is_view_dart_script(view):
"""Checks whether @view looks like a Dart script file.
Returns `True` if @view's file name ends with '.dart'.
Returns `False` if @view isn't saved on disk.
"""
try:
if view.file_name() is None:
return False
return is_dart_script(view.file_name())
except __HOLE__:
# view is a path
return is_dart_script(view)
|
AttributeError
|
dataset/ETHPy150Open guillermooo/dart-sublime-bundle/lib/path.py/is_view_dart_script
|
9,691
|
def is_pubspec(path_or_view):
"""Returns `True` if @path_or_view is 'pubspec.yaml'.
"""
try:
if path_or_view.file_name() is None:
return
return path_or_view.file_name().endswith('pubspec.yaml')
except __HOLE__:
return path_or_view.endswith('pubspec.yaml')
|
AttributeError
|
dataset/ETHPy150Open guillermooo/dart-sublime-bundle/lib/path.py/is_pubspec
|
9,692
|
def only_for_dart_files(func):
@wraps(func)
def inner(self, view):
try:
fname = view.file_name()
if fname and is_dart_script(fname):
return func(self, view)
except __HOLE__:
assert view is None, "wrong usage"
return inner
|
AttributeError
|
dataset/ETHPy150Open guillermooo/dart-sublime-bundle/lib/path.py/only_for_dart_files
|
9,693
|
def _readNumber(self):
isfloat = False
result = self._next()
peek = self._peek()
while peek is not None and (peek.isdigit() or peek == "."):
isfloat = isfloat or peek == "."
result = result + self._next()
peek = self._peek()
try:
if isfloat:
return float(result)
else:
return int(result)
except __HOLE__:
raise ReadException, "Not a valid JSON number: '%s'" % result
|
ValueError
|
dataset/ETHPy150Open gashero/talklog/android_sms/json.py/JsonReader._readNumber
|
9,694
|
def _readString(self):
result = ""
assert self._next() == '"'
try:
while self._peek() != '"':
ch = self._next()
if ch == "\\":
ch = self._next()
if ch in 'brnft':
ch = self.escapes[ch]
elif ch == "u":
ch4096 = self._next()
ch256 = self._next()
ch16 = self._next()
ch1 = self._next()
n = 4096 * self._hexDigitToInt(ch4096)
n += 256 * self._hexDigitToInt(ch256)
n += 16 * self._hexDigitToInt(ch16)
n += self._hexDigitToInt(ch1)
ch = unichr(n)
elif ch not in '"/\\':
raise ReadException, "Not a valid escaped JSON character: '%s' in %s" % (ch, self._generator.all())
result = result + ch
except __HOLE__:
raise ReadException, "Not a valid JSON string: '%s'" % self._generator.all()
assert self._next() == '"'
return result
|
StopIteration
|
dataset/ETHPy150Open gashero/talklog/android_sms/json.py/JsonReader._readString
|
9,695
|
def _hexDigitToInt(self, ch):
try:
result = self.hex_digits[ch.upper()]
except KeyError:
try:
result = int(ch)
except __HOLE__:
raise ReadException, "The character %s is not a hex digit." % ch
return result
|
ValueError
|
dataset/ETHPy150Open gashero/talklog/android_sms/json.py/JsonReader._hexDigitToInt
|
9,696
|
def _readCStyleComment(self):
try:
done = False
while not done:
ch = self._next()
done = (ch == "*" and self._peek() == "/")
if not done and ch == "/" and self._peek() == "*":
raise ReadException, "Not a valid JSON comment: %s, '/*' cannot be embedded in the comment." % self._generator.all()
self._next()
except __HOLE__:
raise ReadException, "Not a valid JSON comment: %s, expected */" % self._generator.all()
|
StopIteration
|
dataset/ETHPy150Open gashero/talklog/android_sms/json.py/JsonReader._readCStyleComment
|
9,697
|
def _readDoubleSolidusComment(self):
try:
ch = self._next()
while ch != "\r" and ch != "\n":
ch = self._next()
except __HOLE__:
pass
|
StopIteration
|
dataset/ETHPy150Open gashero/talklog/android_sms/json.py/JsonReader._readDoubleSolidusComment
|
9,698
|
def google_adwords_sale(context):
"""
Output our receipt in the format that Google Adwords needs.
"""
order = context['order']
try:
request = context['request']
except KeyError:
print >> sys.stderr, "Template satchmo.show.templatetags.google.google_adwords_sale couldn't get the request from the context. Are you missing the request context_processor?"
return ""
secure = request.is_secure()
try:
language_code = request.LANGUAGE_CODE
except __HOLE__:
language_code = settings.LANGUAGE_CODE
return({"GOOGLE_ADWORDS_ID": config_value('GOOGLE', 'ADWORDS_ID'),
'Store': settings.SITE_NAME,
'value': order.total,
'label': 'purchase',
'secure' : secure,
'language_code' : language_code})
|
KeyError
|
dataset/ETHPy150Open dokterbob/satchmo/satchmo/apps/satchmo_store/shop/templatetags/satchmo_google.py/google_adwords_sale
|
9,699
|
def google_adwords_signup(context):
"""
Output signup info in the format that Google adwords needs.
"""
request = context['request']
try:
request = context['request']
except KeyError:
print >> sys.stderr, "Template satchmo.show.templatetags.google.google_adwords_sale couldn't get the request from the context. Are you missing the request context_processor?"
return ""
secure = request.is_secure()
try:
language_code = request.LANGUAGE_CODE
except __HOLE__:
language_code = settings.LANGUAGE_CODE
return({"GOOGLE_ADWORDS_ID": config_value('GOOGLE', 'ADWORDS_ID'),
'Store': settings.SITE_NAME,
'value': 1,
'label': 'signup',
'secure' : secure,
'language_code' : language_code})
|
AttributeError
|
dataset/ETHPy150Open dokterbob/satchmo/satchmo/apps/satchmo_store/shop/templatetags/satchmo_google.py/google_adwords_signup
|