_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
def iteration_length(self, dimension=None):
    """
    Return the number of global loop iterations that are performed.

    If dimension is not None, it is the loop dimension that is returned
    (-1 is the inner most loop and 0 the outermost)
    """
    # Select either a single loop level or all of them (inner to outer).
    if dimension is None:
        selected_loops = reversed(self._loop_stack)
    else:
        selected_loops = [self._loop_stack[dimension]]
    product = 1
    for _name, start, end, _incr in selected_loops:
        # Each loop level multiplies the total by its trip count.
        product *= end - start
    return self.subs_consts(product)
"resource": ""
} |
def get_loop_stack(self, subs_consts=False):
    """Yield loop stack dictionaries in order from outer to inner."""
    for index, start, stop, increment in self._loop_stack:
        if subs_consts:
            # Resolve symbolic constants to concrete values before yielding.
            start = self.subs_consts(start)
            stop = self.subs_consts(stop)
            increment = self.subs_consts(increment)
        yield {'index': index, 'start': start, 'stop': stop, 'increment': increment}
"resource": ""
} |
def index_order(self, sources=True, destinations=True):
    """
    Return the order of indices as they appear in array references.

    Use *source* and *destination* to filter output
    """
    # Gather the requested reference iterables; None entries mark scalar
    # accesses and are dropped below.
    arefs = chain(*self.sources.values()) if sources else []
    if destinations:
        arefs = chain(arefs, *self.destinations.values())
    # One entry per reference: the free symbols of each subscript expression.
    return [[expr.free_symbols for expr in aref]
            for aref in arefs if aref is not None]
"resource": ""
} |
def compile_sympy_accesses(self, sources=True, destinations=True):
    """
    Return a dictionary of lists of sympy accesses, for each variable.

    Use *source* and *destination* to filter output
    """
    sympy_accesses = defaultdict(list)
    for var_name in self.variables:
        refs = []
        # Reads first, then writes -- same order as the original compilation.
        if sources:
            refs.extend(self.sources.get(var_name, []))
        if destinations:
            refs.extend(self.destinations.get(var_name, []))
        for ref in refs:
            # None marks a scalar (non-array) access without index expression.
            if ref is not None:
                sympy_accesses[var_name].append(self.access_to_sympy(var_name, ref))
    return sympy_accesses
"resource": ""
} |
def compile_relative_distances(self, sympy_accesses=None):
    """
    Return load and store distances between accesses.

    :param sympy_accesses: optionally restrict accesses, default from compile_sympy_accesses()

    e.g. if accesses are to [+N, +1, -1, -N], relative distances are [N-1, 2, N-1]

    returned is a dict of list of sympy expressions, for each variable
    """
    if sympy_accesses is None:
        sympy_accesses = self.compile_sympy_accesses()
    sympy_distances = defaultdict(list)
    for var_name, accesses in sympy_accesses.items():
        # Pairwise difference of consecutive accesses, simplified.
        for previous, current in zip(accesses, accesses[1:]):
            sympy_distances[var_name].append((previous - current).simplify())
    return sympy_distances
"resource": ""
} |
def global_iterator_to_indices(self, git=None):
    """
    Return sympy expressions translating global_iterator to loop indices.

    If global_iterator is given, an integer is returned
    """
    # unwind global iteration count into loop counters:
    base_loop_counters = {}
    global_iterator = symbol_pos_int('global_iterator')
    # Integer division as a sympy function that can also be lambdified.
    idiv = implemented_function(sympy.Function(str('idiv')), lambda x, y: x//y)
    total_length = 1
    last_incr = 1
    # Walk the loop stack inner-most first, peeling off one mixed-radix
    # "digit" of the global iteration count per loop level.
    for var_name, start, end, incr in reversed(self._loop_stack):
        loop_var = symbol_pos_int(var_name)
        # This unspools the iterations:
        length = end-start  # FIXME is incr handled correct here?
        counter = start+(idiv(global_iterator*last_incr, total_length)*incr) % length
        total_length = total_length*length
        last_incr = incr
        # Lambdify for fast numeric evaluation (Mod mapped onto numpy.mod).
        base_loop_counters[loop_var] = sympy.lambdify(
            global_iterator,
            self.subs_consts(counter), modules=[numpy, {'Mod': numpy.mod}])
        if git is not None:
            try:  # Try to resolve to integer if global_iterator was given
                base_loop_counters[loop_var] = sympy.Integer(self.subs_consts(counter))
                continue
            except (ValueError, TypeError):
                # Still symbolic: evaluate numerically at the given iteration.
                base_loop_counters[loop_var] = base_loop_counters[loop_var](git)
    return base_loop_counters
"resource": ""
} |
def global_iterator(self):
    """
    Return global iterator sympy expression.

    The expression encodes all loop indices as a single mixed-radix number,
    accumulated from the inner-most loop outwards.
    """
    global_iterator = sympy.Integer(0)
    total_length = sympy.Integer(1)
    for var_name, start, end, incr in reversed(self._loop_stack):
        loop_var = symbol_pos_int(var_name)
        length = end - start  # FIXME is incr handled correct here?
        # Each level contributes its (zero-based) index scaled by the product
        # of all inner trip counts.
        global_iterator += (loop_var - start) * total_length
        total_length *= length
    return global_iterator
"resource": ""
} |
def indices_to_global_iterator(self, indices):
    """
    Transform a dictionary of indices to a global iterator integer.

    Inverse of global_iterator_to_indices().
    """
    # Substitute the concrete indices into the symbolic expression, then
    # resolve any remaining constants.
    expression = self.global_iterator().subs(indices)
    return self.subs_consts(expression)
"resource": ""
} |
def max_global_iteration(self):
    """Return global iterator with last iteration number"""
    # The last iteration has every loop index at its (inclusive) upper bound.
    last_indices = {symbol_pos_int(name): end - 1
                    for name, _start, end, _incr in self._loop_stack}
    return self.indices_to_global_iterator(last_indices)
"resource": ""
} |
def print_kernel_info(self, output_file=sys.stdout):
    """
    Print kernel information in human readable format.

    Writes four tables to *output_file*: the loop stack, read (source) and
    write (destination) array accesses, and a FLOP summary.
    """
    # Loop stack table: one row per loop (index, start, stop, step).
    table = (' idx | min max step\n' +
             '---------+---------------------------------\n')
    for l in self._loop_stack:
        table += '{:>8} | {!r:>10} {!r:>10} {!r:>10}\n'.format(*l)
    print(prefix_indent('loop stack: ', table), file=output_file)
    # Read accesses: one row per variable, offset tuples one per line.
    table = (' name | offsets ...\n' +
             '---------+------------...\n')
    for name, offsets in list(self.sources.items()):
        prefix = '{:>8} | '.format(name)
        right_side = '\n'.join(['{!r:}'.format(o) for o in offsets])
        table += prefix_indent(prefix, right_side, later_prefix=' | ')
    print(prefix_indent('data sources: ', table), file=output_file)
    # Write accesses, same layout as the sources table.
    table = (' name | offsets ...\n' +
             '---------+------------...\n')
    for name, offsets in list(self.destinations.items()):
        prefix = '{:>8} | '.format(name)
        right_side = '\n'.join(['{!r:}'.format(o) for o in offsets])
        table += prefix_indent(prefix, right_side, later_prefix=' | ')
    print(prefix_indent('data destinations: ', table), file=output_file)
    # FLOP counts per operator, followed by the grand total.
    table = (' op | count \n' +
             '----+-------\n')
    for op, count in list(self._flops.items()):
        table += '{:>3} | {:>4}\n'.format(op, count)
    table += ' =======\n'
    table += ' {:>4}'.format(sum(self._flops.values()))
    print(prefix_indent('FLOPs: ', table), file=output_file)
"resource": ""
} |
def print_variables_info(self, output_file=sys.stdout):
    """Print variables information in human readable format."""
    header = (' name | type size \n' +
              '---------+-------------------------\n')
    # One row per variable: name, base type and size expression.
    rows = ''.join('{:>8} | {:>6} {!s:<10}\n'.format(name, info[0], info[1])
                   for name, info in list(self.variables.items()))
    print(prefix_indent('variables: ', header + rows), file=output_file)
"resource": ""
} |
def print_constants_info(self, output_file=sys.stdout):
    """Print constants information in human readable format."""
    header = (' name | value \n' +
              '---------+-----------\n')
    # One row per constant: name and its value.
    rows = ''.join('{!s:>8} | {:<10}\n'.format(name, value)
                   for name, value in list(self.constants.items()))
    print(prefix_indent('constants: ', header + rows), file=output_file)
"resource": ""
} |
def print_kernel_code(self, output_file=sys.stdout):
    """Print source code of kernel to *output_file* (default: stdout)."""
    print(self.kernel_code, file=output_file)
"resource": ""
} |
def conv_ast_to_sym(self, math_ast):
    """
    Convert mathematical expressions to a sympy representation.

    May only contain parenthesis, addition, subtraction and multiplication from AST.
    """
    if type(math_ast) is c_ast.ID:
        # Plain identifier -> positive-integer symbol.
        return symbol_pos_int(math_ast.name)
    elif type(math_ast) is c_ast.Constant:
        return sympy.Integer(math_ast.value)
    else:  # elif type(dim) is c_ast.BinaryOp:
        # Map the C binary operator onto the python operator function and
        # recurse into both operands.
        op = {
            '*': operator.mul,
            '+': operator.add,
            '-': operator.sub
        }
        return op[math_ast.op](
            self.conv_ast_to_sym(math_ast.left),
            self.conv_ast_to_sym(math_ast.right))
"resource": ""
} |
def _get_offsets(self, aref, dim=0):
    """
    Return a tuple of offsets of an ArrayRef object in all dimensions.

    The index order is right to left (c-code order).
    e.g. c[i+1][j-2] -> (-2, +1)

    If aref is actually a c_ast.ID, None will be returned.
    """
    if isinstance(aref, c_ast.ID):
        # Scalar access without any subscript.
        return None
    # Check for restrictions
    assert type(aref.name) in [c_ast.ArrayRef, c_ast.ID], \
        "array references must only be used with variables or other array references"
    assert type(aref.subscript) in [c_ast.ID, c_ast.Constant, c_ast.BinaryOp], \
        'array subscript must only contain variables or binary operations'
    # Convert subscript to sympy and append
    idxs = [self.conv_ast_to_sym(aref.subscript)]
    # Check for more indices (multi-dimensional access)
    if type(aref.name) is c_ast.ArrayRef:
        idxs += self._get_offsets(aref.name, dim=dim+1)
    # Reverse to preserve order (the subscripts in the AST are traversed backwards)
    if dim == 0:
        idxs.reverse()
    return tuple(idxs)
"resource": ""
} |
def _get_basename(cls, aref):
    """
    Return base name of ArrayRef object.

    e.g. c[i+1][j-2] -> 'c'
    """
    name = aref.name
    if isinstance(name, c_ast.ArrayRef):
        # Multi-dimensional reference: recurse towards the base identifier.
        return cls._get_basename(name)
    if isinstance(name, str):
        return name
    # c_ast.ID (or similar) node: unwrap its name attribute.
    return name.name
"resource": ""
} |
def get_index_type(self, loop_nest=None):
    """
    Return index type used in loop nest.

    If index type between loops differ, an exception is raised.

    :param loop_nest: c_ast.For node or list of statements; defaults to the
        kernel's own loop nest
    :return: list of type-name strings (e.g. ['int']) or None
    :raises ValueError: if inner and outer loops use different index types
    """
    if loop_nest is None:
        loop_nest = self.get_kernel_loop_nest()
    if type(loop_nest) is c_ast.For:
        # Normalize a single For node to a list.
        loop_nest = [loop_nest]
    index_types = (None, None)
    for s in loop_nest:
        if type(s) is c_ast.For:
            # Recurse into nested loops (direct For or a Compound body).
            if type(s.stmt) in [c_ast.For, c_ast.Compound]:
                other = self.get_index_type(loop_nest=s.stmt)
            else:
                other = None
            # Pair this loop's init declaration type with the inner loops' type.
            index_types = (s.init.decls[0].type.type.names, other)
            break
    if index_types[0] == index_types[1] or index_types[1] is None:
        return index_types[0]
    else:
        raise ValueError("Loop indices must have same type, found {}.".format(index_types))
"resource": ""
} |
def _build_const_declartions(self, with_init=True):
    """
    Generate constants declarations.

    :param with_init: if True, each constant is initialized from argv
    :return: list of c_ast.Decl nodes
    """
    decls = []
    # Use type as provided by user in loop indices
    index_type = self.get_index_type()
    i = 2  # subscript for cli input, 1 is reserved for repeat
    for k in self.constants:
        # emits e.g.: const long long N = atoi(argv[2])
        # with the argv subscript increasing per constant
        # TODO change subscript of argv depending on constant count
        type_decl = c_ast.TypeDecl(k.name, ['const'], c_ast.IdentifierType(index_type))
        init = None
        if with_init:
            init = c_ast.FuncCall(
                c_ast.ID('atoi'),
                c_ast.ExprList([c_ast.ArrayRef(c_ast.ID('argv'),
                                               c_ast.Constant('int', str(i)))]))
        i += 1
        decls.append(c_ast.Decl(
            k.name, ['const'], [], [],
            type_decl, init, None))
    return decls
"resource": ""
} |
def get_array_declarations(self):
    """Return all array declarations found in the kernel AST."""
    decls = []
    for item in self.kernel_ast.block_items:
        # Keep only declarations whose declared type is an array.
        if type(item) is c_ast.Decl and type(item.type) is c_ast.ArrayDecl:
            decls.append(item)
    return decls
"resource": ""
} |
def get_kernel_loop_nest(self):
    """
    Return kernel loop nest including any preceding pragmas and following swaps.

    :return: list of c_ast nodes (For, Pragma and FuncCall statements)
    """
    loop_nest = [s for s in self.kernel_ast.block_items
                 if type(s) in [c_ast.For, c_ast.Pragma, c_ast.FuncCall]]
    # A valid kernel needs at least one for-loop.
    assert len(loop_nest) >= 1, "Found too few for statements in kernel"
    return loop_nest
"resource": ""
} |
def _build_array_declarations(self, with_init=True):
    """
    Generate declaration statements for arrays.

    Also transforming multi-dim to 1d arrays and initializing with malloc.

    :param with_init: omit malloc initialization if False
    :return: list of declarations nodes, dictionary of array names and original dimensions
    """
    # copy array declarations from kernel ast, so the original stays untouched
    array_declarations = deepcopy(self.get_array_declarations())
    array_dict = []
    for d in array_declarations:
        # Flatten multi-dimensional declarations, remembering original dims,
        # then replace the static declaration with a malloc initialization.
        array_dict.append(transform_multidim_to_1d_decl(d))
        transform_array_decl_to_malloc(d, with_init=with_init)
    return array_declarations, dict(array_dict)
"resource": ""
} |
def _find_inner_most_loop(self, loop_nest):
    """
    Return inner most for loop in loop nest.

    Depth-first search over *loop_nest*; returns None if no For node is found.
    """
    r = None
    for s in loop_nest:
        if type(s) is c_ast.For:
            # Prefer a loop nested inside s; fall back to s itself.
            return self._find_inner_most_loop(s) or s
        else:
            # Keep the first match found in any non-For child.
            r = r or self._find_inner_most_loop(s)
    return r
"resource": ""
} |
def _build_array_initializations(self, array_dimensions):
    """
    Generate initialization statements for arrays.

    :param array_dimensions: dictionary of array dimensions
    :return: list of c_ast nodes forming the initialization loop nest
    """
    # One copy suffices to leave the original kernel AST untouched
    # (the previous double deepcopy was redundant).
    kernel = deepcopy(self.get_kernel_loop_nest())
    # traverse to the inner most for loop:
    inner_most = self._find_inner_most_loop(kernel)
    orig_inner_stmt = inner_most.stmt
    inner_most.stmt = c_ast.Compound([])
    # Same random value is used for all arrays; random.uniform accepts its
    # bounds in either order.
    rand_float_str = str(random.uniform(1.0, 0.1))
    # find all array references in original orig_inner_stmt
    for aref in find_node_type(orig_inner_stmt, c_ast.ArrayRef):
        # transform to 1d references
        transform_multidim_to_1d_ref(aref, array_dimensions)
        # build static assignments and inject into inner_most.stmt
        inner_most.stmt.block_items.append(c_ast.Assignment(
            '=', aref, c_ast.Constant('float', rand_float_str)))
    return kernel
"resource": ""
} |
def _build_dummy_calls(self):
    """
    Generate false if branch with dummy calls.

    Requires kerncraft.h to be included, which defines dummy(...) and var_false.

    :return: c_ast.If statement wrapping the dummy calls
    """
    # Make sure nothing gets removed by inserting dummy calls
    dummy_calls = []
    for d in self.kernel_ast.block_items:
        # Only consider toplevel declarations from kernel ast
        if type(d) is not c_ast.Decl: continue
        if type(d.type) is c_ast.ArrayDecl:
            # Arrays decay to pointers; pass them directly.
            dummy_calls.append(c_ast.FuncCall(
                c_ast.ID('dummy'),
                c_ast.ExprList([c_ast.ID(d.name)])))
        else:
            # Scalars are passed by address.
            dummy_calls.append(c_ast.FuncCall(
                c_ast.ID('dummy'),
                c_ast.ExprList([c_ast.UnaryOp('&', c_ast.ID(d.name))])))
    # var_false is 0 at runtime, so this branch never executes, but the
    # compiler cannot prove that and must keep all referenced data alive.
    dummy_stmt = c_ast.If(
        cond=c_ast.ID('var_false'),
        iftrue=c_ast.Compound(dummy_calls),
        iffalse=None)
    return dummy_stmt
"resource": ""
} |
def _build_kernel_function_declaration(self, name='kernel'):
    """Build and return kernel function declaration"""
    # Parameter order: arrays, then scalars, then constants -- all without
    # initializers, since they are function arguments.
    array_declarations, _ = self._build_array_declarations(with_init=False)
    scalar_declarations = self._build_scalar_declarations(with_init=False)
    const_declarations = self._build_const_declartions(with_init=False)
    parameters = c_ast.ParamList(
        params=array_declarations + scalar_declarations + const_declarations)
    return_type = c_ast.TypeDecl(declname=name,
                                 quals=[],
                                 type=c_ast.IdentifierType(names=['void']))
    return c_ast.FuncDecl(args=parameters, type=return_type)
"resource": ""
} |
def _build_scalar_declarations(self, with_init=True):
    """
    Build and return scalar variable declarations.

    :param with_init: if True, attach deterministic initializer constants
    :return: list of c_ast.Decl nodes
    """
    # copy scalar declarations from kernel ast
    scalar_declarations = [deepcopy(d) for d in self.kernel_ast.block_items
                           if type(d) is c_ast.Decl and type(d.type) is c_ast.TypeDecl]
    # add init values to declarations
    if with_init:
        random.seed(2342)  # we want reproducible random numbers
        for d in scalar_declarations:
            if d.type.type.names[0] in ['double', 'float']:
                d.init = c_ast.Constant('float', str(random.uniform(1.0, 0.1)))
            elif d.type.type.names[0] in ['int', 'long', 'long long',
                                          'unsigned int', 'unsigned long',
                                          'unsigned long long']:
                # pycparser Constant values must be strings; an int here would
                # break CGenerator's string joining.
                d.init = c_ast.Constant('int', '2')
    return scalar_declarations
"resource": ""
} |
def get_kernel_code(self, openmp=False, as_filename=False, name='kernel'):
    """
    Generate and return compilable source code with kernel function from AST.

    :param openmp: if true, OpenMP code will be generated
    :param as_filename: if true, will save to file and return filename
    :param name: name of kernel function
    :return: code string, or the file name if *as_filename* is true
    """
    assert self.kernel_ast is not None, "AST does not exist, this could be due to running " \
                                        "based on a kernel description rather than code."
    file_name = 'kernel'
    if openmp:
        file_name += '-omp'
    file_name += '.c'
    fp, already_available = self._get_intermediate_file(
        file_name, machine_and_compiler_dependent=False)
    # Use already cached version
    if already_available:
        code = fp.read()
    else:
        array_declarations, array_dimensions = self._build_array_declarations()
        # Prepare actual kernel loop nest
        if openmp:
            # with OpenMP code
            kernel = deepcopy(self.get_kernel_loop_nest())
            # find all array references in kernel
            for aref in find_node_type(kernel, c_ast.ArrayRef):
                # transform to 1d references
                transform_multidim_to_1d_ref(aref, array_dimensions)
            omp_pragmas = [p for p in find_node_type(kernel, c_ast.Pragma)
                           if 'omp' in p.string]
            # TODO if omp parallel was found, remove it (also replace "parallel for" -> "for")
            # if no omp for pragmas are present, insert suitable ones
            if not omp_pragmas:
                kernel.insert(0, c_ast.Pragma("omp for"))
            # otherwise do not change anything
        else:
            # with original code
            kernel = deepcopy(self.get_kernel_loop_nest())
            # find all array references in kernel
            for aref in find_node_type(kernel, c_ast.ArrayRef):
                # transform to 1d references
                transform_multidim_to_1d_ref(aref, array_dimensions)
        # Wrap the loop nest into a void function taking all data as params.
        function_ast = c_ast.FuncDef(decl=c_ast.Decl(
            name=name, type=self._build_kernel_function_declaration(name=name), quals=[],
            storage=[], funcspec=[], init=None, bitsize=None),
            body=c_ast.Compound(block_items=kernel),
            param_decls=None)
        # Generate code
        code = CGenerator().visit(function_ast)
        # Insert missing #includes from template to top of code
        code = '#include "kerncraft.h"\n\n' + code
        # Store to file
        fp.write(code)
        fp.close()
    if as_filename:
        return fp.name
    else:
        return code
"resource": ""
} |
def _build_kernel_call(self, name='kernel'):
    """Generate and return kernel call ast."""
    # Pass every array, scalar and constant by name, in the same order as in
    # the kernel function declaration.
    all_decls = (self._build_array_declarations()[0]
                 + self._build_scalar_declarations()
                 + self._build_const_declartions())
    arguments = c_ast.ExprList(exprs=[c_ast.ID(name=d.name) for d in all_decls])
    return c_ast.FuncCall(name=c_ast.ID(name=name), args=arguments)
"resource": ""
} |
def get_main_code(self, as_filename=False, kernel_function_name='kernel'):
    """
    Generate and return compilable source code from AST.

    :param as_filename: if true, will save to file and return filename
    :param kernel_function_name: name of the kernel function to declare/call
    :return: code string, or the file name if *as_filename* is true
    """
    # TODO produce nicer code, including help text and other "comfort features".
    assert self.kernel_ast is not None, "AST does not exist, this could be due to running " \
                                        "based on a kernel description rather than code."
    fp, already_available = self._get_intermediate_file('main.c',
                                                        machine_and_compiler_dependent=False)
    # Use already cached version
    if already_available:
        code = fp.read()
    else:
        # Parse the main() template and fill in all placeholders below.
        parser = CParser()
        template_code = self.CODE_TEMPLATE
        template_ast = parser.parse(clean_code(template_code,
                                               macros=True, comments=True, pragmas=False))
        ast = deepcopy(template_ast)
        # Define and replace DECLARE_CONSTS
        replace_id(ast, "DECLARE_CONSTS", self._build_const_declartions(with_init=True))
        # Define and replace DECLARE_ARRAYS
        array_declarations, array_dimensions = self._build_array_declarations()
        replace_id(ast, "DECLARE_ARRAYS", array_declarations)
        # Define and replace DECLARE_INIT_SCALARS
        replace_id(ast, "DECLARE_INIT_SCALARS", self._build_scalar_declarations())
        # Define and replace DUMMY_CALLS
        replace_id(ast, "DUMMY_CALLS", self._build_dummy_calls())
        # Define and replace KERNEL_DECL
        ast.ext.insert(0, self._build_kernel_function_declaration(
            name=kernel_function_name))
        # Define and replace KERNEL_CALL
        replace_id(ast, "KERNEL_CALL", self._build_kernel_call())
        # Define and replace INIT_ARRAYS based on previously generated kernel
        replace_id(ast, "INIT_ARRAYS", self._build_array_initializations(array_dimensions))
        # Generate code
        code = CGenerator().visit(ast)
        # Insert missing #includes from template to top of code
        code = '\n'.join([l for l in template_code.split('\n') if l.startswith("#include")]) + \
               '\n\n' + code
        # Store to file
        fp.write(code)
        fp.close()
    if as_filename:
        return fp.name
    else:
        return code
"resource": ""
} |
def iaca_analysis(self, micro_architecture, asm_block='auto',
                  pointer_increment='auto_with_manual_fallback', verbose=False):
    """
    Run an IACA analysis and return its outcome.

    *asm_block* controls how the to-be-marked block is chosen. "auto" (default) results in
    the largest block, "manual" results in interactive and a number in the according block.

    *pointer_increment* is the number of bytes the pointer is incremented after the loop or
    - 'auto': automatic detection, RuntimeError is raised in case of failure
    - 'auto_with_manual_fallback': automatic detection, fallback to manual input
    - 'manual': prompt user

    :return: tuple of (IACA analysis result, instrumented asm block)
    """
    # Compile to assembly, then instrument the selected block with IACA
    # start/stop markers in a separate '-iaca.s' file.
    asm_filename = self.compile_kernel(assembly=True, verbose=verbose)
    asm_marked_filename = os.path.splitext(asm_filename)[0]+'-iaca.s'
    with open(asm_filename, 'r') as in_file, open(asm_marked_filename, 'w') as out_file:
        self.asm_block = iaca.iaca_instrumentation(
            in_file, out_file,
            block_selection=asm_block,
            pointer_increment=pointer_increment)
    # Assemble the marked file and let IACA analyse the resulting object.
    obj_name = self.assemble_to_object(asm_marked_filename, verbose=verbose)
    return iaca.iaca_analyse_instrumented_binary(obj_name, micro_architecture), self.asm_block
"resource": ""
} |
def build_executable(self, lflags=None, verbose=False, openmp=False):
    """
    Compile source to executable with likwid capabilities and return the executable name.

    :param lflags: additional linker flags (list); LIKWID flags are appended
    :param verbose: if True, print the executed commands
    :param openmp: if True, compile the kernel with OpenMP support
    :return: path of the built (or cached) executable
    """
    compiler, compiler_args = self._machine.get_compiler()
    kernel_obj_filename = self.compile_kernel(openmp=openmp, verbose=verbose)
    out_filename, already_exists = self._get_intermediate_file(
        os.path.splitext(os.path.basename(kernel_obj_filename))[0], binary=True, fp=False)
    if not already_exists:
        main_source_filename = self.get_main_code(as_filename=True)
        # LIKWID headers and libraries must be locatable via environment.
        if not (('LIKWID_INCLUDE' in os.environ or 'LIKWID_INC' in os.environ) and
                'LIKWID_LIB' in os.environ):
            print('Could not find LIKWID_INCLUDE (e.g., "-I/app/likwid/4.1.2/include") and '
                  'LIKWID_LIB (e.g., "-L/apps/likwid/4.1.2/lib") environment variables',
                  file=sys.stderr)
            sys.exit(1)
        compiler_args += [
            '-std=c99',
            '-I'+reduce_path(os.path.abspath(os.path.dirname(
                os.path.realpath(__file__)))+'/headers/'),
            os.environ.get('LIKWID_INCLUDE', ''),
            os.environ.get('LIKWID_INC', ''),
            '-llikwid']
        # This is a special case for unittesting
        if os.environ.get('LIKWID_LIB') == '':
            # Drop '-llikwid' when no library path is configured.
            compiler_args = compiler_args[:-1]
        if lflags is None:
            lflags = []
        lflags += os.environ['LIKWID_LIB'].split(' ') + ['-pthread']
        compiler_args += os.environ['LIKWID_LIB'].split(' ') + ['-pthread']
        infiles = [reduce_path(os.path.abspath(os.path.dirname(
                       os.path.realpath(__file__)))+'/headers/dummy.c'),
                   kernel_obj_filename, main_source_filename]
        cmd = [compiler] + infiles + compiler_args + ['-o', out_filename]
        # remove empty arguments
        cmd = list(filter(bool, cmd))
        if verbose:
            print('Executing (build_executable): ', ' '.join(cmd))
        try:
            subprocess.check_output(cmd)
        except subprocess.CalledProcessError as e:
            print("Build failed:", e, file=sys.stderr)
            sys.exit(1)
    else:
        if verbose:
            print('Executing (build_executable): ', 'using cached', out_filename)
    return out_filename
"resource": ""
} |
def string_to_sympy(cls, s):
    """
    Convert any string to a sympy object or None.

    Integers become sympy.Integer, lists become tuples (converted
    element-wise), None is passed through, and strings are parsed as
    expressions whose free symbols are assumed to be positive integers.
    """
    if isinstance(s, int):
        return sympy.Integer(s)
    elif isinstance(s, list):
        return tuple([cls.string_to_sympy(e) for e in s])
    elif s is None:
        return None
    else:
        # Step 1 build expression with the whole alphabet redefined:
        local_dict = {c: symbol_pos_int(c) for c in s if c in string.ascii_letters}
        # TODO find nicer solution for N and other pre-mapped letters
        preliminary_expr = parse_expr(s, local_dict=local_dict)
        # Replace all free symbols with positive integer versions:
        local_dict.update(
            {s.name: symbol_pos_int(s.name) for s in preliminary_expr.free_symbols})
        return parse_expr(s, local_dict=local_dict)
"resource": ""
} |
def get_identifier(self):
    """
    Return identifier which is either the machine file name or sha256 checksum of data.

    :return: file basename if backed by a file, otherwise hex digest of the
             machine data's repr
    """
    if self._path:
        return os.path.basename(self._path)
    # hashlib.sha256 requires a bytes-like argument; the previous nested
    # sha256(sha256(...)) passed a hash object and raised TypeError.
    return hashlib.sha256(repr(self._data).encode()).hexdigest()
"resource": ""
} |
def get_last_modified_datetime(self):
    """Return datetime object of modified time of machine file. Return now if not a file."""
    if not self._path:
        # No backing file: fall back to the current time.
        return datetime.now()
    mtime = os.stat(self._path).st_mtime
    return datetime.utcfromtimestamp(mtime)
"resource": ""
} |
def get_cachesim(self, cores=1):
    """
    Return a cachesim.CacheSimulator object based on the machine description.

    :param cores: core count (default: 1)
    """
    cache_dict = {}
    for c in self['memory hierarchy']:
        # Skip main memory
        if 'cache per group' not in c:
            continue
        cache_dict[c['level']] = deepcopy(c['cache per group'])
        # Scale size of shared caches according to cores
        # NOTE(review): the set count of any shared cache is divided by the
        # total requested core count -- confirm this matches group topology.
        if c['cores per group'] > 1:
            cache_dict[c['level']]['sets'] //= cores
    cs, caches, mem = cachesim.CacheSimulator.from_dict(cache_dict)
    return cs
"resource": ""
} |
def get_bandwidth(self, cache_level, read_streams, write_streams, threads_per_core, cores=None):
    """
    Return best fitting bandwidth according to number of threads, read and write streams.

    :param cache_level: integer of cache (0 is L1, 1 is L2 ...)
    :param read_streams: number of read streams expected
    :param write_streams: number of write streams expected
    :param threads_per_core: number of threads that are run on each core
    :param cores: if not given, will choose maximum bandwidth for single NUMA domain
    :return: tuple of (bandwidth, name of best fitting benchmark kernel)
    """
    # try to find best fitting kernel (closest to read/write ratio):
    # write allocate has to be handled in kernel information (all writes are also reads)
    # TODO support for non-write-allocate architectures
    try:
        target_ratio = read_streams/write_streams
    except ZeroDivisionError:
        target_ratio = float('inf')
    measurement_kernel = 'load'
    measurement_kernel_info = self['benchmarks']['kernels'][measurement_kernel]
    measurement_kernel_ratio = float('inf')
    for kernel_name, kernel_info in sorted(self['benchmarks']['kernels'].items()):
        try:
            # Effective read/write ratio of this benchmark kernel.
            kernel_ratio = ((kernel_info['read streams']['streams'] +
                             kernel_info['write streams']['streams'] -
                             kernel_info['read+write streams']['streams']) /
                            kernel_info['write streams']['streams'])
        except ZeroDivisionError:
            kernel_ratio = float('inf')
        # Keep the kernel whose ratio is closest to the requested one.
        if abs(kernel_ratio - target_ratio) < abs(measurement_kernel_ratio - target_ratio):
            measurement_kernel = kernel_name
            measurement_kernel_info = kernel_info
            measurement_kernel_ratio = kernel_ratio
    # choose smt, and then use max/saturation bw
    bw_level = self['memory hierarchy'][cache_level]['level']
    bw_measurements = \
        self['benchmarks']['measurements'][bw_level][threads_per_core]
    assert threads_per_core == bw_measurements['threads per core'], \
        'malformed measurement dictionary in machine file.'
    if cores is not None:
        # Used by Roofline model
        run_index = bw_measurements['cores'].index(cores)
        bw = bw_measurements['results'][measurement_kernel][run_index]
    else:
        # Used by ECM model
        # Choose maximum number of cores to get bandwidth for
        max_cores = min(self['memory hierarchy'][cache_level]['cores per group'],
                        self['cores per NUMA domain'])
        bw = max(bw_measurements['results'][measurement_kernel][:max_cores])
    # Correct bandwidth due to miss-measurement of write allocation
    # TODO support non-temporal stores and non-write-allocate architectures
    if cache_level == 0:
        # L1 does not have write-allocate, so everything is measured correctly
        factor = 1.0
    else:
        # Account for the hidden write-allocate read traffic in the benchmark.
        factor = (float(measurement_kernel_info['read streams']['bytes']) +
                  2.0*float(measurement_kernel_info['write streams']['bytes']) -
                  float(measurement_kernel_info['read+write streams']['bytes'])) / \
                 (float(measurement_kernel_info['read streams']['bytes']) +
                  float(measurement_kernel_info['write streams']['bytes']))
    bw = bw * factor
    return bw, measurement_kernel
"resource": ""
} |
def get_compiler(self, compiler=None, flags=None):
    """
    Return tuple of compiler and compiler flags.

    Selects compiler and flags from machine description file, commandline arguments or call
    arguments.

    :param compiler: compiler name override
    :param flags: compiler flags override (space-separated string)
    :return: (compiler name, list of flag strings)
    :raises RuntimeError: if no listed compiler can be found in $PATH
    """
    if self._args:
        compiler = compiler or self._args.compiler
        flags = flags or self._args.compiler_flags
    if compiler is None:
        # Select first available compiler in machine description file's compiler dict
        for c in self['compiler'].keys():
            # Making sure compiler is available:
            if find_executable(c) is not None:
                compiler = c
                break
        else:
            raise RuntimeError("No compiler ({}) was found. Add different one in machine file, "
                               "via --compiler argument or make sure it will be found in "
                               "$PATH.".format(list(self['compiler'].keys())))
    if flags is None:
        # Select from machine description file
        flags = self['compiler'].get(compiler, '')
    return compiler, flags.split(' ')
"resource": ""
} |
def parse_perfctr_event(perfctr):
    """
    Parse events in machine description to tuple representation used in Benchmark module.

    The returned tuple is always (event, register, parameters-dict); the dict
    is empty when no parameters are given.

    Examples:
    >>> parse_perfctr_event('PERF_EVENT:REG[0-3]')
    ('PERF_EVENT', 'REG[0-3]', {})
    >>> parse_perfctr_event('PERF_EVENT:REG[0-3]:STAY:FOO=23:BAR=0x23')
    ('PERF_EVENT', 'REG[0-3]', {'STAY': None, 'FOO': 23, 'BAR': 35})
    """
    split_perfctr = perfctr.split(':')
    assert len(split_perfctr) >= 2, "At least one colon (:) is required in the event name"
    event_tuple = split_perfctr[:2]
    parameters = {}
    for p in split_perfctr[2:]:
        if '=' in p:
            k, v = p.split('=')
            # Hex values carry a 0x prefix, everything else is decimal.
            if v.startswith('0x'):
                parameters[k] = int(v, 16)
            else:
                parameters[k] = int(v)
        else:
            # Bare option without a value.
            parameters[p] = None
    event_tuple.append(parameters)
    return tuple(event_tuple)
"resource": ""
} |
q273738 | Intervals._enforce_no_overlap | test | def _enforce_no_overlap(self, start_at=0):
"""Enforce that no ranges overlap in internal storage."""
i = start_at
while i+1 < len(self.data):
if self.data[i][1] >= self.data[i+1][0]:
# beginning of i+1-th range is contained in i-th range
if self.data[i][1] < self.data[i+1][1]:
# i+1-th range is longer, thus enlarge i-th range
self.data[i][1] = self.data[i+1][1]
# removed contained range
del self.data[i+1]
i += 1 | python | {
"resource": ""
} |
def get_header_path() -> str:
    """Return local folder path of header files."""
    import os
    # Resolve symlinks so the path is stable regardless of install layout.
    here = os.path.dirname(os.path.realpath(__file__))
    return os.path.abspath(here) + '/headers/'
"resource": ""
} |
def _align_iteration_with_cl_boundary(self, iteration, subtract=True):
    """Align iteration with cacheline boundary.

    :param iteration: global iteration index to align
    :param subtract: if True, move backward to the previous cacheline
        boundary; otherwise move forward to the next one
    :return: adjusted iteration index (unchanged if already aligned)
    """
    # FIXME handle multiple datatypes
    element_size = self.kernel.datatypes_size[self.kernel.datatype]
    cacheline_size = self.machine['cacheline size']
    elements_per_cacheline = int(cacheline_size // element_size)
    # Gathering some loop information:
    inner_loop = list(self.kernel.get_loop_stack(subs_consts=True))[-1]
    inner_increment = inner_loop['increment']
    # do this by aligning either writes (preferred) or reads
    # Assumption: writes (and reads) increase linearly
    o = self.kernel.compile_global_offsets(iteration=iteration)[0]
    if len(o[1]):
        # we have a write to work with:
        first_offset = min(o[1])
    else:
        # we use reads
        first_offset = min(o[0])
    # distance (in bytes) of the first access from its cacheline start;
    # shifting by cl_bits down and up again truncates to the boundary
    diff = first_offset - \
        (int(first_offset) >> self.csim.first_level.cl_bits << self.csim.first_level.cl_bits)
    if diff == 0:
        return iteration
    elif subtract:
        return iteration - (diff // element_size) // inner_increment
    else:
        return iteration + (elements_per_cacheline - diff // element_size) // inner_increment
"resource": ""
} |
def get_loads(self):
    """Return a list with number of loaded cache lines per memory hierarchy level."""
    scale = self.first_dim_factor
    return [self.stats[level]['LOAD_count'] / scale
            for level, _ in enumerate(self.machine['memory hierarchy'])]
"resource": ""
} |
def get_hits(self):
    """Return a list with number of hit cache lines per memory hierarchy level."""
    scale = self.first_dim_factor
    return [self.stats[level]['HIT_count'] / scale
            for level, _ in enumerate(self.machine['memory hierarchy'])]
"resource": ""
} |
def get_misses(self):
    """Return a list with number of missed cache lines per memory hierarchy level."""
    scale = self.first_dim_factor
    return [self.stats[level]['MISS_count'] / scale
            for level, _ in enumerate(self.machine['memory hierarchy'])]
"resource": ""
} |
def get_stores(self):
    """Return a list with number of stored cache lines per memory hierarchy level."""
    scale = self.first_dim_factor
    return [self.stats[level]['STORE_count'] / scale
            for level, _ in enumerate(self.machine['memory hierarchy'])]
"resource": ""
} |
def get_evicts(self):
    """Return a list with number of evicted cache lines per memory hierarchy level."""
    scale = self.first_dim_factor
    return [self.stats[level]['EVICT_count'] / scale
            for level, _ in enumerate(self.machine['memory hierarchy'])]
"resource": ""
} |
def get_infos(self):
    """Return verbose information about the predictor."""
    scale = self.first_dim_factor
    infos = {'memory hierarchy': [], 'cache stats': self.stats,
             'cachelines in stats': scale}
    # one entry per memory hierarchy level, normalized by the cacheline factor
    for level, cache_info in enumerate(self.machine['memory hierarchy']):
        stats = self.stats[level]
        infos['memory hierarchy'].append({
            'index': level,
            'level': '{}'.format(cache_info['level']),
            'total loads': stats['LOAD_byte'] / scale,
            'total misses': stats['MISS_byte'] / scale,
            'total hits': stats['HIT_byte'] / scale,
            'total stores': stats['STORE_byte'] / scale,
            'total evicts': stats['EVICT_byte'] / scale,
            'total lines load': stats['LOAD_count'] / scale,
            'total lines misses': stats['MISS_count'] / scale,
            'total lines hits': stats['HIT_count'] / scale,
            'total lines stores': stats['STORE_count'] / scale,
            'total lines evicts': stats['EVICT_count'] / scale,
            'cycles': None})
    return infos
"resource": ""
} |
def fix_env_variable(name, value):
    """Fix environment variable to a value within context. Unset if value is None."""
    previous = os.environ.get(name)
    # apply the requested state: set, or unset when value is None
    if value is None:
        os.environ.pop(name, None)
    else:
        os.environ[name] = value
    try:
        yield
    finally:
        # restore the pre-context state on exit
        if previous is None:
            os.environ.pop(name, None)
        else:
            os.environ[name] = previous
"resource": ""
} |
def configure_arggroup(cls, parser):
    """Configure argument parser."""
    arguments = [
        (('--no-phenoecm',),
         dict(action='store_true',
              help='Disables the phenomenological ECM model building.')),
        (('--iterations',),
         dict(type=int, default=10,
              help='Number of outer-loop iterations (e.g. time loop) during benchmarking. '
                   'Default is 10, but actual number will be adapted to at least 0.2s runtime.')),
        (('--ignore-warnings',),
         dict(action='store_true',
              help='Ignore warnings about missmatched CPU model and frequency.')),
    ]
    for flags, options in arguments:
        parser.add_argument(*flags, **options)
"resource": ""
} |
def report(self, output_file=sys.stdout):
    """Report gathered analysis data in human readable form.

    :param output_file: file-like object the report is written to
        (defaults to stdout)
    """
    if self.verbose > 1:
        with pprint_nosort():
            pprint.pprint(self.results)
    if self.verbose > 0:
        print('Runtime (per repetition): {:.2g} s'.format(
            self.results['Runtime (per repetition) [s]']),
            file=output_file)
    if self.verbose > 0:
        print('Iterations per repetition: {!s}'.format(
            self.results['Iterations per repetition']),
            file=output_file)
    print('Runtime (per cacheline update): {:.2f} cy/CL'.format(
        self.results['Runtime (per cacheline update) [cy/CL]']),
        file=output_file)
    print('MEM volume (per repetition): {:.0f} Byte'.format(
        self.results['MEM volume (per repetition) [B]']),
        file=output_file)
    print('Performance: {:.2f} MFLOP/s'.format(self.results['Performance [MFLOP/s]']),
          file=output_file)
    print('Performance: {:.2f} MLUP/s'.format(self.results['Performance [MLUP/s]']),
          file=output_file)
    print('Performance: {:.2f} It/s'.format(self.results['Performance [MIt/s]']),
          file=output_file)
    if self.verbose > 0:
        print('MEM bandwidth: {:.2f} MByte/s'.format(self.results['MEM BW [MByte/s]']),
              file=output_file)
    print('', file=output_file)

    if not self.no_phenoecm:
        # BUGFIX: this section previously printed to stdout unconditionally;
        # it now honors output_file like the rest of the report.
        print("Data Transfers:", file=output_file)
        print("{:^8} |".format("cache"), end='', file=output_file)
        # print the metric names once, taken from the first level's metrics
        for metrics in self.results['data transfers'].values():
            for metric_name in sorted(metrics):
                print(" {:^14}".format(metric_name), end='', file=output_file)
            print(file=output_file)
            break
        for cache, metrics in sorted(self.results['data transfers'].items()):
            print("{!s:^8} |".format(cache), end='', file=output_file)
            for k, v in sorted(metrics.items()):
                print(" {!s:^14}".format(v), end='', file=output_file)
            print(file=output_file)
        print(file=output_file)

        print('Phenomenological ECM model: {{ {T_OL:.1f} || {T_nOL:.1f} | {T_L1L2:.1f} | '
              '{T_L2L3:.1f} | {T_L3MEM:.1f} }} cy/CL'.format(
                  **{k: float(v) for k, v in self.results['ECM'].items()}),
              file=output_file)
        print('T_OL assumes that two loads per cycle may be retiered, which is true for '
              '128bit SSE/half-AVX loads on SNB and IVY, and 256bit full-AVX loads on HSW, '
              'BDW, SKL and SKX, but it also depends on AGU availability.',
              file=output_file)
"resource": ""
} |
def parse_description():
    """
    Parse the description in the README file

    CommandLine:
        python -c "import setup; print(setup.parse_description())"
    """
    from os.path import dirname, join, exists
    readme_fpath = join(dirname(__file__), 'README.md')
    # This breaks on pip install, so check that it exists.
    if not exists(readme_fpath):
        return ''
    # strip out markdown to make a clean readme for pypi:
    # capture everything between the '# Purpose' heading and the next '##'
    captured = []
    capturing = False
    with open(readme_fpath, 'r') as f:
        for line in f.readlines():
            if '# Purpose' in line:
                capturing = True
            elif line.startswith('##'):
                break
            elif capturing:
                captured.append(line)
    text = ''.join(captured).strip()
    # preserve paragraph breaks while joining wrapped lines
    text = text.replace('\n\n', '_NLHACK_')
    text = text.replace('\n', ' ')
    text = text.replace('_NLHACK_', '\n\n')
    return text
"resource": ""
} |
def schedule_retry(self, config):
    """Schedule a retry"""
    countdown = config.get('SAILTHRU_RETRY_SECONDS')
    max_retries = config.get('SAILTHRU_RETRY_ATTEMPTS')
    raise self.retry(countdown=countdown, max_retries=max_retries)
"resource": ""
} |
def _build_purchase_item(course_id, course_url, cost_in_cents, mode, course_data, sku):
    """Build and return Sailthru purchase item object"""
    item = {
        'id': "{}-{}".format(course_id, mode),
        'url': course_url,
        'price': cost_in_cents,
        'qty': 1,
    }
    # get title from course info if we don't already have it from Sailthru;
    # otherwise just invent one
    if 'title' in course_data:
        item['title'] = course_data['title']
    else:
        item['title'] = 'Course {} mode: {}'.format(course_id, mode)
    if 'tags' in course_data:
        item['tags'] = course_data['tags']
    # add vars to item (copying so course_data is not mutated)
    item_vars = dict(course_data.get('vars', {}), mode=mode, course_run_id=course_id)
    item_vars['purchase_sku'] = sku
    item['vars'] = item_vars
    return item
"resource": ""
} |
def _record_purchase(sailthru_client, email, item, purchase_incomplete, message_id, options):
    """Record a purchase in Sailthru

    Arguments:
        sailthru_client (object): SailthruClient
        email (str): user's email address
        item (dict): Sailthru required information about the course
        purchase_incomplete (boolean): True if adding item to shopping cart
        message_id (str): Cookie used to identify marketing campaign
        options (dict): Sailthru purchase API options (e.g. template name)

    Returns:
        False if retryable error, else True
    """
    try:
        sailthru_response = sailthru_client.purchase(email, [item],
                                                     incomplete=purchase_incomplete, message_id=message_id,
                                                     options=options)
        if not sailthru_response.is_ok():
            error = sailthru_response.get_error()
            logger.error("Error attempting to record purchase in Sailthru: %s", error.get_message())
            # signal the caller to retry only when Sailthru says the request is retryable
            return not can_retry_sailthru_request(error)
    except SailthruClientError as exc:
        # transport-level failure: always worth a retry
        logger.exception("Exception attempting to record purchase for %s in Sailthru - %s", email, text_type(exc))
        return False
    return True
"resource": ""
} |
def _get_course_content(course_id, course_url, sailthru_client, site_code, config):
    """Get course information using the Sailthru content api or from cache.

    If there is an error, just return with an empty response.

    Arguments:
        course_id (str): course key of the course
        course_url (str): LMS url for course info page.
        sailthru_client (object): SailthruClient
        site_code (str): site code
        config (dict): config options

    Returns:
        course information from Sailthru (dict; empty on failure)
    """
    # check cache first
    cache_key = "{}:{}".format(site_code, course_url)
    response = cache.get(cache_key)
    if not response:
        try:
            sailthru_response = sailthru_client.api_get("content", {"id": course_url})
            if not sailthru_response.is_ok():
                response = {}
            else:
                response = sailthru_response.json
                cache.set(cache_key, response, config.get('SAILTHRU_CACHE_TTL_SECONDS'))
        except SailthruClientError:
            response = {}

        if not response:
            # fall back to the Ecommerce Course API when Sailthru has no data
            logger.error('Could not get course data from Sailthru on enroll/purchase event. '
                         'Calling Ecommerce Course API to get course info for enrollment confirmation email')
            response = _get_course_content_from_ecommerce(course_id, site_code=site_code)
            if response:
                cache.set(cache_key, response, config.get('SAILTHRU_CACHE_TTL_SECONDS'))

    return response
"resource": ""
} |
def _get_course_content_from_ecommerce(course_id, site_code=None):
    """
    Get course information using the Ecommerce course api.

    In case of error returns empty response.

    Arguments:
        course_id (str): course key of the course
        site_code (str): site code

    Returns:
        course information from Ecommerce (dict; empty on failure)
    """
    api = get_ecommerce_client(site_code=site_code)
    try:
        api_response = api.courses(course_id).get()
    except Exception:  # pylint: disable=broad-except
        # deliberately broad: any API failure degrades to an empty result
        logger.exception(
            'An error occurred while retrieving data for course run [%s] from the Catalog API.',
            course_id,
            exc_info=True
        )
        return {}
    # reduce the API payload to the fields the email templates need
    return {
        'title': api_response.get('name'),
        'verification_deadline': api_response.get('verification_deadline')
    }
"resource": ""
} |
def _update_unenrolled_list(sailthru_client, email, course_url, unenroll):
    """Maintain a list of courses the user has unenrolled from in the Sailthru user record

    Arguments:
        sailthru_client (object): SailthruClient
        email (str): user's email address
        course_url (str): LMS url for course info page.
        unenroll (boolean): True if unenrolling, False if enrolling

    Returns:
        False if retryable error, else True
    """
    try:
        # get the user 'vars' values from sailthru
        sailthru_response = sailthru_client.api_get("user", {"id": email, "fields": {"vars": 1}})
        if not sailthru_response.is_ok():
            error = sailthru_response.get_error()
            logger.error("Error attempting to read user record from Sailthru: %s", error.get_message())
            return not can_retry_sailthru_request(error)

        response_json = sailthru_response.json

        # extract the current unenrolled list, defaulting to empty
        unenroll_list = []
        if response_json and "vars" in response_json and response_json["vars"] \
                and "unenrolled" in response_json["vars"]:
            unenroll_list = response_json["vars"]["unenrolled"]

        changed = False
        # if unenrolling, add course to unenroll list
        if unenroll:
            if course_url not in unenroll_list:
                unenroll_list.append(course_url)
                changed = True

        # if enrolling, remove course from unenroll list
        elif course_url in unenroll_list:
            unenroll_list.remove(course_url)
            changed = True

        if changed:
            # write user record back (only when the list actually changed)
            sailthru_response = sailthru_client.api_post(
                'user', {'id': email, 'key': 'email', 'vars': {'unenrolled': unenroll_list}})
            if not sailthru_response.is_ok():
                error = sailthru_response.get_error()
                logger.error("Error attempting to update user record in Sailthru: %s", error.get_message())
                return not can_retry_sailthru_request(error)

        return True
    except SailthruClientError as exc:
        logger.exception("Exception attempting to update user record for %s in Sailthru - %s", email, text_type(exc))
        return False
"resource": ""
} |
def send_course_refund_email(self, email, refund_id, amount, course_name, order_number, order_url, site_code=None):
    """ Sends the course refund email.

    Args:
        self: Ignore.
        email (str): Recipient's email address.
        refund_id (int): ID of the refund that initiated this task.
        amount (str): Formatted amount of the refund.
        course_name (str): Name of the course for which payment was refunded.
        order_number (str): Order number of the order that was refunded.
        order_url (str): Receipt URL of the refunded order.
        site_code (str): Identifier of the site sending the email.
    """
    config = get_sailthru_configuration(site_code)

    try:
        sailthru_client = get_sailthru_client(site_code)
    except SailthruError:
        # NOTE: We rely on the function to log the error for us
        return

    email_vars = {
        'amount': amount,
        'course_name': course_name,
        'order_number': order_number,
        'order_url': order_url,
    }

    try:
        response = sailthru_client.send(
            template=config['templates']['course_refund'],
            email=email,
            _vars=email_vars
        )
    except SailthruClientError:
        # transport-level failure; no retry is scheduled for client errors
        logger.exception(
            'A client error occurred while attempting to send a course refund notification for refund [%d].',
            refund_id
        )
        return

    if response.is_ok():
        logger.info('Course refund notification sent for refund %d.', refund_id)
    else:
        error = response.get_error()
        logger.error(
            'An error occurred while attempting to send a course refund notification for refund [%d]: %d - %s',
            refund_id, error.get_error_code(), error.get_message()
        )

        # only schedule a retry when Sailthru reports a retryable error
        if can_retry_sailthru_request(error):
            logger.info(
                'An attempt will be made again to send a course refund notification for refund [%d].',
                refund_id
            )
            schedule_retry(self, config)
        else:
            logger.warning(
                'No further attempts will be made to send a course refund notification for refund [%d].',
                refund_id
            )
"resource": ""
} |
def _send_offer_assignment_notification_email(config, user_email, subject, email_body, site_code, task):
    """Handles sending offer assignment notification emails and retrying failed emails when appropriate.

    Arguments:
        config (dict): Sailthru configuration for the site.
        user_email (str): Recipient's email address.
        subject (str): Email subject line.
        email_body (str): Rendered body of the notification.
        site_code (str): Identifier of the site sending the email.
        task: Celery task instance, used to schedule retries.

    Returns:
        The Sailthru response, or None if the send could not be attempted.
    """
    try:
        sailthru_client = get_sailthru_client(site_code)
    except SailthruError:
        logger.exception(
            '[Offer Assignment] A client error occurred while attempting to send a offer assignment notification.'
            ' Message: {message}'.format(message=email_body)
        )
        return None

    email_vars = {
        'subject': subject,
        'email_body': email_body,
    }

    try:
        response = sailthru_client.send(
            template=config['templates']['assignment_email'],
            email=user_email,
            _vars=email_vars
        )
    except SailthruClientError:
        logger.exception(
            '[Offer Assignment] A client error occurred while attempting to send a offer assignment notification.'
            ' Message: {message}'.format(message=email_body)
        )
        return None

    if not response.is_ok():
        error = response.get_error()
        logger.error(
            '[Offer Assignment] A {token_error_code} - {token_error_message} error occurred'
            ' while attempting to send a offer assignment notification.'
            ' Message: {message}'.format(
                message=email_body,
                token_error_code=error.get_error_code(),
                token_error_message=error.get_message()
            )
        )
        # only schedule a retry when Sailthru reports a retryable error
        if can_retry_sailthru_request(error):
            logger.info(
                '[Offer Assignment] An attempt will be made to resend the offer assignment notification.'
                ' Message: {message}'.format(message=email_body)
            )
            schedule_retry(task, config)
        else:
            logger.warning(
                '[Offer Assignment] No further attempts will be made to send the offer assignment notification.'
                ' Failed Message: {message}'.format(message=email_body)
            )
    return response
"resource": ""
} |
def get_logger_config(log_dir='/var/tmp',
                      logging_env='no_env',
                      edx_filename='edx.log',
                      dev_env=False,
                      debug=False,
                      local_loglevel='INFO',
                      service_variant='ecomworker'):
    """
    Returns a dictionary containing logging configuration.

    If dev_env is True, logging will not be done via local rsyslogd.
    Instead, application logs will be dropped into log_dir. 'edx_filename'
    is ignored unless dev_env is True.
    """
    # Revert to INFO if an invalid string is passed in
    if local_loglevel not in ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'):
        local_loglevel = 'INFO'

    hostname = platform.node().split('.')[0]
    syslog_format = (
        '[service_variant={service_variant}]'
        '[%(name)s][env:{logging_env}] %(levelname)s '
        '[{hostname} %(process)d] [%(filename)s:%(lineno)d] '
        '- %(message)s'
    ).format(
        service_variant=service_variant,
        logging_env=logging_env, hostname=hostname
    )

    handlers = ['console'] if debug else ['local']

    if dev_env:
        # development: rotate plain log files inside log_dir
        local_handler = {
            'class': 'logging.handlers.RotatingFileHandler',
            'level': local_loglevel,
            'formatter': 'standard',
            'filename': os.path.join(log_dir, edx_filename),
            'maxBytes': 1024 * 1024 * 2,
            'backupCount': 5,
        }
    else:
        # production: forward to the local syslog daemon
        local_handler = {
            'level': local_loglevel,
            'class': 'logging.handlers.SysLogHandler',
            # Use a different address for Mac OS X
            'address': '/var/run/syslog' if sys.platform == 'darwin' else '/dev/log',
            'formatter': 'syslog_format',
            'facility': SysLogHandler.LOG_LOCAL0,
        }

    return {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'standard': {
                'format': '%(asctime)s %(levelname)s %(process)d '
                          '[%(name)s] %(filename)s:%(lineno)d - %(message)s',
            },
            'syslog_format': {'format': syslog_format},
            'raw': {'format': '%(message)s'},
        },
        'handlers': {
            'console': {
                'level': 'DEBUG' if debug else 'INFO',
                'class': 'logging.StreamHandler',
                'formatter': 'standard',
                'stream': sys.stdout,
            },
            'local': local_handler,
        },
        'loggers': {
            'requests': {
                'handlers': handlers,
                'level': 'WARNING',
                'propagate': True
            },
            '': {
                'handlers': handlers,
                'level': 'DEBUG',
                'propagate': False
            },
        }
    }
"resource": ""
} |
def _retry_order(self, exception, max_fulfillment_retries, order_number):
    """
    Retry with exponential backoff until fulfillment
    succeeds or the retry limit is reached. If the retry limit is exceeded,
    the exception is re-raised.

    Arguments:
        self: the bound Celery task (provides request.retries and retry()).
        exception: the exception that caused the fulfillment failure.
        max_fulfillment_retries (int): maximum number of retry attempts.
        order_number (str): order being fulfilled, used for logging.
    """
    retries = self.request.retries
    if retries == max_fulfillment_retries:
        logger.exception('Fulfillment of order [%s] failed. Giving up.', order_number)
    else:
        logger.warning('Fulfillment of order [%s] failed. Retrying.', order_number)

    # exponential backoff: 1s, 2s, 4s, ... between attempts
    countdown = 2 ** retries
    raise self.retry(exc=exception, countdown=countdown, max_retries=max_fulfillment_retries)
"resource": ""
} |
def fulfill_order(self, order_number, site_code=None, email_opt_in=False):
    """Fulfills an order.

    Arguments:
        order_number (str): Order number indicating which order to fulfill.
        site_code (str): Identifier of the site the order belongs to.
        email_opt_in (bool): Whether to opt the user into marketing emails.

    Returns:
        None
    """
    max_fulfillment_retries = get_configuration('MAX_FULFILLMENT_RETRIES', site_code=site_code)
    api = get_ecommerce_client(site_code=site_code)

    try:
        logger.info('Requesting fulfillment of order [%s].', order_number)
        api.orders(order_number).fulfill.put(email_opt_in=email_opt_in)
    except exceptions.HttpClientError as exc:
        status_code = exc.response.status_code  # pylint: disable=no-member
        if status_code == 406:
            # The order is not fulfillable. Therefore, it must be complete.
            logger.info('Order [%s] has already been fulfilled. Ignoring.', order_number)
            raise Ignore()
        else:
            # Unknown client error. Let's retry to resolve it.
            logger.warning(
                'Fulfillment of order [%s] failed because of HttpClientError. Retrying',
                order_number,
                exc_info=True
            )
            _retry_order(self, exc, max_fulfillment_retries, order_number)
    except (exceptions.HttpServerError, exceptions.Timeout, SSLError) as exc:
        # Fulfillment failed, retry
        _retry_order(self, exc, max_fulfillment_retries, order_number)
"resource": ""
} |
def get_sailthru_client(site_code):
    """
    Returns a Sailthru client for the specified site.

    Args:
        site_code (str): Site for which the client should be configured.

    Returns:
        SailthruClient

    Raises:
        SailthruNotEnabled: If Sailthru is not enabled for the specified site.
        ConfigurationError: If either the Sailthru API key or secret are not set for the site.
    """
    # Get configuration
    config = get_sailthru_configuration(site_code)

    # Return if Sailthru integration disabled
    if not config.get('SAILTHRU_ENABLE'):
        msg = 'Sailthru is not enabled for site {}'.format(site_code)
        log.debug(msg)
        raise SailthruNotEnabled(msg)

    # Make sure key and secret configured
    key = config.get('SAILTHRU_KEY')
    secret = config.get('SAILTHRU_SECRET')

    if not (key and secret):
        msg = 'Both key and secret are required for site {}'.format(site_code)
        log.error(msg)
        raise ConfigurationError(msg)

    return SailthruClient(key, secret)
"resource": ""
} |
def get(self, key):
    """Get an object from the cache

    Arguments:
        key (str): Cache key

    Returns:
        Cached object, or None if missing or expired
    """
    lock.acquire()
    try:
        if key not in self:
            return None

        now = time.time()
        entry = self[key]
        if entry.expire > now:
            return entry.value

        # expired key, clean out all expired keys
        expired_keys = [k for k, v in self.items() if v.expire <= now]
        for k in expired_keys:
            del self[k]
        return None
    finally:
        lock.release()
"resource": ""
} |
def set(self, key, value, duration):
    """Save an object in the cache

    Arguments:
        key (str): Cache key
        value (object): object to cache
        duration (int): time in seconds to keep object in cache
    """
    # serialize writers against readers in Cache.get
    lock.acquire()
    try:
        self[key] = CacheObject(value, duration)
    finally:
        lock.release()
"resource": ""
} |
def get_configuration(variable, site_code=None):
    """
    Get a value from configuration.

    Retrieves the value corresponding to the given variable from the configuration module
    currently in use by the app. Specify a site_code value to check for a site-specific override.

    Arguments:
        variable (str): The name of a variable from the configuration module.

    Keyword Arguments:
        site_code (str): The SITE_OVERRIDES key to inspect for site-specific values

    Returns:
        The value corresponding to the variable.

    Raises:
        RuntimeError: If the variable is unset in the configuration module.
    """
    name = os.environ.get(CONFIGURATION_MODULE)
    # __import__ performs a full import, but only returns the top-level
    # package, not the targeted module. sys.modules is a dictionary
    # mapping module names to loaded modules.
    __import__(name)
    module = sys.modules[name]

    # Locate the setting in the specified module, then attempt to apply a site-specific override
    setting_value = getattr(module, variable, None)
    site_overrides = getattr(module, 'SITE_OVERRIDES', None)
    if site_overrides and site_code is not None:
        site_specific_overrides = site_overrides.get(site_code)
        if site_specific_overrides:
            override_value = site_specific_overrides.get(variable)
            # NOTE(review): falsy override values (0, '', False) are ignored here — confirm intended
            if override_value:
                setting_value = override_value

    if setting_value is None:
        raise RuntimeError('Worker is improperly configured: {} is unset in {}.'.format(variable, module))

    return setting_value
"resource": ""
} |
def get_overrides_filename(variable):
    """
    Get the name of the file containing configuration overrides
    from the provided environment variable.
    """
    filename = os.environ.get(variable)
    if filename is not None:
        return filename
    raise EnvironmentError('Please set the {} environment variable.'.format(variable))
"resource": ""
} |
def get_value_by_version(d):
    """
    Finds the value depending in current eplus version.

    Parameters
    ----------
    d: dict
        {(0, 0): value, (x, x): value, ...}

    for current version (cv), current value is the value of version v such as v <= cv < v+1
    """
    from oplus import CONF  # touchy import
    current = CONF.eplus_version[:2]
    # walk versions from newest to oldest and return the first one not newer
    # than the current version
    for version, value in sorted(d.items(), reverse=True):
        if current >= version:
            return value
"resource": ""
} |
def eplus_version(self):
    """Return the EnergyPlus version to work with.

    An explicitly configured version (_eplus_version) wins; otherwise the
    most recent installed version is used.

    Raises RuntimeError when no EnergyPlus installation is available.
    """
    available = self.eplus_available_versions
    if not available:
        raise RuntimeError("Energy plus is not install, can't use oplus package.")
    if self._eplus_version is not None:
        return self._eplus_version
    # most recent installed version
    return max(available)
"resource": ""
} |
def _file_refs(self):
    """Lazily build and cache the mapping of file refs to FileInfo objects.

    Built at instance level (rather than as a class constant) so the *_cls
    class attributes can be overridden by subclasses (e.g. in oplusplus).
    """
    if self._prepared_file_refs is None:
        # helper: path getter factory for output files (most refs)
        def out_path(ref):
            return lambda: get_output_file_path(self.dir_path, ref)

        refs = dict()
        refs[FILE_REFS.idf] = FileInfo(
            constructor=lambda path: self._epm_cls.from_idf(path, idd_or_buffer_or_path=self._idd),
            get_path=lambda: get_input_file_path(self.dir_path, FILE_REFS.idf)
        )
        refs[FILE_REFS.epw] = FileInfo(
            constructor=lambda path: self._weather_data_cls.from_epw(path),
            get_path=lambda: get_input_file_path(self.dir_path, FILE_REFS.epw)
        )
        refs[FILE_REFS.eio] = FileInfo(
            constructor=lambda path: self._eio_cls(path),
            get_path=out_path(FILE_REFS.eio)
        )
        refs[FILE_REFS.eso] = FileInfo(
            constructor=lambda path: self._standard_output_cls(path),
            get_path=out_path(FILE_REFS.eso)
        )
        refs[FILE_REFS.mtr] = FileInfo(
            constructor=lambda path: self._standard_output_cls(path),
            get_path=out_path(FILE_REFS.mtr)
        )
        refs[FILE_REFS.mtd] = FileInfo(
            constructor=lambda path: self._mtd_cls(path),
            get_path=out_path(FILE_REFS.mtd)
        )
        refs[FILE_REFS.mdd] = FileInfo(
            # no dedicated parser for mdd: expose the raw file content
            constructor=lambda path: open(path).read(),
            get_path=out_path(FILE_REFS.mdd)
        )
        refs[FILE_REFS.err] = FileInfo(
            constructor=lambda path: self._err_cls(path),
            get_path=out_path(FILE_REFS.err)
        )
        refs[FILE_REFS.summary_table] = FileInfo(
            constructor=lambda path: self._summary_table_cls(path),
            get_path=out_path(FILE_REFS.summary_table)
        )
        self._prepared_file_refs = refs
    return self._prepared_file_refs
"resource": ""
} |
q273770 | Epm._dev_populate_from_json_data | test | def _dev_populate_from_json_data(self, json_data):
"""
!! Must only be called once, when empty !!
"""
# workflow
# --------
# (methods belonging to create/update/delete framework:
# epm._dev_populate_from_json_data, table.batch_add, record.update, queryset.delete, record.delete)
# 1. add inert
# * data is checked
# * old links are unregistered
# * record is stored in table (=> pk uniqueness is checked)
# 2. activate: hooks, links, external files
# manage comment if any
comment = json_data.pop("_comment", None)
if comment is not None:
self._comment = comment
# populate external files
external_files_data = json_data.pop("_external_files", dict())
self._dev_external_files_manager.populate_from_json_data(external_files_data)
# manage records
added_records = []
for table_ref, json_data_records in json_data.items():
# find table
table = getattr(self, table_ref)
# create record (inert)
records = table._dev_add_inert(json_data_records)
# add records (inert)
added_records.extend(records)
# activate hooks
for r in added_records:
r._dev_activate_hooks()
# activate links and external files
for r in added_records:
r._dev_activate_links()
r._dev_activate_external_files() | python | {
"resource": ""
} |
def get_external_files(self):
    """Return the list of all external files attached to the records of this Epm.

    An external file manages file paths.

    Returns
    -------
    list of external files, in table/record iteration order
    """
    external_files = []
    for table in self._tables.values():
        for r in table:
            # extend directly with the record's external files (previously
            # copied through a redundant list comprehension)
            external_files.extend(r.get_external_files())
    return external_files
"resource": ""
} |
def set_defaults(self):
    """Set all null fields that have a default value to that default, Epm-wide."""
    for table in self._tables.values():
        for record in table:
            record.set_defaults()
"resource": ""
} |
def prepare_extensible(self):
    """
    Finish initialization of the descriptor; must be called once all field
    descriptors and tags have been filled.

    If the table is extensible (an "extensible:<n>" tag is present), this:
    * records the repetition cycle (start index, cycle length, field-ref regex
      patterns) in self.extensible_info,
    * drops the field descriptors beyond the first cycle (they repeat),
    * forwards the cycle info to the cycle's field descriptors for error
      reporting during serialization.
    Does nothing for non-extensible tables.
    """
    # see if extensible and store cycle len (tag looks like "extensible:<n>")
    for k in self._tags:
        if "extensible" in k:
            cycle_len = int(k.split(":")[1])
            break
    else:
        # not extensible
        return
    # find cycle start and prepare patterns
    cycle_start = None
    cycle_patterns = []
    for i, field_descriptor in enumerate(self._field_descriptors):
        # quit if finished (one full cycle collected)
        if (cycle_start is not None) and (i >= (cycle_start + cycle_len)):
            break
        # set cycle start if not set yet
        if (cycle_start is None) and ("begin-extensible" in field_descriptor.tags):
            cycle_start = i
        # leave if cycle start not reached yet
        if cycle_start is None:
            continue
        # store pattern: the ref is expected to contain "1" as the cycle-number
        # placeholder (e.g. "vertex_1_x" -> r"vertex_(\d+)_x");
        # NOTE(review): .replace substitutes every "1" in the ref - assumes the
        # cycle digit appears only once, confirm against the idd refs
        cycle_patterns.append(field_descriptor.ref.replace("1", r"(\d+)"))
    else:
        # reached only if the loop never broke: either no "begin-extensible"
        # tag was found, or (presumably) the descriptor list ends at/before the
        # end of the first cycle
        raise RuntimeError("cycle start not found")
    # detach unnecessary field descriptors (fields after the first cycle repeat)
    self._field_descriptors = self._field_descriptors[:cycle_start + cycle_len]
    # store cycle info
    self.extensible_info = (cycle_start, cycle_len, tuple(cycle_patterns))
    # set field descriptor cycle_start index (for error messages while serialization)
    for i, fd in enumerate(self._field_descriptors[cycle_start:]):
        fd.set_extensible_info(cycle_start, cycle_len, cycle_patterns[i])
"resource": ""
} |
def get_extended_name(self, index):
    """Return the field name at *index*, substituting the cycle number for
    extensible tables (e.g. "Vertex 1 X" -> "Vertex 2 X")."""
    name = self.get_field_descriptor(index).name
    if self.extensible_info is None or name is None:
        return name
    cycle_start, cycle_len, _ = self.extensible_info
    cycle_num = (index - cycle_start) // cycle_len
    return name.replace("1", str(cycle_num))
"resource": ""
} |
def short_refs(self):
    """Return the {ref: short_ref, ...} mapping for all registered external files.

    Computed on the fly to avoid managing registrations and un-registrations.
    When several refs share the same naive short ref, they are disambiguated
    with a "-<i>" suffix inserted before the extension.
    """
    # group refs by their naive short ref
    naive_short_refs_d = dict()  # naive_short_ref: {refs, ...}
    for ef in self._external_files:
        naive_short_refs_d.setdefault(ef.naive_short_ref, set()).add(ef.ref)

    short_refs = dict()
    for naive_short_ref, refs in naive_short_refs_d.items():
        if len(refs) == 1:
            # unique: the naive short ref can be used directly
            short_refs[refs.pop()] = naive_short_ref
            continue
        # collision: suffix an index before the extension
        base, ext = os.path.splitext(naive_short_ref)
        for i, ref in enumerate(sorted(refs)):
            # bugfix: splitext's ext already contains the leading dot, so no
            # extra "." is added (previously produced names like "f-0..txt")
            short_refs[ref] = f"{base}-{i}{ext}"
    return short_refs
"resource": ""
} |
def get_value(self, column_name_or_i, filter_column_name_or_i, filter_criterion):
    """Return the value (first occurrence) whose filter column matches *filter_criterion*.

    The comparison depends on the criterion's type: numeric cells are cast
    before comparing, strings are compared case-insensitively.

    Raises ValueError when no row matches.
    """
    value_col = self._get_column_index(column_name_or_i)
    filter_col = self._get_column_index(filter_column_name_or_i)

    # pick the comparison matching the criterion's type
    matches = {
        float: lambda cell: float(cell) == filter_criterion,
        int: lambda cell: int(cell) == filter_criterion,
        str: lambda cell: cell.lower() == filter_criterion.lower()
    }[type(filter_criterion)]

    for row in self._data:
        if matches(row[filter_col]):
            return row[value_col]
    raise ValueError("Filter did not return any values.")
"resource": ""
} |
def _update_value_inert(self, index, value):
    """
    Set field *index* to *value* without activating hooks/links/external files.

    Only called by _update_inert: the caller is responsible for the later
    activation step and for required-field checks.
    """
    # get field descriptor
    field_descriptor = self._table._dev_descriptor.get_field_descriptor(index)
    # prepare value (deserialize may wrap it as a Link / RecordHook / ExternalFile)
    value = field_descriptor.deserialize(value, index)
    # unregister previous link if relevant
    if isinstance(value, Link):
        # de-activate current link if any
        current_link = self._data.get(index)
        if current_link is not None:
            current_link.unregister()
    # unregister previous hook if relevant
    if isinstance(value, RecordHook):
        current_record_hook = self._data.get(index)
        if current_record_hook is not None:
            current_record_hook.unregister()
    # unregister previous external file if relevant
    if isinstance(value, ExternalFile):
        current_external_file = self._data.get(index)
        if current_external_file is not None:
            current_external_file._dev_unregister()
    # if None (or a none-sentinel) remove the field and leave
    if value in (None, NONE_RECORD_HOOK, NONE_LINK, NONE_EXTERNAL_FILE):
        # we don't check required, because this method is called by _update_inert which does the job
        self._dev_set_none_without_unregistering(index, check_not_required=False)
        return
    # if relevant, store current pk to signal table after the write
    old_hook = None
    if index == 0 and not self._table._dev_auto_pk:
        old_hook = self._data.get(0)  # we use get, because record may not have a pk yet if it is being created
    # set value
    self._data[index] = value
    # signal pk update if relevant (table must re-index this record)
    if old_hook is not None:
        self._table._dev_record_pk_was_updated(old_hook.target_value)
"resource": ""
} |
def update(self, data=None, **or_data):
    """Update several fields of the record at once.

    Parameters
    ----------
    data: dictionary containing field lowercase names or index as keys, and
        field values as values (dict syntax)
    or_data: keyword arguments containing field names as keys (kwargs syntax)

    Part of the create/update/delete framework: fields are first set inert
    (data checked, old links unregistered, pk uniqueness checked), then hooks,
    links and external files are activated.
    """
    if data is None:
        data = or_data
    self._update_inert(data)
    self._dev_activate_hooks()
    self._dev_activate_links()
    self._dev_activate_external_files()
"resource": ""
} |
def set_defaults(self):
    """Set every empty field that has a declared default value to that default."""
    # candidate (index, default) pairs for all fields that are currently empty
    candidates = (
        (i, self.get_field_descriptor(i).tags.get("default", [None])[0])
        for i in range(len(self)) if i not in self._data
    )
    # only apply fields that actually declare a default
    self.update({i: default for i, default in candidates if default is not None})
"resource": ""
} |
def add_fields(self, *args):
    """Append values at the end of an extensible record without naming fields.

    Only works for extensible records: each value is assigned to the next
    free index.

    Parameters
    ----------
    args: field values

    Raises
    ------
    TypeError if the record is not extensible
    """
    if not self.is_extensible():
        raise TypeError("Can't use add_fields on a non extensible record.")
    # map each new value to the first index after the current last field
    # (dict comprehension with enumerate instead of dict([... range(len(args))]))
    start = len(self)
    self.update({start + i: value for i, value in enumerate(args)})
"resource": ""
} |
def pop(self, index=None):
    """Remove an extensible field value and shift the following values down.

    Only works on extensible records.

    Parameters
    ----------
    index: int, default None
        index of the field to remove.

    Returns
    -------
    serialized value of the popped field
    """
    # will raise if the record is not extensible
    index = self._prepare_pop_insert_index(index=index)
    cycle_start = self.get_extensible_info()[0]

    # empty the extensible part, drop the requested value, push the rest back
    values = self.clear_extensible_fields()
    popped = values.pop(index - cycle_start)
    self.add_fields(*values)
    return popped
"resource": ""
} |
def insert(self, index, value):
    """Insert a value in an extensible record, shifting the following values up.

    Only works on extensible records.

    Parameters
    ----------
    index: position of insertion
    value: value to insert
    """
    # will raise if the record is not extensible
    index = self._prepare_pop_insert_index(index=index)

    # empty the extensible part, splice the new value in, push everything back
    values = self.clear_extensible_fields()
    values.insert(index, value)
    self.add_fields(*values)
"resource": ""
} |
def delete(self):
    """Delete the record and remove it from the database.

    Part of the create/update/delete framework: links, hooks and external
    files are unregistered first, then the record is detached from its table
    and made stale.
    """
    # unregister all relations
    self._unregister_links()
    self._unregister_hooks()
    self._unregister_external_files()

    # detach from table (without triggering a second unregistration)
    self.get_table()._dev_remove_record_without_unregistering(self)

    # make stale so any further use fails fast
    self._table = None
    self._data = None
"resource": ""
} |
def register_record_hook(self, hook):
    """Register a record hook under each of its keys.

    The hook's target record must already have been set. Raises
    FieldValidationError on the first key that is already taken.
    """
    for key in hook.keys:
        if key not in self._record_hooks:
            self._record_hooks[key] = hook
            continue
        field_descriptor = hook.target_record.get_field_descriptor(hook.target_index)
        raise FieldValidationError(
            f"Reference key already exists, can't create: {key}. "
            f"{field_descriptor.get_error_location_message(hook.target_value, hook.target_index)}"
        )
"resource": ""
} |
def register_link(self, link):
    """
    Register a link by resolving its target among the known hooks.

    The link's source record and index must already have been set. The target
    is looked up first among record hooks, then among table hooks; the link is
    then indexed both by source record and by target.

    Raises FieldValidationError if no hook matches any of the link's
    (reference, value) keys.
    """
    keys = tuple((ref, link.initial_hook_value) for ref in link.hook_references)
    # look for a record hook
    for k in keys:
        if k in self._record_hooks:
            # set link target
            link.set_target(target_record=self._record_hooks[k].target_record)
            break
    else:
        # no record hook matched: look for a table hook
        for k in keys:
            if k in self._table_hooks:
                # set link target
                link.set_target(target_table=self._table_hooks[k])
                break
        else:
            # no hook of either kind matched any key -> invalid reference
            field_descriptor = link.source_record.get_field_descriptor(link.source_index)
            raise FieldValidationError(
                f"No object found with any of given references : {keys}. "
                f"{field_descriptor.get_error_location_message(link.initial_hook_value)}"
            )
    # store by source
    if link.source_record not in self._links_by_source:
        self._links_by_source[link.source_record] = set()
    self._links_by_source[link.source_record].add(link)
    # store by target
    if link.target not in self._links_by_target:
        self._links_by_target[link.target] = set()
    self._links_by_target[link.target].add(link)
"resource": ""
} |
q273786 | IntentContainer._create_regex | test | def _create_regex(self, line, intent_name):
""" Create regex and return. If error occurs returns None. """
try:
return re.compile(self._create_intent_pattern(line, intent_name),
re.IGNORECASE)
except sre_constants.error as e:
LOG.warning('Failed to parse the line "{}" '
'for {}'.format(line, intent_name))
return None | python | {
"resource": ""
} |
def remaining_duration(self, time):
    '''Return the not-yet-elapsed part of the recording at *time*.
    Never negative.
    '''
    effective_start = max(self.start, time)
    return max(0, self.end - effective_start)
"resource": ""
} |
def serialize(self):
    '''Serialize this object as dictionary usable for conversion to JSON.

    :return: Dictionary representing this object.
    '''
    attributes = {
        'start': self.start,
        'end': self.end,
        'uid': self.uid,
        'title': self.title,
        'data': self.get_data(),
        'status': Status.str(self.status)
    }
    return {'type': 'event', 'id': self.uid, 'attributes': attributes}
"resource": ""
} |
def http_request(url, post_data=None):
    '''Make an HTTP request to a given URL with optional parameters.

    The request is a GET unless *post_data* is given, in which case a POST is
    sent. Authentication uses HTTP digest with the credentials from the
    configuration, plus the X-Requested-Auth header (presumably what the
    Opencast server expects to offer digest auth).

    :param url: URL to request
    :param post_data: optional POST parameters (pycurl HTTPPOST format)
    :return: response body as bytes
    :raises pycurl.error: on connection failure or HTTP error status
        (FAILONERROR is set)
    '''
    logger.debug('Requesting URL: %s' % url)
    buf = bio()
    curl = pycurl.Curl()
    # pycurl needs a byte string; non-ascii characters are dropped
    curl.setopt(curl.URL, url.encode('ascii', 'ignore'))
    # Disable HTTPS verification methods if insecure is set
    if config()['server']['insecure']:
        curl.setopt(curl.SSL_VERIFYPEER, 0)
        curl.setopt(curl.SSL_VERIFYHOST, 0)
    if config()['server']['certificate']:
        # Make sure verification methods are turned on
        curl.setopt(curl.SSL_VERIFYPEER, 1)
        curl.setopt(curl.SSL_VERIFYHOST, 2)
        # Import your certificates
        curl.setopt(pycurl.CAINFO, config()['server']['certificate'])
    if post_data:
        curl.setopt(curl.HTTPPOST, post_data)
    # collect the response body in the buffer
    curl.setopt(curl.WRITEFUNCTION, buf.write)
    curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_DIGEST)
    curl.setopt(pycurl.USERPWD, "%s:%s" % (config()['server']['username'],
                                           config()['server']['password']))
    curl.setopt(curl.HTTPHEADER, ['X-Requested-Auth: Digest'])
    # raise on HTTP error status and follow redirects
    curl.setopt(curl.FAILONERROR, True)
    curl.setopt(curl.FOLLOWLOCATION, True)
    curl.perform()
    curl.close()
    result = buf.getvalue()
    buf.close()
    return result
"resource": ""
} |
def get_service(service_type):
    '''Get available service endpoints for a given service type from the
    Opencast ServiceRegistry.

    :param service_type: Opencast service type identifier
    :return: list of endpoint URLs (host + path) that are online and active
    :raises pycurl.error: if the registry cannot be reached
    '''
    # fix: use a dedicated name for the query path instead of re-using
    # `endpoint` for path, loop variable and results
    path = '/services/available.json?serviceType=' + str(service_type)
    url = '%s%s' % (config()['server']['url'], path)
    response = http_request(url).decode('utf-8')
    # 'service' may be a single dict or a list; normalize to a list
    services = (json.loads(response).get('services') or {}).get('service', [])
    services = ensurelist(services)
    endpoints = [service['host'] + service['path'] for service in services
                 if service['online'] and service['active']]
    for endpoint in endpoints:
        logger.info(u'Endpoint for %s: %s', service_type, endpoint)
    return endpoints
"resource": ""
} |
def try_mkdir(directory):
    '''Try to create a directory. Pass without error if it already exists.

    Any other error (missing parent, permissions, ...) is still raised.
    '''
    try:
        os.mkdir(directory)
    except FileExistsError:
        # directory is already there -- that is fine
        pass
"resource": ""
} |
def configure_service(service):
    '''Resolve the endpoints of a given Opencast service and store them in the
    configuration under the "service-<name>" key.

    Retries every 5 seconds until the service is resolved or the agent is
    asked to terminate.
    '''
    key = 'service-' + service
    while not config().get(key) and not terminate():
        try:
            config()[key] = get_service('org.opencastproject.' + service)
        except pycurl.error as e:
            logger.error('Could not get %s endpoint: %s. Retrying in 5s' %
                         (service, e))
            time.sleep(5.0)
"resource": ""
} |
def register_ca(status='idle'):
    '''Register this capture agent at the Opencast admin server so that it
    shows up in the admin interface.

    Skipped entirely in backup mode; failures are logged but not raised.

    :param status: Current status of the capture agent
    '''
    # A backup CA runs silently in the background and never announces itself
    # to the Opencast core:
    if config()['agent']['backup_mode']:
        return
    agent_name = urlquote(config()['agent']['name'].encode('utf-8'), safe='')
    url = '%s/agents/%s' % (config()['service-capture.admin'][0], agent_name)
    params = [('address', config()['ui']['url']), ('state', status)]
    try:
        response = http_request(url, params).decode('utf-8')
        if response:
            logger.info(response)
    except pycurl.error as e:
        logger.warning('Could not set agent state to %s: %s' % (status, e))
"resource": ""
} |
def recording_state(recording_id, status):
    '''Report the state of the given recording to the Opencast core.

    Skipped in backup mode: the primary CA reports the state and we do not
    want to interfere. Failures are logged but not raised.

    :param recording_id: ID of the current recording
    :param status: Status of the recording
    '''
    if config()['agent']['backup_mode']:
        return
    url = '%s/recordings/%s' % (config()['service-capture.admin'][0],
                                recording_id)
    try:
        result = http_request(url, [('state', status)])
        logger.info(result)
    except pycurl.error as e:
        logger.warning('Could not set recording state to %s: %s' % (status, e))
"resource": ""
} |
def update_event_status(event, status):
    '''Persist a new status for the given event and update the object itself.

    The database row is matched by the event's start time.
    '''
    session = db.get_session()
    session.query(db.RecordedEvent)\
           .filter(db.RecordedEvent.start == event.start)\
           .update({'status': status})
    event.status = status
    session.commit()
"resource": ""
} |
def update_agent_state():
    '''Report the current agent state to Opencast.

    The state is derived from the service states by priority:
    schedule stopped -> offline, capture busy -> capturing,
    ingest busy -> uploading, otherwise idle.
    '''
    configure_service('capture.admin')
    if get_service_status(db.Service.SCHEDULE) == db.ServiceStatus.STOPPED:
        status = 'offline'
    elif get_service_status(db.Service.CAPTURE) == db.ServiceStatus.BUSY:
        status = 'capturing'
    elif get_service_status(db.Service.INGEST) == db.ServiceStatus.BUSY:
        status = 'uploading'
    else:
        status = 'idle'
    register_ca(status=status)
"resource": ""
} |
def configuration_file(cfgfile):
    '''Return the configuration file to use.

    An explicitly given path wins; otherwise the local ./etc/pyca.conf is
    used when it exists, falling back to the system-wide /etc/pyca.conf.
    '''
    if cfgfile is not None:
        return cfgfile
    local_cfg = './etc/pyca.conf'
    return local_cfg if os.path.isfile(local_cfg) else '/etc/pyca.conf'
"resource": ""
} |
def update_configuration(cfgfile=None):
    '''Update the global configuration from file and re-initialize logging.

    :param cfgfile: Configuration file to load (probed automatically if None).
    :return: the loaded ConfigObj instance
    :raises ValueError: if validation fails or the lists of capture files and
        flavors differ in length
    '''
    configobj.DEFAULT_INTERPOLATION = 'template'
    cfgfile = configuration_file(cfgfile)
    cfg = configobj.ConfigObj(cfgfile, configspec=cfgspec, encoding='utf-8')
    validator = Validator()
    val = cfg.validate(validator)
    # validate() returns True on success, otherwise a result structure
    if val is not True:
        raise ValueError('Invalid configuration: %s' % val)
    if len(cfg['capture']['files']) != len(cfg['capture']['flavors']):
        raise ValueError('List of files and flavors do not match')
    # publish as the module-level singleton read back through config()
    globals()['__config'] = cfg
    logger_init()
    if cfg['server'].get('url', '').endswith('/'):
        logger.warning('Base URL ends with /. This is most likely a '
                       'configuration error. The URL should contain nothing '
                       'of the service paths.')
    logger.info('Configuration loaded from %s' % cfgfile)
    check()
    return cfg
"resource": ""
} |
def check():
    '''Run sanity checks on the loaded configuration, logging notable settings.

    Raises if the configured TLS certificate cannot be opened for reading.
    '''
    if config('server')['insecure']:
        logger.warning('HTTPS CHECKS ARE TURNED OFF. A SECURE CONNECTION IS '
                       'NOT GUARANTEED')
    if config('server')['certificate']:
        # Ensure certificate exists and is readable
        open(config('server')['certificate'], 'rb').close()
    if config('agent')['backup_mode']:
        logger.info('Agent runs in backup mode. No data will be sent to '
                    'Opencast')
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.