language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | tensorflow__tensorflow | tensorflow/python/autograph/converters/asserts.py | {
"start": 881,
"end": 1691
} | class ____(converter.Base):
"""Transforms Assert nodes to Call so they can be handled as functions."""
def visit_Assert(self, node):
self.generic_visit(node)
# Note: The lone tf.Assert call will be wrapped with control_dependencies
# by side_effect_guards.
template = """
ag__.assert_stmt(test, lambda: msg)
"""
if node.msg is None:
return templates.replace(
template,
test=node.test,
msg=gast.Constant('Assertion error', kind=None))
elif isinstance(node.msg, gast.Constant):
return templates.replace(template, test=node.test, msg=node.msg)
else:
raise NotImplementedError('can only convert string messages for now.')
def transform(node, ctx):
node = AssertTransformer(ctx).visit(node)
return node
| AssertTransformer |
python | numba__numba | numba/core/ir_utils.py | {
"start": 854,
"end": 61443
} | class ____:
def __init__(self, value=0):
self._value = value
def next(self):
self._value += 1
return self._value
def update(self, newval):
self._value = max(newval, self._value)
_the_max_label = _MaxLabel()
del _MaxLabel
def get_unused_var_name(prefix, var_table):
""" Get a new var name with a given prefix and
make sure it is unused in the given variable table.
"""
cur = 0
while True:
var = prefix + str(cur)
if var not in var_table:
return var
cur += 1
def next_label():
return _the_max_label.next()
def mk_alloc(typingctx, typemap, calltypes, lhs, size_var, dtype, scope, loc,
lhs_typ):
"""generate an array allocation with np.empty() and return list of nodes.
size_var can be an int variable or tuple of int variables.
lhs_typ is the type of the array being allocated.
"""
out = []
ndims = 1
size_typ = types.intp
if isinstance(size_var, tuple):
if len(size_var) == 1:
size_var = size_var[0]
size_var = convert_size_to_var(size_var, typemap, scope, loc, out)
else:
# tuple_var = build_tuple([size_var...])
ndims = len(size_var)
tuple_var = ir.Var(scope, mk_unique_var("$tuple_var"), loc)
if typemap:
typemap[tuple_var.name] = types.containers.UniTuple(
types.intp, ndims)
# constant sizes need to be assigned to vars
new_sizes = [convert_size_to_var(s, typemap, scope, loc, out)
for s in size_var]
tuple_call = ir.Expr.build_tuple(new_sizes, loc)
tuple_assign = ir.Assign(tuple_call, tuple_var, loc)
out.append(tuple_assign)
size_var = tuple_var
size_typ = types.containers.UniTuple(types.intp, ndims)
if hasattr(lhs_typ, "__allocate__"):
return lhs_typ.__allocate__(
typingctx,
typemap,
calltypes,
lhs,
size_var,
dtype,
scope,
loc,
lhs_typ,
size_typ,
out,
)
# g_np_var = Global(numpy)
g_np_var = ir.Var(scope, mk_unique_var("$np_g_var"), loc)
if typemap:
typemap[g_np_var.name] = types.misc.Module(numpy)
g_np = ir.Global('np', numpy, loc)
g_np_assign = ir.Assign(g_np, g_np_var, loc)
# attr call: empty_attr = getattr(g_np_var, empty)
empty_attr_call = ir.Expr.getattr(g_np_var, "empty", loc)
attr_var = ir.Var(scope, mk_unique_var("$empty_attr_attr"), loc)
if typemap:
typemap[attr_var.name] = get_np_ufunc_typ(numpy.empty, typingctx)
attr_assign = ir.Assign(empty_attr_call, attr_var, loc)
# Assume str(dtype) returns a valid type
dtype_str = str(dtype)
# alloc call: lhs = empty_attr(size_var, typ_var)
typ_var = ir.Var(scope, mk_unique_var("$np_typ_var"), loc)
if typemap:
typemap[typ_var.name] = types.functions.NumberClass(dtype)
# If dtype is a datetime/timedelta with a unit,
# then it won't return a valid type and instead can be created
# with a string. i.e. "datetime64[ns]")
if (isinstance(dtype, (types.NPDatetime, types.NPTimedelta)) and
dtype.unit != ''):
typename_const = ir.Const(dtype_str, loc)
typ_var_assign = ir.Assign(typename_const, typ_var, loc)
else:
if dtype_str=='bool':
# empty doesn't like 'bool' sometimes (e.g. kmeans example)
dtype_str = 'bool_'
np_typ_getattr = ir.Expr.getattr(g_np_var, dtype_str, loc)
typ_var_assign = ir.Assign(np_typ_getattr, typ_var, loc)
alloc_call = ir.Expr.call(attr_var, [size_var, typ_var], (), loc)
if calltypes:
cac = typemap[attr_var.name].get_call_type(
typingctx, [size_typ, types.functions.NumberClass(dtype)], {})
# By default, all calls to "empty" are typed as returning a standard
# NumPy ndarray. If we are allocating a ndarray subclass here then
# just change the return type to be that of the subclass.
cac._return_type = (lhs_typ.copy(layout='C')
if lhs_typ.layout == 'F'
else lhs_typ)
calltypes[alloc_call] = cac
if lhs_typ.layout == 'F':
empty_c_typ = lhs_typ.copy(layout='C')
empty_c_var = ir.Var(scope, mk_unique_var("$empty_c_var"), loc)
if typemap:
typemap[empty_c_var.name] = lhs_typ.copy(layout='C')
empty_c_assign = ir.Assign(alloc_call, empty_c_var, loc)
# attr call: asfortranarray = getattr(g_np_var, asfortranarray)
asfortranarray_attr_call = ir.Expr.getattr(g_np_var, "asfortranarray", loc)
afa_attr_var = ir.Var(scope, mk_unique_var("$asfortran_array_attr"), loc)
if typemap:
typemap[afa_attr_var.name] = get_np_ufunc_typ(numpy.asfortranarray, typingctx)
afa_attr_assign = ir.Assign(asfortranarray_attr_call, afa_attr_var, loc)
# call asfortranarray
asfortranarray_call = ir.Expr.call(afa_attr_var, [empty_c_var], (), loc)
if calltypes:
calltypes[asfortranarray_call] = typemap[afa_attr_var.name].get_call_type(
typingctx, [empty_c_typ], {})
asfortranarray_assign = ir.Assign(asfortranarray_call, lhs, loc)
out.extend([g_np_assign, attr_assign, typ_var_assign, empty_c_assign,
afa_attr_assign, asfortranarray_assign])
else:
alloc_assign = ir.Assign(alloc_call, lhs, loc)
out.extend([g_np_assign, attr_assign, typ_var_assign, alloc_assign])
return out
def convert_size_to_var(size_var, typemap, scope, loc, nodes):
if isinstance(size_var, int):
new_size = ir.Var(scope, mk_unique_var("$alloc_size"), loc)
if typemap:
typemap[new_size.name] = types.intp
size_assign = ir.Assign(ir.Const(size_var, loc), new_size, loc)
nodes.append(size_assign)
return new_size
assert isinstance(size_var, ir.Var)
return size_var
def get_np_ufunc_typ(func, typingctx):
"""get type of the incoming function
Resolve using the context for target-awareness
"""
try:
return typingctx.resolve_value_type(func)
except TypingError:
raise RuntimeError("type for func ", func, " not found")
def mk_range_block(typemap, start, stop, step, calltypes, scope, loc):
"""make a block that initializes loop range and iteration variables.
target label in jump needs to be set.
"""
# g_range_var = Global(range)
g_range_var = ir.Var(scope, mk_unique_var("$range_g_var"), loc)
typemap[g_range_var.name] = get_global_func_typ(range)
g_range = ir.Global('range', range, loc)
g_range_assign = ir.Assign(g_range, g_range_var, loc)
arg_nodes, args = _mk_range_args(typemap, start, stop, step, scope, loc)
# range_call_var = call g_range_var(start, stop, step)
range_call = ir.Expr.call(g_range_var, args, (), loc)
calltypes[range_call] = typemap[g_range_var.name].get_call_type(
typing.Context(), [types.intp] * len(args), {})
#signature(types.range_state64_type, types.intp)
range_call_var = ir.Var(scope, mk_unique_var("$range_c_var"), loc)
typemap[range_call_var.name] = types.iterators.RangeType(types.intp)
range_call_assign = ir.Assign(range_call, range_call_var, loc)
# iter_var = getiter(range_call_var)
iter_call = ir.Expr.getiter(range_call_var, loc)
if config.USE_LEGACY_TYPE_SYSTEM:
calltype_sig = signature(types.range_iter64_type, types.range_state64_type)
else:
calltype_sig = signature(types.range_iter_type, types.range_state_type)
calltypes[iter_call] = calltype_sig
iter_var = ir.Var(scope, mk_unique_var("$iter_var"), loc)
typemap[iter_var.name] = types.iterators.RangeIteratorType(types.intp)
iter_call_assign = ir.Assign(iter_call, iter_var, loc)
# $phi = iter_var
phi_var = ir.Var(scope, mk_unique_var("$phi"), loc)
typemap[phi_var.name] = types.iterators.RangeIteratorType(types.intp)
phi_assign = ir.Assign(iter_var, phi_var, loc)
# jump to header
jump_header = ir.Jump(-1, loc)
range_block = ir.Block(scope, loc)
range_block.body = arg_nodes + [g_range_assign, range_call_assign,
iter_call_assign, phi_assign, jump_header]
return range_block
def _mk_range_args(typemap, start, stop, step, scope, loc):
nodes = []
if isinstance(stop, ir.Var):
g_stop_var = stop
else:
assert isinstance(stop, int)
g_stop_var = ir.Var(scope, mk_unique_var("$range_stop"), loc)
if typemap:
typemap[g_stop_var.name] = types.intp
stop_assign = ir.Assign(ir.Const(stop, loc), g_stop_var, loc)
nodes.append(stop_assign)
if start == 0 and step == 1:
return nodes, [g_stop_var]
if isinstance(start, ir.Var):
g_start_var = start
else:
assert isinstance(start, int)
g_start_var = ir.Var(scope, mk_unique_var("$range_start"), loc)
if typemap:
typemap[g_start_var.name] = types.intp
start_assign = ir.Assign(ir.Const(start, loc), g_start_var, loc)
nodes.append(start_assign)
if step == 1:
return nodes, [g_start_var, g_stop_var]
if isinstance(step, ir.Var):
g_step_var = step
else:
assert isinstance(step, int)
g_step_var = ir.Var(scope, mk_unique_var("$range_step"), loc)
if typemap:
typemap[g_step_var.name] = types.intp
step_assign = ir.Assign(ir.Const(step, loc), g_step_var, loc)
nodes.append(step_assign)
return nodes, [g_start_var, g_stop_var, g_step_var]
def get_global_func_typ(func):
"""get type variable for func() from builtin registry"""
for (k, v) in typing.templates.builtin_registry.globals:
if k == func:
return v
raise RuntimeError("func type not found {}".format(func))
def mk_loop_header(typemap, phi_var, calltypes, scope, loc):
"""make a block that is a loop header updating iteration variables.
target labels in branch need to be set.
"""
# iternext_var = iternext(phi_var)
iternext_var = ir.Var(scope, mk_unique_var("$iternext_var"), loc)
typemap[iternext_var.name] = types.containers.Pair(
types.intp, types.boolean)
iternext_call = ir.Expr.iternext(phi_var, loc)
if config.USE_LEGACY_TYPE_SYSTEM:
range_iter_type = types.range_iter64_type
else:
range_iter_type = types.range_iter_type
calltypes[iternext_call] = signature(
types.containers.Pair(
types.intp,
types.boolean),
range_iter_type)
iternext_assign = ir.Assign(iternext_call, iternext_var, loc)
# pair_first_var = pair_first(iternext_var)
pair_first_var = ir.Var(scope, mk_unique_var("$pair_first_var"), loc)
typemap[pair_first_var.name] = types.intp
pair_first_call = ir.Expr.pair_first(iternext_var, loc)
pair_first_assign = ir.Assign(pair_first_call, pair_first_var, loc)
# pair_second_var = pair_second(iternext_var)
pair_second_var = ir.Var(scope, mk_unique_var("$pair_second_var"), loc)
typemap[pair_second_var.name] = types.boolean
pair_second_call = ir.Expr.pair_second(iternext_var, loc)
pair_second_assign = ir.Assign(pair_second_call, pair_second_var, loc)
# phi_b_var = pair_first_var
phi_b_var = ir.Var(scope, mk_unique_var("$phi"), loc)
typemap[phi_b_var.name] = types.intp
phi_b_assign = ir.Assign(pair_first_var, phi_b_var, loc)
# branch pair_second_var body_block out_block
branch = ir.Branch(pair_second_var, -1, -1, loc)
header_block = ir.Block(scope, loc)
header_block.body = [iternext_assign, pair_first_assign,
pair_second_assign, phi_b_assign, branch]
return header_block
def legalize_names(varnames):
"""returns a dictionary for conversion of variable names to legal
parameter names.
"""
var_map = {}
for var in varnames:
new_name = var.replace("_", "__").replace("$", "_").replace(".", "_")
assert new_name not in var_map
var_map[var] = new_name
return var_map
def get_name_var_table(blocks):
"""create a mapping from variable names to their ir.Var objects"""
def get_name_var_visit(var, namevar):
namevar[var.name] = var
return var
namevar = {}
visit_vars(blocks, get_name_var_visit, namevar)
return namevar
def replace_var_names(blocks, namedict):
"""replace variables (ir.Var to ir.Var) from dictionary (name -> name)"""
# remove identity values to avoid infinite loop
new_namedict = {}
for l, r in namedict.items():
if l != r:
new_namedict[l] = r
def replace_name(var, namedict):
assert isinstance(var, ir.Var)
while var.name in namedict:
var = ir.Var(var.scope, namedict[var.name], var.loc)
return var
visit_vars(blocks, replace_name, new_namedict)
def replace_var_callback(var, vardict):
assert isinstance(var, ir.Var)
while var.name in vardict.keys():
assert(vardict[var.name].name != var.name)
new_var = vardict[var.name]
var = ir.Var(new_var.scope, new_var.name, new_var.loc)
return var
def replace_vars(blocks, vardict):
"""replace variables (ir.Var to ir.Var) from dictionary (name -> ir.Var)"""
# remove identity values to avoid infinite loop
new_vardict = {}
for l, r in vardict.items():
if l != r.name:
new_vardict[l] = r
visit_vars(blocks, replace_var_callback, new_vardict)
def replace_vars_stmt(stmt, vardict):
visit_vars_stmt(stmt, replace_var_callback, vardict)
def replace_vars_inner(node, vardict):
return visit_vars_inner(node, replace_var_callback, vardict)
# other packages that define new nodes add calls to visit variables in them
# format: {type:function}
visit_vars_extensions = {}
def visit_vars(blocks, callback, cbdata):
"""go over statements of block bodies and replace variable names with
dictionary.
"""
for block in blocks.values():
for stmt in block.body:
visit_vars_stmt(stmt, callback, cbdata)
return
def visit_vars_stmt(stmt, callback, cbdata):
# let external calls handle stmt if type matches
for t, f in visit_vars_extensions.items():
if isinstance(stmt, t):
f(stmt, callback, cbdata)
return
if isinstance(stmt, ir.Assign):
stmt.target = visit_vars_inner(stmt.target, callback, cbdata)
stmt.value = visit_vars_inner(stmt.value, callback, cbdata)
elif isinstance(stmt, ir.Arg):
stmt.name = visit_vars_inner(stmt.name, callback, cbdata)
elif isinstance(stmt, ir.Return):
stmt.value = visit_vars_inner(stmt.value, callback, cbdata)
elif isinstance(stmt, ir.Raise):
stmt.exception = visit_vars_inner(stmt.exception, callback, cbdata)
elif isinstance(stmt, ir.Branch):
stmt.cond = visit_vars_inner(stmt.cond, callback, cbdata)
elif isinstance(stmt, ir.Jump):
stmt.target = visit_vars_inner(stmt.target, callback, cbdata)
elif isinstance(stmt, ir.Del):
# Because Del takes only a var name, we make up by
# constructing a temporary variable.
var = ir.Var(None, stmt.value, stmt.loc)
var = visit_vars_inner(var, callback, cbdata)
stmt.value = var.name
elif isinstance(stmt, ir.DelAttr):
stmt.target = visit_vars_inner(stmt.target, callback, cbdata)
stmt.attr = visit_vars_inner(stmt.attr, callback, cbdata)
elif isinstance(stmt, ir.SetAttr):
stmt.target = visit_vars_inner(stmt.target, callback, cbdata)
stmt.attr = visit_vars_inner(stmt.attr, callback, cbdata)
stmt.value = visit_vars_inner(stmt.value, callback, cbdata)
elif isinstance(stmt, ir.DelItem):
stmt.target = visit_vars_inner(stmt.target, callback, cbdata)
stmt.index = visit_vars_inner(stmt.index, callback, cbdata)
elif isinstance(stmt, ir.StaticSetItem):
stmt.target = visit_vars_inner(stmt.target, callback, cbdata)
stmt.index_var = visit_vars_inner(stmt.index_var, callback, cbdata)
stmt.value = visit_vars_inner(stmt.value, callback, cbdata)
elif isinstance(stmt, ir.SetItem):
stmt.target = visit_vars_inner(stmt.target, callback, cbdata)
stmt.index = visit_vars_inner(stmt.index, callback, cbdata)
stmt.value = visit_vars_inner(stmt.value, callback, cbdata)
elif isinstance(stmt, ir.Print):
stmt.args = [visit_vars_inner(x, callback, cbdata) for x in stmt.args]
else:
# TODO: raise NotImplementedError("no replacement for IR node: ", stmt)
pass
return
def visit_vars_inner(node, callback, cbdata):
if isinstance(node, ir.Var):
return callback(node, cbdata)
elif isinstance(node, list):
return [visit_vars_inner(n, callback, cbdata) for n in node]
elif isinstance(node, tuple):
return tuple([visit_vars_inner(n, callback, cbdata) for n in node])
elif isinstance(node, ir.Expr):
# if node.op in ['binop', 'inplace_binop']:
# lhs = node.lhs.name
# rhs = node.rhs.name
# node.lhs.name = callback, cbdata.get(lhs, lhs)
# node.rhs.name = callback, cbdata.get(rhs, rhs)
for arg in node._kws.keys():
node._kws[arg] = visit_vars_inner(node._kws[arg], callback, cbdata)
elif isinstance(node, ir.Yield):
node.value = visit_vars_inner(node.value, callback, cbdata)
return node
add_offset_to_labels_extensions = {}
def add_offset_to_labels(blocks, offset):
"""add an offset to all block labels and jump/branch targets
"""
new_blocks = {}
for l, b in blocks.items():
# some parfor last blocks might be empty
term = None
if b.body:
term = b.body[-1]
for inst in b.body:
for T, f in add_offset_to_labels_extensions.items():
if isinstance(inst, T):
f_max = f(inst, offset)
if isinstance(term, ir.Jump):
b.body[-1] = ir.Jump(term.target + offset, term.loc)
if isinstance(term, ir.Branch):
b.body[-1] = ir.Branch(term.cond, term.truebr + offset,
term.falsebr + offset, term.loc)
new_blocks[l + offset] = b
return new_blocks
find_max_label_extensions = {}
def find_max_label(blocks):
max_label = 0
for l, b in blocks.items():
term = None
if b.body:
term = b.body[-1]
for inst in b.body:
for T, f in find_max_label_extensions.items():
if isinstance(inst, T):
f_max = f(inst)
if f_max > max_label:
max_label = f_max
if l > max_label:
max_label = l
return max_label
def flatten_labels(blocks):
"""makes the labels in range(0, len(blocks)), useful to compare CFGs
"""
# first bulk move the labels out of the rewrite range
blocks = add_offset_to_labels(blocks, find_max_label(blocks) + 1)
# order them in topo order because it's easier to read
new_blocks = {}
topo_order = find_topo_order(blocks)
l_map = dict()
idx = 0
for x in topo_order:
l_map[x] = idx
idx += 1
for t_node in topo_order:
b = blocks[t_node]
# some parfor last blocks might be empty
term = None
if b.body:
term = b.body[-1]
if isinstance(term, ir.Jump):
b.body[-1] = ir.Jump(l_map[term.target], term.loc)
if isinstance(term, ir.Branch):
b.body[-1] = ir.Branch(term.cond, l_map[term.truebr],
l_map[term.falsebr], term.loc)
new_blocks[l_map[t_node]] = b
return new_blocks
def remove_dels(blocks):
"""remove ir.Del nodes"""
for block in blocks.values():
new_body = []
for stmt in block.body:
if not isinstance(stmt, ir.Del):
new_body.append(stmt)
block.body = new_body
return
def remove_args(blocks):
"""remove ir.Arg nodes"""
for block in blocks.values():
new_body = []
for stmt in block.body:
if isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Arg):
continue
new_body.append(stmt)
block.body = new_body
return
def dead_code_elimination(func_ir, typemap=None, alias_map=None,
arg_aliases=None):
""" Performs dead code elimination and leaves the IR in a valid state on
exit
"""
do_post_proc = False
while (remove_dead(func_ir.blocks, func_ir.arg_names, func_ir, typemap,
alias_map, arg_aliases)):
do_post_proc = True
if do_post_proc:
post_proc = postproc.PostProcessor(func_ir)
post_proc.run()
def remove_dead(blocks, args, func_ir, typemap=None, alias_map=None, arg_aliases=None):
"""dead code elimination using liveness and CFG info.
Returns True if something has been removed, or False if nothing is removed.
"""
cfg = compute_cfg_from_blocks(blocks)
usedefs = compute_use_defs(blocks)
live_map = compute_live_map(cfg, blocks, usedefs.usemap, usedefs.defmap)
call_table, _ = get_call_table(blocks)
if alias_map is None or arg_aliases is None:
alias_map, arg_aliases = find_potential_aliases(blocks, args, typemap,
func_ir)
if config.DEBUG_ARRAY_OPT >= 1:
print("args:", args)
print("alias map:", alias_map)
print("arg_aliases:", arg_aliases)
print("live_map:", live_map)
print("usemap:", usedefs.usemap)
print("defmap:", usedefs.defmap)
# keep set for easier search
alias_set = set(alias_map.keys())
removed = False
for label, block in blocks.items():
# find live variables at each statement to delete dead assignment
lives = {v.name for v in block.terminator.list_vars()}
if config.DEBUG_ARRAY_OPT >= 2:
print("remove_dead processing block", label, lives)
# find live variables at the end of block
for out_blk, _data in cfg.successors(label):
if config.DEBUG_ARRAY_OPT >= 2:
print("succ live_map", out_blk, live_map[out_blk])
lives |= live_map[out_blk]
removed |= remove_dead_block(block, lives, call_table, arg_aliases,
alias_map, alias_set, func_ir, typemap)
return removed
# other packages that define new nodes add calls to remove dead code in them
# format: {type:function}
remove_dead_extensions = {}
def remove_dead_block(block, lives, call_table, arg_aliases, alias_map,
alias_set, func_ir, typemap):
"""remove dead code using liveness info.
Mutable arguments (e.g. arrays) that are not definitely assigned are live
after return of function.
"""
# TODO: find mutable args that are not definitely assigned instead of
# assuming all args are live after return
removed = False
# add statements in reverse order
new_body = [block.terminator]
# for each statement in reverse order, excluding terminator
for stmt in reversed(block.body[:-1]):
if config.DEBUG_ARRAY_OPT >= 2:
print("remove_dead_block", stmt)
# aliases of lives are also live
alias_lives = set()
init_alias_lives = lives & alias_set
for v in init_alias_lives:
alias_lives |= alias_map[v]
lives_n_aliases = lives | alias_lives | arg_aliases
# let external calls handle stmt if type matches
if type(stmt) in remove_dead_extensions:
f = remove_dead_extensions[type(stmt)]
stmt = f(stmt, lives, lives_n_aliases, arg_aliases, alias_map, func_ir,
typemap)
if stmt is None:
if config.DEBUG_ARRAY_OPT >= 2:
print("Statement was removed.")
removed = True
continue
# ignore assignments that their lhs is not live or lhs==rhs
if isinstance(stmt, ir.Assign):
lhs = stmt.target
rhs = stmt.value
if lhs.name not in lives and has_no_side_effect(
rhs, lives_n_aliases, call_table):
if config.DEBUG_ARRAY_OPT >= 2:
print("Statement was removed.")
removed = True
continue
if isinstance(rhs, ir.Var) and lhs.name == rhs.name:
if config.DEBUG_ARRAY_OPT >= 2:
print("Statement was removed.")
removed = True
continue
# TODO: remove other nodes like SetItem etc.
if isinstance(stmt, ir.Del):
if stmt.value not in lives:
if config.DEBUG_ARRAY_OPT >= 2:
print("Statement was removed.")
removed = True
continue
if isinstance(stmt, ir.SetItem):
name = stmt.target.name
if name not in lives_n_aliases:
if config.DEBUG_ARRAY_OPT >= 2:
print("Statement was removed.")
continue
if type(stmt) in analysis.ir_extension_usedefs:
def_func = analysis.ir_extension_usedefs[type(stmt)]
uses, defs = def_func(stmt)
lives -= defs
lives |= uses
else:
lives |= {v.name for v in stmt.list_vars()}
if isinstance(stmt, ir.Assign):
# make sure lhs is not used in rhs, e.g. a = g(a)
if isinstance(stmt.value, ir.Expr):
rhs_vars = {v.name for v in stmt.value.list_vars()}
if lhs.name not in rhs_vars:
lives.remove(lhs.name)
else:
lives.remove(lhs.name)
new_body.append(stmt)
new_body.reverse()
block.body = new_body
return removed
# list of functions
remove_call_handlers = []
def remove_dead_random_call(rhs, lives, call_list):
if len(call_list) == 3 and call_list[1:] == ['random', numpy]:
return call_list[0] not in {'seed', 'shuffle'}
return False
remove_call_handlers.append(remove_dead_random_call)
def has_no_side_effect(rhs, lives, call_table):
""" Returns True if this expression has no side effects that
would prevent re-ordering.
"""
from numba.parfors import array_analysis, parfor
from numba.misc.special import prange
if isinstance(rhs, ir.Expr) and rhs.op == 'call':
func_name = rhs.func.name
if func_name not in call_table or call_table[func_name] == []:
return False
call_list = call_table[func_name]
if (call_list == ['empty', numpy] or
call_list == [slice] or
call_list == ['stencil', numba] or
call_list == ['log', numpy] or
call_list == ['dtype', numpy] or
call_list == [array_analysis.wrap_index] or
call_list == [prange] or
call_list == ['prange', numba] or
call_list == ['pndindex', numba] or
call_list == [parfor.internal_prange] or
call_list == ['ceil', math] or
call_list == [max] or
call_list == [int]):
return True
elif (isinstance(call_list[0], _Intrinsic) and
(call_list[0]._name == 'empty_inferred' or
call_list[0]._name == 'unsafe_empty_inferred')):
return True
from numba.core.registry import CPUDispatcher
from numba.np.linalg import dot_3_mv_check_args
if isinstance(call_list[0], CPUDispatcher):
py_func = call_list[0].py_func
if py_func == dot_3_mv_check_args:
return True
for f in remove_call_handlers:
if f(rhs, lives, call_list):
return True
return False
if isinstance(rhs, ir.Expr) and rhs.op == 'inplace_binop':
return rhs.lhs.name not in lives
if isinstance(rhs, ir.Yield):
return False
if isinstance(rhs, ir.Expr) and rhs.op == 'pair_first':
# don't remove pair_first since prange looks for it
return False
return True
is_pure_extensions = []
def is_pure(rhs, lives, call_table):
""" Returns True if every time this expression is evaluated it
returns the same result. This is not the case for things
like calls to numpy.random.
"""
if isinstance(rhs, ir.Expr):
if rhs.op == 'call':
func_name = rhs.func.name
if func_name not in call_table or call_table[func_name] == []:
return False
call_list = call_table[func_name]
if (call_list == [slice] or
call_list == ['log', numpy] or
call_list == ['empty', numpy] or
call_list == ['ceil', math] or
call_list == [max] or
call_list == [int]):
return True
for f in is_pure_extensions:
if f(rhs, lives, call_list):
return True
return False
elif rhs.op == 'getiter' or rhs.op == 'iternext':
return False
if isinstance(rhs, ir.Yield):
return False
return True
def is_const_call(module_name, func_name):
# Returns True if there is no state in the given module changed by the given function.
if module_name == 'numpy':
if func_name in ['empty']:
return True
return False
alias_analysis_extensions = {}
alias_func_extensions = {}
def get_canonical_alias(v, alias_map):
if v not in alias_map:
return v
v_aliases = sorted(list(alias_map[v]))
return v_aliases[0]
def find_potential_aliases(blocks, args, typemap, func_ir, alias_map=None,
arg_aliases=None):
"find all array aliases and argument aliases to avoid remove as dead"
if alias_map is None:
alias_map = {}
if arg_aliases is None:
arg_aliases = set(a for a in args if not is_immutable_type(a, typemap))
# update definitions since they are not guaranteed to be up-to-date
# FIXME keep definitions up-to-date to avoid the need for rebuilding
func_ir._definitions = build_definitions(func_ir.blocks)
np_alias_funcs = ['ravel', 'transpose', 'reshape']
for bl in blocks.values():
for instr in bl.body:
if type(instr) in alias_analysis_extensions:
f = alias_analysis_extensions[type(instr)]
f(instr, args, typemap, func_ir, alias_map, arg_aliases)
if isinstance(instr, ir.Assign):
expr = instr.value
lhs = instr.target.name
# only mutable types can alias
if is_immutable_type(lhs, typemap):
continue
if isinstance(expr, ir.Var) and lhs!=expr.name:
_add_alias(lhs, expr.name, alias_map, arg_aliases)
# subarrays like A = B[0] for 2D B
if (isinstance(expr, ir.Expr) and (expr.op == 'cast' or
expr.op in ['getitem', 'static_getitem'])):
_add_alias(lhs, expr.value.name, alias_map, arg_aliases)
if isinstance(expr, ir.Expr) and expr.op == 'inplace_binop':
_add_alias(lhs, expr.lhs.name, alias_map, arg_aliases)
# array attributes like A.T
if (isinstance(expr, ir.Expr) and expr.op == 'getattr'
and expr.attr in ['T', 'ctypes', 'flat']):
_add_alias(lhs, expr.value.name, alias_map, arg_aliases)
# a = b.c. a should alias b
if (isinstance(expr, ir.Expr) and expr.op == 'getattr'
and expr.attr not in ['shape']
and expr.value.name in arg_aliases):
_add_alias(lhs, expr.value.name, alias_map, arg_aliases)
# calls that can create aliases such as B = A.ravel()
if isinstance(expr, ir.Expr) and expr.op == 'call':
fdef = guard(find_callname, func_ir, expr, typemap)
# TODO: sometimes gufunc backend creates duplicate code
# causing find_callname to fail. Example: test_argmax
# ignored here since those cases don't create aliases
# but should be fixed in general
if fdef is None:
continue
fname, fmod = fdef
if fdef in alias_func_extensions:
alias_func = alias_func_extensions[fdef]
alias_func(lhs, expr.args, alias_map, arg_aliases)
if fmod == 'numpy' and fname in np_alias_funcs:
_add_alias(lhs, expr.args[0].name, alias_map, arg_aliases)
if isinstance(fmod, ir.Var) and fname in np_alias_funcs:
_add_alias(lhs, fmod.name, alias_map, arg_aliases)
# copy to avoid changing size during iteration
old_alias_map = copy.deepcopy(alias_map)
# combine all aliases transitively
for v in old_alias_map:
for w in old_alias_map[v]:
alias_map[v] |= alias_map[w]
for w in old_alias_map[v]:
alias_map[w] = alias_map[v]
return alias_map, arg_aliases
def _add_alias(lhs, rhs, alias_map, arg_aliases):
if rhs in arg_aliases:
arg_aliases.add(lhs)
else:
if rhs not in alias_map:
alias_map[rhs] = set()
if lhs not in alias_map:
alias_map[lhs] = set()
alias_map[rhs].add(lhs)
alias_map[lhs].add(rhs)
return
def is_immutable_type(var, typemap):
# Conservatively, assume mutable if type not available
if typemap is None or var not in typemap:
return False
typ = typemap[var]
# TODO: add more immutable types
if isinstance(typ, (types.Number, types.scalars._NPDatetimeBase,
types.iterators.RangeType)):
return True
if typ==types.string:
return True
# conservatively, assume mutable
return False
def copy_propagate(blocks, typemap):
"""compute copy propagation information for each block using fixed-point
iteration on data flow equations:
in_b = intersect(predec(B))
out_b = gen_b | (in_b - kill_b)
"""
cfg = compute_cfg_from_blocks(blocks)
entry = cfg.entry_point()
# format: dict of block labels to copies as tuples
# label -> (l,r)
c_data = init_copy_propagate_data(blocks, entry, typemap)
(gen_copies, all_copies, kill_copies, in_copies, out_copies) = c_data
old_point = None
new_point = copy.deepcopy(out_copies)
# comparison works since dictionary of built-in types
while old_point != new_point:
for label in blocks.keys():
if label == entry:
continue
predecs = [i for i, _d in cfg.predecessors(label)]
# in_b = intersect(predec(B))
in_copies[label] = out_copies[predecs[0]].copy()
for p in predecs:
in_copies[label] &= out_copies[p]
# out_b = gen_b | (in_b - kill_b)
out_copies[label] = (gen_copies[label]
| (in_copies[label] - kill_copies[label]))
old_point = new_point
new_point = copy.deepcopy(out_copies)
if config.DEBUG_ARRAY_OPT >= 1:
print("copy propagate out_copies:", out_copies)
return in_copies, out_copies
def init_copy_propagate_data(blocks, entry, typemap):
"""get initial condition of copy propagation data flow for each block.
"""
# gen is all definite copies, extra_kill is additional ones that may hit
# for example, parfors can have control flow so they may hit extra copies
gen_copies, extra_kill = get_block_copies(blocks, typemap)
# set of all program copies
all_copies = set()
for l, s in gen_copies.items():
all_copies |= gen_copies[l]
kill_copies = {}
for label, gen_set in gen_copies.items():
kill_copies[label] = set()
for lhs, rhs in all_copies:
if lhs in extra_kill[label] or rhs in extra_kill[label]:
kill_copies[label].add((lhs, rhs))
# a copy is killed if it is not in this block and lhs or rhs are
# assigned in this block
assigned = {lhs for lhs, rhs in gen_set}
if ((lhs, rhs) not in gen_set
and (lhs in assigned or rhs in assigned)):
kill_copies[label].add((lhs, rhs))
# set initial values
# all copies are in for all blocks except entry
in_copies = {l: all_copies.copy() for l in blocks.keys()}
in_copies[entry] = set()
out_copies = {}
for label in blocks.keys():
# out_b = gen_b | (in_b - kill_b)
out_copies[label] = (gen_copies[label]
| (in_copies[label] - kill_copies[label]))
out_copies[entry] = gen_copies[entry]
return (gen_copies, all_copies, kill_copies, in_copies, out_copies)
# other packages that define new nodes add calls to get copies in them
# format: {type:function}
copy_propagate_extensions = {}
def get_block_copies(blocks, typemap):
    """get copies generated and killed by each block

    Returns (block_copies, extra_kill):
      block_copies: {label: set of (lhs, rhs) name pairs} -- copies still
          valid at the end of the block (the "gen" set).
      extra_kill: {label: set of names} -- variables invalidated inside the
          block (reassigned, or mutated via inplace_binop on a mutable
          operand), used to kill copies arriving from other blocks.
    """
    block_copies = {}
    extra_kill = {}
    for label, block in blocks.items():
        assign_dict = {}
        extra_kill[label] = set()
        # assignments as dict to replace with latest value
        for stmt in block.body:
            # let extension nodes (e.g. parfors) report their own copies/kills
            for T, f in copy_propagate_extensions.items():
                if isinstance(stmt, T):
                    gen_set, kill_set = f(stmt, typemap)
                    for lhs, rhs in gen_set:
                        assign_dict[lhs] = rhs
                    # if a=b is in dict and b is killed, a is also killed
                    new_assign_dict = {}
                    for l, r in assign_dict.items():
                        if l not in kill_set and r not in kill_set:
                            new_assign_dict[l] = r
                        if r in kill_set:
                            extra_kill[label].add(l)
                    assign_dict = new_assign_dict
                    extra_kill[label] |= kill_set
            if isinstance(stmt, ir.Assign):
                lhs = stmt.target.name
                if isinstance(stmt.value, ir.Var):
                    rhs = stmt.value.name
                    # copy is valid only if same type (see
                    # TestCFunc.test_locals)
                    # Some transformations can produce assignments of the
                    # form A = A. We don't put these mapping in the
                    # copy propagation set because then you get cycles and
                    # infinite loops in the replacement phase.
                    if typemap[lhs] == typemap[rhs] and lhs != rhs:
                        assign_dict[lhs] = rhs
                        continue
                if isinstance(stmt.value,
                              ir.Expr) and stmt.value.op == 'inplace_binop':
                    in1_var = stmt.value.lhs.name
                    in1_typ = typemap[in1_var]
                    # inplace_binop assigns first operand if mutable
                    if not (isinstance(in1_typ, types.Number)
                            or in1_typ == types.string):
                        extra_kill[label].add(in1_var)
                        # if a=b is in dict and b is killed, a is also killed
                        new_assign_dict = {}
                        for l, r in assign_dict.items():
                            if l != in1_var and r != in1_var:
                                new_assign_dict[l] = r
                            if r == in1_var:
                                extra_kill[label].add(l)
                        assign_dict = new_assign_dict
                # any (non-copy) assignment invalidates previous copies of lhs
                extra_kill[label].add(lhs)
        block_cps = set(assign_dict.items())
        block_copies[label] = block_cps
    return block_copies, extra_kill
# other packages that define new nodes add calls to apply copy propagate in them
# format: {type:function}
# each handler is called as f(stmt, var_dict, name_var_table, typemap,
# calltypes, save_copies) and performs the replacement for its node type
apply_copy_propagate_extensions = {}
def apply_copy_propagate(blocks, in_copies, name_var_table, typemap, calltypes,
                         save_copies=None):
    """apply copy propagation to IR: replace variables when copies available

    in_copies is the per-block set of available (lhs, rhs) copies computed by
    copy_propagate; name_var_table maps variable names to ir.Var objects so
    replacements can be materialized. Mutates the IR in place and returns
    save_copies, the accumulated (name, ir.Var) pairs that were applied.
    """
    # save_copies keeps an approximation of the copies that were applied, so
    # that the variable names of removed user variables can be recovered to some
    # extent.
    if save_copies is None:
        save_copies = []
    for label, block in blocks.items():
        # var_dict: name -> ir.Var to substitute, seeded from copies
        # available on entry to this block
        var_dict = {l: name_var_table[r] for l, r in in_copies[label]}
        # assignments as dict to replace with latest value
        for stmt in block.body:
            if type(stmt) in apply_copy_propagate_extensions:
                f = apply_copy_propagate_extensions[type(stmt)]
                f(stmt, var_dict, name_var_table,
                  typemap, calltypes, save_copies)
            # only rhs of assignments should be replaced
            # e.g. if x=y is available, x in x=z shouldn't be replaced
            elif isinstance(stmt, ir.Assign):
                stmt.value = replace_vars_inner(stmt.value, var_dict)
            else:
                replace_vars_stmt(stmt, var_dict)
            # replacement may have changed a setitem target's layout; patch
            # the recorded call signature accordingly
            fix_setitem_type(stmt, typemap, calltypes)
            # update var_dict with copies/kills reported by extension nodes
            for T, f in copy_propagate_extensions.items():
                if isinstance(stmt, T):
                    gen_set, kill_set = f(stmt, typemap)
                    for lhs, rhs in gen_set:
                        if rhs in name_var_table:
                            var_dict[lhs] = name_var_table[rhs]
                    for l, r in var_dict.copy().items():
                        if l in kill_set or r.name in kill_set:
                            var_dict.pop(l)
            if isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Var):
                lhs = stmt.target.name
                rhs = stmt.value.name
                # rhs could be replaced with lhs from previous copies
                if lhs != rhs:
                    # copy is valid only if same type (see
                    # TestCFunc.test_locals)
                    if typemap[lhs] == typemap[rhs] and rhs in name_var_table:
                        var_dict[lhs] = name_var_table[rhs]
                    else:
                        var_dict.pop(lhs, None)
                    # a=b kills previous t=a
                    lhs_kill = []
                    for k, v in var_dict.items():
                        if v.name == lhs:
                            lhs_kill.append(k)
                    for k in lhs_kill:
                        var_dict.pop(k, None)
            if (isinstance(stmt, ir.Assign)
                    and not isinstance(stmt.value, ir.Var)):
                # non-copy assignment: lhs gets a fresh value, so any copy
                # involving lhs is no longer valid
                lhs = stmt.target.name
                var_dict.pop(lhs, None)
                # previous t=a is killed if a is killed
                lhs_kill = []
                for k, v in var_dict.items():
                    if v.name == lhs:
                        lhs_kill.append(k)
                for k in lhs_kill:
                    var_dict.pop(k, None)
        save_copies.extend(var_dict.items())
    return save_copies
def fix_setitem_type(stmt, typemap, calltypes):
    """Patch the recorded call signature of a setitem after copy propagation.

    Copy propagation may substitute a setitem target array whose layout is
    'A' with one whose layout is 'C' or 'F'; rewrite the first argument type
    of the recorded signature to match the new target's layout (observed in
    the matrix power test).
    """
    if not isinstance(stmt, (ir.SetItem, ir.StaticSetItem)):
        return
    target_typ = typemap[stmt.target.name]
    sig = calltypes[stmt]
    sig_arr_typ = sig.args[0]
    # test_optional t_typ can be Optional with array
    if not (isinstance(sig_arr_typ, types.npytypes.Array)
            and isinstance(target_typ, types.npytypes.Array)):
        return
    if sig_arr_typ.layout == 'A' and target_typ.layout != 'A':
        fixed_arr_typ = sig_arr_typ.copy(layout=target_typ.layout)
        sig.args = (fixed_arr_typ, sig.args[1], sig.args[2])
    return
def dprint_func_ir(func_ir, title, blocks=None):
    """Debug print function IR, with an optional blocks argument
    that may differ from the IR's original blocks.

    Only prints when config.DEBUG_ARRAY_OPT >= 1; the IR's original block
    mapping is restored after dumping.
    """
    if config.DEBUG_ARRAY_OPT >= 1:
        ir_blocks = func_ir.blocks
        # Temporarily swap in the caller-supplied blocks (if any) so dump()
        # prints them.  NOTE: use `is None`, not `== None`, to avoid invoking
        # __eq__ on dict-like block mappings.
        func_ir.blocks = ir_blocks if blocks is None else blocks
        name = func_ir.func_id.func_qualname
        print(("IR %s: %s" % (title, name)).center(80, "-"))
        func_ir.dump()
        print("-" * 40)
        # restore the original mapping
        func_ir.blocks = ir_blocks
def find_topo_order(blocks, cfg = None):
    """find topological order of blocks such that true branches are visited
    first (e.g. for_break test in test_dataflow). This is written as an iterative
    implementation of post order traversal to avoid recursion limit issues.

    Returns a list of block labels in reverse post-order; back edges of the
    CFG are ignored so the ordering is well-defined for loops.
    """
    if cfg is None:
        cfg = compute_cfg_from_blocks(blocks)
    post_order = []
    # Has the node already added its children?
    seen = set()
    # Has the node already been pushed to post order?
    visited = set()
    stack = [cfg.entry_point()]
    while len(stack) > 0:
        node = stack[-1]
        if node not in visited and node not in seen:
            # We haven't added a node or its children.
            seen.add(node)
            succs = cfg._succs[node]
            last_inst = blocks[node].body[-1]
            if isinstance(last_inst, ir.Branch):
                # order successors explicitly: pushing truebr first means it
                # finishes later and therefore comes before falsebr in the
                # reversed post-order result
                succs = [last_inst.truebr, last_inst.falsebr]
            for dest in succs:
                # skip back edges so loop bodies don't re-enter the stack
                if (node, dest) not in cfg._back_edges:
                    if dest not in seen:
                        stack.append(dest)
        else:
            # This node has already added its children. We either need
            # to visit the node or it has been added multiple times in
            # which case we should just skip the node.
            node = stack.pop()
            if node not in visited:
                post_order.append(node)
                visited.add(node)
            if node in seen:
                # Remove the node from seen if it exists to limit the memory
                # usage to 1 entry per node. Otherwise the memory requirement
                # can double the recursive version.
                seen.remove(node)
    post_order.reverse()
    return post_order
# other packages that define new nodes add calls to get call table
# format: {type:function}
# each handler is called as f(inst, call_table, reverse_call_table) and
# updates both tables for its node type
call_table_extensions = {}
def get_call_table(blocks, call_table=None, reverse_call_table=None, topological_ordering=True):
    """returns a dictionary of call variables and their references.

    Blocks (and statements within them) are scanned in reverse so that a
    call's definition chain (getattr/Global/FreeVar/Var assignments) is seen
    after the call itself and can be attached to it. Returns the pair
    (call_table, reverse_call_table).
    """
    # call_table example: c = np.zeros becomes c:["zeroes", np]
    # reverse_call_table example: c = np.zeros becomes np_var:c
    if call_table is None:
        call_table = {}
    if reverse_call_table is None:
        reverse_call_table = {}
    if topological_ordering:
        order = find_topo_order(blocks)
    else:
        order = list(blocks.keys())
    for label in reversed(order):
        for inst in reversed(blocks[label].body):
            if isinstance(inst, ir.Assign):
                lhs = inst.target.name
                rhs = inst.value
                if isinstance(rhs, ir.Expr) and rhs.op == 'call':
                    # start a fresh reference chain for this call's function
                    call_table[rhs.func.name] = []
                if isinstance(rhs, ir.Expr) and rhs.op == 'getattr':
                    if lhs in call_table:
                        call_table[lhs].append(rhs.attr)
                        # continue the chain through the getattr's base object
                        reverse_call_table[rhs.value.name] = lhs
                    if lhs in reverse_call_table:
                        call_var = reverse_call_table[lhs]
                        call_table[call_var].append(rhs.attr)
                        reverse_call_table[rhs.value.name] = call_var
                if isinstance(rhs, ir.Global):
                    if lhs in call_table:
                        call_table[lhs].append(rhs.value)
                    if lhs in reverse_call_table:
                        call_var = reverse_call_table[lhs]
                        call_table[call_var].append(rhs.value)
                if isinstance(rhs, ir.FreeVar):
                    if lhs in call_table:
                        call_table[lhs].append(rhs.value)
                    if lhs in reverse_call_table:
                        call_var = reverse_call_table[lhs]
                        call_table[call_var].append(rhs.value)
                if isinstance(rhs, ir.Var):
                    if lhs in call_table:
                        call_table[lhs].append(rhs.name)
                        reverse_call_table[rhs.name] = lhs
                    if lhs in reverse_call_table:
                        call_var = reverse_call_table[lhs]
                        call_table[call_var].append(rhs.name)
            for T, f in call_table_extensions.items():
                if isinstance(inst, T):
                    f(inst, call_table, reverse_call_table)
    return call_table, reverse_call_table
# other packages that define new nodes add calls to get tuple table
# format: {type:function}
# each handler is called as f(inst, tuple_table) and updates the table
tuple_table_extensions = {}
def get_tuple_table(blocks, tuple_table=None):
    """Build a mapping from tuple variable names to their values.

    Covers tuples created via build_tuple expressions (mapping to the list
    of item variables) and constant tuples (mapping to the tuple itself).
    Registered extension handlers are also invoked for every statement.
    """
    if tuple_table is None:
        tuple_table = {}
    for blk in blocks.values():
        for stmt in blk.body:
            if isinstance(stmt, ir.Assign):
                target = stmt.target.name
                val = stmt.value
                if isinstance(val, ir.Expr) and val.op == 'build_tuple':
                    tuple_table[target] = val.items
                if isinstance(val, ir.Const) and isinstance(val.value, tuple):
                    tuple_table[target] = val.value
            for node_type, handler in tuple_table_extensions.items():
                if isinstance(stmt, node_type):
                    handler(stmt, tuple_table)
    return tuple_table
def get_stmt_writes(stmt):
    """Return the set of variable names written by *stmt*.

    Only Assign/SetItem/StaticSetItem statements write a target here; any
    other statement yields an empty set.
    """
    if isinstance(stmt, (ir.Assign, ir.SetItem, ir.StaticSetItem)):
        return {stmt.target.name}
    return set()
def rename_labels(blocks):
    """rename labels of function body blocks according to topological sort.
    The set of labels of these blocks will remain unchanged.

    Returns a new blocks dict; jump/branch targets are rewritten with fresh
    terminator nodes so that other copies of the IR sharing the old nodes
    are not affected.
    """
    topo_order = find_topo_order(blocks)
    # make a block with return last if available (just for readability)
    return_label = -1
    for l, b in blocks.items():
        if isinstance(b.body[-1], ir.Return):
            return_label = l
    # some cases like generators can have no return blocks
    if return_label != -1:
        topo_order.remove(return_label)
        topo_order.append(return_label)
    label_map = {}
    # assign the smallest remaining label (popped from the descending-sorted
    # list) to each block in topological order
    all_labels = sorted(topo_order, reverse=True)
    for label in topo_order:
        label_map[label] = all_labels.pop()
    # update target labels in jumps/branches
    for b in blocks.values():
        term = b.terminator
        # create new IR nodes instead of mutating the existing one as copies of
        # the IR may also refer to the same nodes!
        if isinstance(term, ir.Jump):
            b.body[-1] = ir.Jump(label_map[term.target], term.loc)
        if isinstance(term, ir.Branch):
            b.body[-1] = ir.Branch(term.cond,
                                   label_map[term.truebr],
                                   label_map[term.falsebr],
                                   term.loc)
    # update blocks dictionary keys
    new_blocks = {}
    for k, b in blocks.items():
        new_label = label_map[k]
        new_blocks[new_label] = b
    return new_blocks
def simplify_CFG(blocks):
    """transform chains of blocks that have no loop into a single block

    First inlines blocks consisting of a single Branch into their Jump
    predecessors, then merges straight-line chains and renames labels.
    Returns the new blocks dict.
    """
    # first, inline single-branch-block to its predecessors
    cfg = compute_cfg_from_blocks(blocks)
    def find_single_branch(label):
        # predicate: block body is exactly one Branch instruction
        block = blocks[label]
        return len(block.body) == 1 and isinstance(block.body[0], ir.Branch)
    single_branch_blocks = list(filter(find_single_branch, blocks.keys()))
    marked_for_del = set()
    for label in single_branch_blocks:
        inst = blocks[label].body[0]
        predecessors = cfg.predecessors(label)
        delete_block = True
        for (p, q) in predecessors:
            block = blocks[p]
            if isinstance(block.body[-1], ir.Jump):
                # replace the predecessor's Jump with a copy of the Branch
                block.body[-1] = copy.copy(inst)
            else:
                # a non-Jump predecessor still targets this block, so it
                # cannot be removed
                delete_block = False
        if delete_block:
            marked_for_del.add(label)
    # Delete marked labels
    for label in marked_for_del:
        del blocks[label]
    merge_adjacent_blocks(blocks)
    return rename_labels(blocks)
# array method names that canonicalize_array_math rewrites from the
# A.func(...) method form into the numpy module-level np.func(A, ...) form
arr_math = ['min', 'max', 'sum', 'prod', 'mean', 'var', 'std',
            'cumsum', 'cumprod', 'argmax', 'argmin', 'argsort',
            'nonzero', 'ravel']
def canonicalize_array_math(func_ir, typemap, calltypes, typingctx):
    """Rewrite array method calls A.func(...) (for func in arr_math) into
    the equivalent numpy module form np.func(A, ...).

    Mutates func_ir, typemap and calltypes in place: inserts a Global(numpy)
    assignment before each rewritten getattr, retypes the function variable,
    and re-resolves the call signature with the array as the first argument.
    """
    # save array arg to call
    # call_varname -> array
    blocks = func_ir.blocks
    saved_arr_arg = {}
    topo_order = find_topo_order(blocks)
    for label in topo_order:
        block = blocks[label]
        new_body = []
        for stmt in block.body:
            if isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Expr):
                lhs = stmt.target.name
                rhs = stmt.value
                # replace A.func with np.func, and save A in saved_arr_arg
                if (rhs.op == 'getattr' and rhs.attr in arr_math
                        and isinstance(
                            typemap[rhs.value.name], types.npytypes.Array)):
                    rhs = stmt.value
                    arr = rhs.value
                    saved_arr_arg[lhs] = arr
                    scope = arr.scope
                    loc = arr.loc
                    # g_np_var = Global(numpy)
                    g_np_var = ir.Var(scope, mk_unique_var("$np_g_var"), loc)
                    typemap[g_np_var.name] = types.misc.Module(numpy)
                    g_np = ir.Global('np', numpy, loc)
                    g_np_assign = ir.Assign(g_np, g_np_var, loc)
                    # the getattr now reads the attribute off the numpy module
                    rhs.value = g_np_var
                    new_body.append(g_np_assign)
                    func_ir._definitions[g_np_var.name] = [g_np]
                    # update func var type
                    func = getattr(numpy, rhs.attr)
                    func_typ = get_np_ufunc_typ(func, typingctx)
                    typemap.pop(lhs)
                    typemap[lhs] = func_typ
                if rhs.op == 'call' and rhs.func.name in saved_arr_arg:
                    # add array as first arg
                    arr = saved_arr_arg[rhs.func.name]
                    # update call type signature to include array arg
                    old_sig = calltypes.pop(rhs)
                    # argsort requires kws for typing so sig.args can't be used
                    # reusing sig.args since some types become Const in sig
                    argtyps = old_sig.args[:len(rhs.args)]
                    kwtyps = {name: typemap[v.name] for name, v in rhs.kws}
                    calltypes[rhs] = typemap[rhs.func.name].get_call_type(
                        typingctx, [typemap[arr.name]] + list(argtyps), kwtyps)
                    rhs.args = [arr] + rhs.args
            new_body.append(stmt)
        block.body = new_body
    return
# extension nodes register handlers called as f(inst, accesses)
# format: {type:function}
array_accesses_extensions = {}
def get_array_accesses(blocks, accesses=None):
    """returns a set of arrays accessed and their indices.

    Each element is an (array name, index) pair where index is normally the
    index variable's name; for a static_getitem with a slice (or None)
    index, the index variable name is used since slices are unhashable.
    """
    if accesses is None:
        accesses = set()
    for block in blocks.values():
        for inst in block.body:
            if isinstance(inst, ir.SetItem):
                accesses.add((inst.target.name, inst.index.name))
            if isinstance(inst, ir.StaticSetItem):
                accesses.add((inst.target.name, inst.index_var.name))
            if isinstance(inst, ir.Assign):
                lhs = inst.target.name
                rhs = inst.value
                if isinstance(rhs, ir.Expr) and rhs.op == 'getitem':
                    accesses.add((rhs.value.name, rhs.index.name))
                if isinstance(rhs, ir.Expr) and rhs.op == 'static_getitem':
                    index = rhs.index
                    # slice is unhashable, so just keep the variable
                    if index is None or is_slice_index(index):
                        index = rhs.index_var.name
                    accesses.add((rhs.value.name, index))
            for T, f in array_accesses_extensions.items():
                if isinstance(inst, T):
                    f(inst, accesses)
    return accesses
def is_slice_index(index):
    """Return True if *index* is a slice or a tuple containing a slice.

    Used by get_array_accesses: slice objects are unhashable, so such
    indices are recorded by their index variable name instead.
    """
    if isinstance(index, slice):
        return True
    # a tuple index has a slice component if any top-level element is a slice
    return (isinstance(index, tuple)
            and any(isinstance(i, slice) for i in index))
def merge_adjacent_blocks(blocks):
    """Merge straight-line chains of blocks in place.

    A block is merged into its predecessor when it is the predecessor's only
    successor and the predecessor is its only predecessor; the connecting
    Jump terminator is dropped.
    """
    cfg = compute_cfg_from_blocks(blocks)
    # merge adjacent blocks
    removed = set()
    for label in list(blocks.keys()):
        if label in removed:
            continue
        block = blocks[label]
        succs = list(cfg.successors(label))
        # keep extending the chain as long as the next block is mergeable
        while True:
            if len(succs) != 1:
                break
            next_label = succs[0][0]
            if next_label in removed:
                break
            preds = list(cfg.predecessors(next_label))
            succs = list(cfg.successors(next_label))
            if len(preds) != 1 or preds[0][0] != label:
                break
            next_block = blocks[next_label]
            # XXX: commented out since scope objects are not consistent
            # throughout the compiler. for example, pieces of code are compiled
            # and inlined on the fly without proper scope merge.
            # if block.scope != next_block.scope:
            #     break
            # merge
            block.body.pop()  # remove Jump
            block.body += next_block.body
            del blocks[next_label]
            removed.add(next_label)
            label = next_label
def restore_copy_var_names(blocks, save_copies, typemap):
    """
    restores variable names of user variables after applying copy propagation

    save_copies is the list of (name, ir.Var) pairs recorded by
    apply_copy_propagate. Temporaries that replaced user variables are
    renamed to a fresh name derived from the user name, and typemap is
    updated accordingly. Returns {new name: original user name} for
    recording in metadata.
    """
    if not save_copies:
        return {}
    rename_dict = {}
    var_rename_map = {}
    for (a, b) in save_copies:
        # a is string name, b is variable
        # if a is user variable and b is generated temporary and b is not
        # already renamed
        if (not a.startswith('$') and b.name.startswith('$')
                and b.name not in rename_dict):
            new_name = mk_unique_var('${}'.format(a))
            rename_dict[b.name] = new_name
            var_rename_map[new_name] = a
            # move the temporary's type over to the new name
            typ = typemap.pop(b.name)
            typemap[new_name] = typ
    replace_var_names(blocks, rename_dict)
    return var_rename_map
def simplify(func_ir, typemap, calltypes, metadata):
    """Run copy propagation and dead code elimination on func_ir, then
    simplify its CFG. Mutates func_ir in place and records recovered
    user-variable renamings in metadata["var_rename_map"].
    """
    # get copies in to blocks and out from blocks
    in_cps, _ = copy_propagate(func_ir.blocks, typemap)
    # table mapping variable names to ir.Var objects to help replacement
    name_var_table = get_name_var_table(func_ir.blocks)
    save_copies = apply_copy_propagate(
        func_ir.blocks,
        in_cps,
        name_var_table,
        typemap,
        calltypes)
    var_rename_map = restore_copy_var_names(func_ir.blocks, save_copies, typemap)
    if "var_rename_map" not in metadata:
        metadata["var_rename_map"] = {}
    metadata["var_rename_map"].update(var_rename_map)
    # remove dead code to enable fusion
    if config.DEBUG_ARRAY_OPT >= 1:
        dprint_func_ir(func_ir, "after copy prop")
    remove_dead(func_ir.blocks, func_ir.arg_names, func_ir, typemap)
    func_ir.blocks = simplify_CFG(func_ir.blocks)
    if config.DEBUG_ARRAY_OPT >= 1:
        dprint_func_ir(func_ir, "after simplify")
| _MaxLabel |
python | pennersr__django-allauth | allauth/headless/base/views.py | {
"start": 1840,
"end": 2126
} | class ____(APIView):
def get(self, request, *args, **kwargs):
"""
The frontend queries (GET) this endpoint, expecting to receive
either a 401 if no user is authenticated, or user information.
"""
return response.ConfigResponse(request)
| ConfigView |
python | oauthlib__oauthlib | tests/oauth2/rfc8628/test_server.py | {
"start": 223,
"end": 4463
} | class ____(TestCase):
def _configure_endpoint(
self, interval=None, verification_uri_complete=None, user_code_generator=None
):
self.endpoint = DeviceAuthorizationEndpoint(
request_validator=mock.MagicMock(spec=RequestValidator),
verification_uri=self.verification_uri,
interval=interval,
verification_uri_complete=verification_uri_complete,
user_code_generator=user_code_generator,
)
def setUp(self):
self.request_validator = mock.MagicMock(spec=RequestValidator)
self.verification_uri = "http://i.b/l/verify"
self.uri = "http://i.b/l"
self.http_method = "POST"
self.body = "client_id=abc"
self.headers = {"Content-Type": "application/x-www-form-urlencoded"}
self._configure_endpoint()
def response_payload(self):
return self.uri, self.http_method, self.body, self.headers
@mock.patch("oauthlib.oauth2.rfc8628.endpoints.device_authorization.generate_token")
def test_device_authorization_grant(self, generate_token):
generate_token.side_effect = ["abc", "def"]
_, body, status_code = self.endpoint.create_device_authorization_response(
*self.response_payload()
)
expected_payload = {
"verification_uri": "http://i.b/l/verify",
"user_code": "abc",
"device_code": "def",
"expires_in": 1800,
}
self.assertEqual(200, status_code)
self.assertEqual(body, expected_payload)
@mock.patch(
"oauthlib.oauth2.rfc8628.endpoints.device_authorization.generate_token",
lambda: "abc",
)
def test_device_authorization_grant_interval(self):
self._configure_endpoint(interval=5)
_, body, _ = self.endpoint.create_device_authorization_response(*self.response_payload())
self.assertEqual(5, body["interval"])
@mock.patch(
"oauthlib.oauth2.rfc8628.endpoints.device_authorization.generate_token",
lambda: "abc",
)
def test_device_authorization_grant_interval_with_zero(self):
self._configure_endpoint(interval=0)
_, body, _ = self.endpoint.create_device_authorization_response(*self.response_payload())
self.assertEqual(0, body["interval"])
@mock.patch(
"oauthlib.oauth2.rfc8628.endpoints.device_authorization.generate_token",
lambda: "abc",
)
def test_device_authorization_grant_verify_url_complete_string(self):
self._configure_endpoint(verification_uri_complete="http://i.l/v?user_code={user_code}")
_, body, _ = self.endpoint.create_device_authorization_response(*self.response_payload())
self.assertEqual(
"http://i.l/v?user_code=abc",
body["verification_uri_complete"],
)
@mock.patch(
"oauthlib.oauth2.rfc8628.endpoints.device_authorization.generate_token",
lambda: "abc",
)
def test_device_authorization_grant_verify_url_complete_callable(self):
self._configure_endpoint(verification_uri_complete=lambda u: f"http://i.l/v?user_code={u}")
_, body, _ = self.endpoint.create_device_authorization_response(*self.response_payload())
self.assertEqual(
"http://i.l/v?user_code=abc",
body["verification_uri_complete"],
)
@mock.patch(
"oauthlib.oauth2.rfc8628.endpoints.device_authorization.generate_token",
lambda: "abc",
)
def test_device_authorization_grant_user_gode_generator(self):
def user_code():
"""
A friendly user code the device can display and the user
can type in. It's up to the device how
this code should be displayed. e.g 123-456
"""
return "123456"
self._configure_endpoint(
verification_uri_complete=lambda u: f"http://i.l/v?user_code={u}",
user_code_generator=user_code,
)
_, body, _ = self.endpoint.create_device_authorization_response(*self.response_payload())
self.assertEqual(
"http://i.l/v?user_code=123456",
body["verification_uri_complete"],
)
| DeviceAuthorizationEndpointTest |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/core_tests/resource_tests/pythonic_resources/test_general_pythonic_resources.py | {
"start": 8116,
"end": 33679
} | class ____(dg.IOManager):
def __init__(self, a_config_value: str):
self.a_config_value = a_config_value
def load_input(self, _): # pyright: ignore[reportIncompatibleMethodOverride]
pass
def handle_output(self, _, obj): # pyright: ignore[reportIncompatibleMethodOverride]
pass
def test_io_manager_adapter():
@dg.io_manager(config_schema={"a_config_value": str})
def an_io_manager(context: InitResourceContext) -> AnIOManagerImplementation:
return AnIOManagerImplementation(context.resource_config["a_config_value"])
class AdapterForIOManager(dg.ConfigurableLegacyIOManagerAdapter):
a_config_value: str
@property
def wrapped_io_manager(self) -> dg.IOManagerDefinition:
return an_io_manager
executed = {}
@dg.asset
def an_asset(context: AssetExecutionContext):
assert context.resources.io_manager.a_config_value == "passed-in-configured"
executed["yes"] = True
defs = dg.Definitions(
assets=[an_asset],
resources={"io_manager": AdapterForIOManager(a_config_value="passed-in-configured")},
)
defs.resolve_implicit_global_asset_job_def().execute_in_process()
assert executed["yes"]
def test_io_manager_factory_class():
# now test without the adapter
class AnIOManagerFactory(dg.ConfigurableIOManagerFactory):
a_config_value: str
def create_io_manager(self, _) -> dg.IOManager: # pyright: ignore[reportIncompatibleMethodOverride]
"""Implement as one would implement a @io_manager decorator function."""
return AnIOManagerImplementation(self.a_config_value)
executed = {}
@dg.asset
def another_asset(context: AssetExecutionContext):
assert context.resources.io_manager.a_config_value == "passed-in-factory"
executed["yes"] = True
defs = dg.Definitions(
assets=[another_asset],
resources={"io_manager": AnIOManagerFactory(a_config_value="passed-in-factory")},
)
defs.resolve_implicit_global_asset_job_def().execute_in_process()
assert executed["yes"]
def test_structured_resource_runtime_config():
out_txt = []
class WriterResource(dg.ConfigurableResource):
prefix: str
def output(self, text: str) -> None:
out_txt.append(f"{self.prefix}{text}")
@dg.asset
def hello_world_asset(writer: WriterResource):
writer.output("hello, world!")
defs = dg.Definitions(
assets=[hello_world_asset],
resources={"writer": WriterResource.configure_at_launch()},
)
assert (
defs.resolve_implicit_global_asset_job_def()
.execute_in_process({"resources": {"writer": {"config": {"prefix": ""}}}})
.success
)
assert out_txt == ["hello, world!"]
out_txt.clear()
assert (
defs.resolve_implicit_global_asset_job_def()
.execute_in_process({"resources": {"writer": {"config": {"prefix": "greeting: "}}}})
.success
)
assert out_txt == ["greeting: hello, world!"]
def test_runtime_config_run_config_obj():
# Use RunConfig to specify resource config
# in a structured format at runtime rather than using a dict
out_txt = []
class WriterResource(dg.ConfigurableResource):
prefix: str
def output(self, text: str) -> None:
out_txt.append(f"{self.prefix}{text}")
@dg.asset
def hello_world_asset(writer: WriterResource):
writer.output("hello, world!")
defs = dg.Definitions(
assets=[hello_world_asset],
resources={"writer": WriterResource.configure_at_launch()},
)
assert (
defs.resolve_implicit_global_asset_job_def()
.execute_in_process(dg.RunConfig(resources={"writer": WriterResource(prefix="greeting: ")}))
.success
)
assert out_txt == ["greeting: hello, world!"]
def test_basic_enum_override_with_resource_instance() -> None:
class BasicEnum(enum.Enum):
A = "a_value"
B = "b_value"
setup_executed = {}
class MyResource(dg.ConfigurableResource):
my_enum: BasicEnum
def setup_for_execution(self, context: InitResourceContext) -> None:
setup_executed["yes"] = True
assert context.resource_config["my_enum"] in [
BasicEnum.A.value,
BasicEnum.B.value,
]
@dg.asset
def asset_with_resource(context, my_resource: MyResource):
return my_resource.my_enum.value
result_one = dg.materialize(
[asset_with_resource],
resources={"my_resource": MyResource(my_enum=BasicEnum.A)},
)
assert result_one.success
assert result_one.output_for_node("asset_with_resource") == "a_value"
assert setup_executed["yes"]
setup_executed.clear()
result_two = dg.materialize(
[asset_with_resource],
resources={"my_resource": MyResource(my_enum=BasicEnum.A)},
run_config={"resources": {"my_resource": {"config": {"my_enum": "B"}}}},
)
assert result_two.success
assert result_two.output_for_node("asset_with_resource") == "b_value"
assert setup_executed["yes"]
def test_basic_enum_override_with_resource_configured_at_launch() -> None:
class AnotherEnum(enum.Enum):
A = "a_value"
B = "b_value"
class MyResource(dg.ConfigurableResource):
my_enum: AnotherEnum
@dg.asset
def asset_with_resource(context, my_resource: MyResource):
return my_resource.my_enum.value
result_one = dg.materialize(
[asset_with_resource],
resources={"my_resource": MyResource.configure_at_launch()},
run_config={"resources": {"my_resource": {"config": {"my_enum": "B"}}}},
)
assert result_one.success
assert result_one.output_for_node("asset_with_resource") == "b_value"
result_two = dg.materialize(
[asset_with_resource],
resources={"my_resource": MyResource.configure_at_launch(my_enum=AnotherEnum.A)},
run_config={"resources": {"my_resource": {"config": {"my_enum": "B"}}}},
)
assert result_two.success
assert result_two.output_for_node("asset_with_resource") == "b_value"
def test_resources_which_return():
class StringResource(ConfigurableResourceFactory[str]):
a_string: str
def create_resource(self, context) -> str:
return self.a_string
class MyResource(dg.ConfigurableResource):
string_from_resource: dg.ResourceDependency[str]
completed = {}
@dg.asset
def my_asset(my_resource: MyResource):
assert my_resource.string_from_resource == "foo"
completed["yes"] = True
str_resource = StringResource(a_string="foo")
my_resource = MyResource(string_from_resource=str_resource)
defs = dg.Definitions(
assets=[my_asset],
resources={
"my_resource": my_resource,
},
)
assert defs.resolve_implicit_global_asset_job_def().execute_in_process().success
assert completed["yes"]
str_resource_partial = StringResource.configure_at_launch()
my_resource = MyResource(string_from_resource=str_resource_partial) # pyright: ignore[reportArgumentType]
defs = dg.Definitions(
assets=[my_asset],
resources={
"str_resource_partial": str_resource_partial,
"my_resource": my_resource,
},
)
assert (
defs.resolve_implicit_global_asset_job_def()
.execute_in_process(
{
"resources": {
"str_resource_partial": {
"config": {
"a_string": "foo",
},
}
}
}
)
.success
)
assert completed["yes"]
def test_nested_config_class() -> None:
# Validate that we can nest Config classes in a pythonic resource
class User(dg.Config):
name: str
age: int
class UsersResource(dg.ConfigurableResource):
users: list[User]
executed = {}
@dg.asset
def an_asset(users_resource: UsersResource):
assert len(users_resource.users) == 2
assert users_resource.users[0].name == "Bob"
assert users_resource.users[0].age == 25
assert users_resource.users[1].name == "Alice"
assert users_resource.users[1].age == 30
executed["yes"] = True
defs = dg.Definitions(
assets=[an_asset],
resources={
"users_resource": UsersResource(
users=[
User(name="Bob", age=25),
User(name="Alice", age=30),
]
)
},
)
assert defs.resolve_implicit_global_asset_job_def().execute_in_process().success
assert executed["yes"]
# https://github.com/dagster-io/dagster/issues/27223
@pytest.mark.parametrize("child_resource_fields_all_have_default_values", [True, False])
def test_nested_config_class_with_runtime_config(
child_resource_fields_all_have_default_values: bool,
) -> None:
# Type hinting a dynamically-generated Pydantic model is impossible:
# https://stackoverflow.com/q/78838473
ChildResource = create_model(
"ChildResource",
date=(str, "2025-01-20" if child_resource_fields_all_have_default_values else ...),
__base__=ConfigurableResource,
)
class ParentResource(dg.ConfigurableResource):
child: ChildResource # pyright: ignore[reportInvalidTypeForm]
@dg.asset
def test_asset(
child: ChildResource, # pyright: ignore[reportInvalidTypeForm]
parent: ParentResource,
) -> None:
assert child.date == "2025-01-21"
assert parent.child.date == "2025-01-21"
child = ChildResource.configure_at_launch()
dg.materialize(
[test_asset],
resources={
"child": child,
"parent": ParentResource.configure_at_launch(child=child),
},
run_config={
"loggers": {"console": {"config": {"log_level": "ERROR"}}},
"resources": {"child": {"config": {"date": "2025-01-21"}}},
},
)
def test_using_enum_simple() -> None:
executed = {}
class SimpleEnum(enum.Enum):
FOO = "foo"
BAR = "bar"
class MyResource(dg.ConfigurableResource):
an_enum: SimpleEnum
@dg.asset
def an_asset(my_resource: MyResource):
assert my_resource.an_enum == SimpleEnum.FOO
executed["yes"] = True
defs = dg.Definitions(
assets=[an_asset],
resources={
"my_resource": MyResource(
an_enum=SimpleEnum.FOO,
)
},
)
assert defs.resolve_implicit_global_asset_job_def().execute_in_process().success
assert executed["yes"]
executed.clear()
defs = dg.Definitions(
assets=[an_asset],
resources={
"my_resource": MyResource.configure_at_launch(),
},
)
assert (
defs.resolve_implicit_global_asset_job_def()
.execute_in_process(
{"resources": {"my_resource": {"config": {"an_enum": SimpleEnum.FOO.name}}}}
)
.success
)
assert executed["yes"]
def test_using_enum_complex() -> None:
executed = {}
class MyEnum(enum.Enum):
FOO = "foo"
BAR = "bar"
class MyResource(dg.ConfigurableResource):
list_of_enums: list[MyEnum]
optional_enum: Optional[MyEnum] = None
@dg.asset
def an_asset(my_resource: MyResource):
assert my_resource.optional_enum is None
assert my_resource.list_of_enums == [MyEnum.FOO, MyEnum.BAR]
executed["yes"] = True
defs = dg.Definitions(
assets=[an_asset],
resources={
"my_resource": MyResource(
list_of_enums=[MyEnum.FOO, MyEnum.BAR],
)
},
)
assert defs.resolve_implicit_global_asset_job_def().execute_in_process().success
assert executed["yes"]
executed.clear()
def test_resource_defs_on_asset() -> None:
executed = {}
class MyResource(dg.ConfigurableResource):
a_str: str
@dg.asset(resource_defs={"my_resource": MyResource(a_str="foo")})
def an_asset(my_resource: MyResource):
assert my_resource.a_str == "foo"
executed["yes"] = True
defs = dg.Definitions(
assets=[an_asset],
)
defs.resolve_implicit_global_asset_job_def().execute_in_process()
assert executed["yes"]
# Cannot specify both required_resource_keys and resources as args
with pytest.raises(CheckError):
@dg.asset(required_resource_keys={"my_other_resource"})
def an_other_asset(my_resource: MyResource):
pass
def test_extending_resource() -> None:
executed = {}
class BaseResource(dg.ConfigurableResource):
a_str: str = "bar"
an_int: int = 1
class ExtendingResource(BaseResource):
a_float: float = 1.0
@dg.op
def hello_world_op(writer: ExtendingResource):
assert writer.a_str == "foo"
assert writer.an_int == 1
assert writer.a_float == 1.0
executed["yes"] = True
@dg.job(resource_defs={"writer": ExtendingResource(a_str="foo")})
def no_prefix_job() -> None:
hello_world_op()
assert no_prefix_job.execute_in_process().success
assert executed["yes"]
def test_extending_resource_nesting() -> None:
    """Nested resources resolve correctly on a subclassed resource.

    Covers both a fully-constructed nested resource and one deferred with
    ``configure_at_launch`` and supplied through run config.
    """
    executed = {}

    class NestedResource(dg.ConfigurableResource):
        a_str: str

    class BaseResource(dg.ConfigurableResource):
        nested: NestedResource
        a_str: str = "bar"
        an_int: int = 1

    class ExtendingResource(BaseResource):
        a_float: float = 1.0

    @dg.asset
    def an_asset(writer: ExtendingResource):
        assert writer.a_str == "foo"
        assert writer.nested.a_str == "baz"
        assert writer.an_int == 1
        assert writer.a_float == 1.0
        executed["yes"] = True

    defs = dg.Definitions(
        assets=[an_asset],
        resources={"writer": ExtendingResource(a_str="foo", nested=NestedResource(a_str="baz"))},
    )
    assert defs.resolve_implicit_global_asset_job_def().execute_in_process().success
    assert executed["yes"]
    executed.clear()

    # Same shape, but the nested resource is configured at launch time via
    # run config rather than constructed up front.
    nested_defer = NestedResource.configure_at_launch()
    defs = dg.Definitions(
        assets=[an_asset],
        resources={
            "nested_deferred": nested_defer,
            "writer": ExtendingResource(a_str="foo", nested=nested_defer),
        },
    )
    assert (
        defs.resolve_implicit_global_asset_job_def()
        .execute_in_process(
            run_config={"resources": {"nested_deferred": {"config": {"a_str": "baz"}}}}
        )
        .success
    )
    assert executed["yes"]
def test_execute_in_process() -> None:
    """Resources may be bound at ``execute_in_process`` time instead of on the job."""
    out_txt = []

    class WriterResource(dg.ConfigurableResource):
        prefix: str

        def output(self, text: str) -> None:
            out_txt.append(f"{self.prefix}{text}")

    @dg.op
    def hello_world_op(writer: WriterResource):
        writer.output("hello, world!")

    @dg.job
    def hello_world_job() -> None:
        hello_world_op()

    # Executing without supplying the required resource is a definition error,
    # and the op body must not have run.
    with pytest.raises(
        dg.DagsterInvalidDefinitionError,
        match="resource with key 'writer' required by op 'hello_world_op' was not provided",
    ):
        hello_world_job.execute_in_process()
    assert not out_txt

    # Bind resource as part of calling execute_in_process
    assert hello_world_job.execute_in_process(
        resources={"writer": WriterResource(prefix="msg: ")}
    ).success
    assert out_txt == ["msg: hello, world!"]
def test_aliased_field_structured_resource():
    """A pydantic-aliased field is constructed and configured under its alias."""
    out_txt = []

    class WriterResource(dg.ConfigurableResource):
        # Field name carries a trailing underscore; callers use the alias "prefix".
        prefix_: str = PyField(..., alias="prefix")

        def output(self, text: str) -> None:
            out_txt.append(f"{self.prefix_}{text}")

    @dg.op
    def hello_world_op(writer: WriterResource):
        writer.output("hello, world!")

    @dg.job(resource_defs={"writer": WriterResource(prefix="")})
    def no_prefix_job():
        hello_world_op()

    assert no_prefix_job.execute_in_process().success
    assert out_txt == ["hello, world!"]

    out_txt.clear()

    @dg.job(resource_defs={"writer": WriterResource(prefix="greeting: ")})
    def prefix_job():
        hello_world_op()

    assert prefix_job.execute_in_process().success
    assert out_txt == ["greeting: hello, world!"]

    out_txt.clear()

    # The alias is also the key used in run config for launch-time configuration.
    @dg.job(resource_defs={"writer": WriterResource.configure_at_launch()})
    def prefix_job_at_runtime():
        hello_world_op()

    assert prefix_job_at_runtime.execute_in_process(
        {"resources": {"writer": {"config": {"prefix": "runtime: "}}}}
    ).success
    assert out_txt == ["runtime: hello, world!"]
def test_from_resource_context_and_to_config_field() -> None:
    """A ConfigurableResourceFactory can back a function-style ``@resource``.

    ``to_config_schema`` supplies the schema; ``from_resource_context``
    rebuilds the factory (here producing ``a_string + "bar"``).
    """

    class StringResource(ConfigurableResourceFactory[str]):
        a_string: str

        def create_resource(self, context) -> str:
            return self.a_string + "bar"

    @dg.resource(config_schema=StringResource.to_config_schema())
    def string_resource_function_style(context: InitResourceContext) -> str:
        return StringResource.from_resource_context(context)

    assert (
        string_resource_function_style(dg.build_init_resource_context({"a_string": "foo"}))
        == "foobar"
    )
def test_from_resource_context_and_to_config_field_complex() -> None:
    """``to_config_schema``/``from_resource_context`` round-trip complex config shapes."""

    class MyComplexConfigResource(dg.ConfigurableResource):
        a_string: str
        a_list_of_ints: list[int]
        a_map_of_lists_of_maps_of_floats: Mapping[str, list[Mapping[str, float]]]

    @dg.resource(config_schema=MyComplexConfigResource.to_config_schema())
    def complex_config_resource_function_style(
        context: InitResourceContext,
    ) -> MyComplexConfigResource:
        return MyComplexConfigResource.from_resource_context(context)

    complex_config_resource = complex_config_resource_function_style(
        dg.build_init_resource_context(
            {
                "a_string": "foo",
                "a_list_of_ints": [1, 2, 3],
                "a_map_of_lists_of_maps_of_floats": {
                    "a": [{"b": 1.0}, {"c": 2.0}],
                    "d": [{"e": 3.0}, {"f": 4.0}],
                },
            }
        )
    )
    # Every nested value survives the config round-trip unchanged.
    assert complex_config_resource.a_string == "foo"
    assert complex_config_resource.a_list_of_ints == [1, 2, 3]
    assert complex_config_resource.a_map_of_lists_of_maps_of_floats == {
        "a": [{"b": 1.0}, {"c": 2.0}],
        "d": [{"e": 3.0}, {"f": 4.0}],
    }
def test_from_resource_context_and_to_config_empty() -> None:
    """``from_resource_context`` works for a resource with no config fields."""

    class NoConfigResource(dg.ConfigurableResource[str]):
        def get_string(self) -> str:
            return "foo"

    @dg.resource(config_schema=NoConfigResource.to_config_schema())
    def string_resource_function_style(context: InitResourceContext) -> str:
        return NoConfigResource.from_resource_context(context).get_string()  # type: ignore # (??)

    assert string_resource_function_style(dg.build_init_resource_context()) == "foo"
def test_context_on_resource_basic() -> None:
    """``get_resource_context`` raises before init and works once a context is bound."""
    executed = {}

    class ContextUsingResource(dg.ConfigurableResource):
        def access_context(self) -> None:
            self.get_resource_context()

    # Accessing the context before the resource is initialized is an error.
    with pytest.raises(
        CheckError, match="Attempted to get context before resource was initialized."
    ):
        ContextUsingResource().access_context()

    # Can access context after binding one
    ContextUsingResource().with_replaced_resource_context(
        dg.build_init_resource_context()
    ).access_context()

    # During an actual run, the context is bound by the framework.
    @dg.asset
    def my_test_asset(context_using: ContextUsingResource) -> None:
        context_using.access_context()
        executed["yes"] = True

    defs = dg.Definitions(
        assets=[my_test_asset],
        resources={"context_using": ContextUsingResource()},
    )
    assert defs.resolve_implicit_global_asset_job_def().execute_in_process().success
    assert executed["yes"]
def test_context_on_resource_use_instance() -> None:
    """A resource can fall back to the DagsterInstance from its init context."""
    executed = {}

    class OutputDirResource(dg.ConfigurableResource):
        output_dir: Optional[str] = None

        def get_effective_output_dir(self) -> str:
            # Explicit configuration wins; otherwise consult the bound instance.
            if self.output_dir:
                return self.output_dir

            context = self.get_resource_context()
            assert context.instance
            return context.instance.storage_directory()

    # No context bound yet -> accessing it is an error.
    with pytest.raises(
        CheckError, match="Attempted to get context before resource was initialized."
    ):
        OutputDirResource(output_dir=None).get_effective_output_dir()

    with mock.patch(
        "dagster._core.instance.DagsterInstance.storage_directory"
    ) as storage_directory:
        storage_directory.return_value = "/tmp"

        with DagsterInstance.ephemeral() as instance:
            assert (
                OutputDirResource(output_dir=None)
                .with_replaced_resource_context(dg.build_init_resource_context(instance=instance))
                .get_effective_output_dir()
                == "/tmp"
            )

        @dg.asset
        def my_other_output_asset(output_dir: OutputDirResource) -> None:
            assert output_dir.get_effective_output_dir() == "/tmp"
            executed["yes"] = True

        defs = dg.Definitions(
            assets=[my_other_output_asset],
            resources={"output_dir": OutputDirResource()},
        )
        assert defs.resolve_implicit_global_asset_job_def().execute_in_process().success
        assert executed["yes"]
def test_context_on_resource_runtime_config() -> None:
    """The instance fallback also works when the resource is configured at launch."""
    executed = {}

    class OutputDirResource(dg.ConfigurableResource):
        output_dir: Optional[str] = None

        def get_effective_output_dir(self) -> str:
            # Explicit configuration wins; otherwise consult the bound instance.
            if self.output_dir:
                return self.output_dir

            context = self.get_resource_context()
            assert context.instance
            return context.instance.storage_directory()

    with mock.patch(
        "dagster._core.instance.DagsterInstance.storage_directory"
    ) as storage_directory:
        storage_directory.return_value = "/tmp"

        @dg.asset
        def my_other_output_asset(output_dir: OutputDirResource) -> None:
            assert output_dir.get_effective_output_dir() == "/tmp"
            executed["yes"] = True

        defs = dg.Definitions(
            assets=[my_other_output_asset],
            resources={"output_dir": OutputDirResource.configure_at_launch()},
        )
        # output_dir is explicitly configured to None at launch, forcing the
        # instance-storage fallback path.
        assert (
            defs.resolve_implicit_global_asset_job_def()
            .execute_in_process(
                run_config={"resources": {"output_dir": {"config": {"output_dir": None}}}}
            )
            .success
        )
        assert executed["yes"]
def test_context_on_resource_nested() -> None:
    """The resource context propagates to resources nested inside a wrapper resource."""
    executed = {}

    class OutputDirResource(dg.ConfigurableResource):
        output_dir: Optional[str] = None

        def get_effective_output_dir(self) -> str:
            # Explicit configuration wins; otherwise consult the bound instance.
            if self.output_dir:
                return self.output_dir

            context = self.get_resource_context()
            assert context.instance
            return context.instance.storage_directory()

    class OutputDirWrapperResource(dg.ConfigurableResource):
        output_dir: OutputDirResource

    # Before initialization, the nested resource has no context either.
    with pytest.raises(
        CheckError, match="Attempted to get context before resource was initialized."
    ):
        OutputDirWrapperResource(
            output_dir=OutputDirResource(output_dir=None)
        ).output_dir.get_effective_output_dir()

    with mock.patch(
        "dagster._core.instance.DagsterInstance.storage_directory"
    ) as storage_directory:
        storage_directory.return_value = "/tmp"

        @dg.asset
        def my_other_output_asset(wrapper: OutputDirWrapperResource) -> None:
            assert wrapper.output_dir.get_effective_output_dir() == "/tmp"
            executed["yes"] = True

        defs = dg.Definitions(
            assets=[my_other_output_asset],
            resources={"wrapper": OutputDirWrapperResource(output_dir=OutputDirResource())},
        )
        assert defs.resolve_implicit_global_asset_job_def().execute_in_process().success
        assert executed["yes"]
def test_telemetry_custom_resource():
    """A user-defined resource reports itself as not Dagster-maintained."""

    class UserMaintainedResource(dg.ConfigurableResource):
        my_value: str

        @classmethod
        def _is_dagster_maintained(cls) -> bool:
            return False

    resource_instance = UserMaintainedResource(my_value="foo")
    assert not resource_instance._is_dagster_maintained()  # noqa: SLF001
def test_telemetry_dagster_resource():
    """A resource flagged as Dagster-maintained reports that via the telemetry hook."""

    class MaintainedResource(dg.ConfigurableResource):
        my_value: str

        @classmethod
        def _is_dagster_maintained(cls) -> bool:
            return True

    resource_instance = MaintainedResource(my_value="foo")
    assert resource_instance._is_dagster_maintained()  # noqa: SLF001
def test_partial_resource_checks() -> None:
    """Partial (``configure_at_launch``) resources satisfy nested resource fields."""

    class IntResource(dg.ConfigurableResource):
        my_int: int

    class StrResource(dg.ConfigurableResource):
        my_str: str

    class MergeResource(dg.ConfigurableResource):
        str_res: StrResource
        int_res: IntResource

    # Correctly-typed partial resources are accepted.
    MergeResource(
        str_res=StrResource.configure_at_launch(),
        int_res=IntResource.configure_at_launch(),
    )

    # this should fail but does not https://github.com/dagster-io/dagster/issues/18017
    MergeResource(
        int_res=StrResource.configure_at_launch(),
        str_res=IntResource.configure_at_launch(),
    )
| AnIOManagerImplementation |
python | dagster-io__dagster | python_modules/libraries/dagster-sigma/dagster_sigma/components/sigma_component.py | {
"start": 2725,
"end": 4559
} | class ____(AssetSpecUpdateKwargs, Resolvable):
for_workbook: Optional[ResolvedTargetedSigmaTranslationFn] = None
for_dataset: Optional[ResolvedTargetedSigmaTranslationFn] = None
def resolve_multilayer_translation(context: ResolutionContext, model):
"""The Sigma translation schema supports defining global transforms
as well as per-content-type transforms. This resolver composes the
per-content-type transforms with the global transforms.
"""
info = TranslatorResolvingInfo(
asset_attributes=model,
resolution_context=context,
model_key="translation",
)
def _translation_fn(base_asset_spec: AssetSpec, data: SigmaTranslatorData):
processed_spec = info.get_asset_spec(
base_asset_spec,
{
"data": data,
"spec": base_asset_spec,
},
)
nested_translation_fns = resolve_fields(
model=model,
resolved_cls=SigmaAssetArgs,
context=context.with_scope(
**{
"data": data,
"spec": processed_spec,
}
),
)
for_workbook = nested_translation_fns.get("for_workbook")
for_dataset = nested_translation_fns.get("for_dataset")
if isinstance(data, SigmaWorkbookTranslatorData) and for_workbook:
return for_workbook(processed_spec, data)
if isinstance(data, SigmaDatasetTranslatorData) and for_dataset:
return for_dataset(processed_spec, data)
return processed_spec
return _translation_fn
ResolvedMultilayerTranslationFn: TypeAlias = Annotated[
TranslationFn,
Resolver(
resolve_multilayer_translation,
model_field_type=Union[str, SigmaAssetArgs.model()],
),
]
| SigmaAssetArgs |
python | pyca__cryptography | tests/hazmat/primitives/test_ec.py | {
"start": 29186,
"end": 48176
} | class ____:
@pytest.mark.parametrize(
("fmt", "password"),
itertools.product(
[
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.PrivateFormat.PKCS8,
],
[
b"s",
b"longerpassword",
b"!*$&(@#$*&($T@%_somesymbols",
b"\x01" * 1000,
],
),
)
def test_private_bytes_encrypted_pem(self, backend, fmt, password):
skip_fips_traditional_openssl(backend, fmt)
_skip_curve_unsupported(backend, ec.SECP256R1())
key_bytes = load_vectors_from_file(
os.path.join("asymmetric", "PKCS8", "ec_private_key.pem"),
lambda pemfile: pemfile.read().encode(),
)
key = serialization.load_pem_private_key(key_bytes, None, backend)
assert isinstance(key, ec.EllipticCurvePrivateKey)
serialized = key.private_bytes(
serialization.Encoding.PEM,
fmt,
serialization.BestAvailableEncryption(password),
)
loaded_key = serialization.load_pem_private_key(
serialized, password, backend
)
assert isinstance(loaded_key, ec.EllipticCurvePrivateKey)
loaded_priv_num = loaded_key.private_numbers()
priv_num = key.private_numbers()
assert loaded_priv_num == priv_num
@pytest.mark.supported(
only_if=lambda backend: backend._fips_enabled,
skip_message="Requires FIPS",
)
def test_traditional_serialization_fips(self, backend):
key_bytes = load_vectors_from_file(
os.path.join("asymmetric", "PKCS8", "ec_private_key.pem"),
lambda pemfile: pemfile.read().encode(),
)
key = serialization.load_pem_private_key(key_bytes, None, backend)
assert isinstance(key, ec.EllipticCurvePrivateKey)
with pytest.raises(ValueError):
key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.BestAvailableEncryption(b"password"),
)
@pytest.mark.parametrize(
("encoding", "fmt"),
[
(serialization.Encoding.Raw, serialization.PrivateFormat.PKCS8),
(serialization.Encoding.DER, serialization.PrivateFormat.Raw),
(serialization.Encoding.Raw, serialization.PrivateFormat.Raw),
(serialization.Encoding.X962, serialization.PrivateFormat.PKCS8),
],
)
def test_private_bytes_rejects_invalid(self, encoding, fmt, backend):
_skip_curve_unsupported(backend, ec.SECP256R1())
key = ec.generate_private_key(ec.SECP256R1(), backend)
with pytest.raises((TypeError, ValueError)):
key.private_bytes(encoding, fmt, serialization.NoEncryption())
@pytest.mark.parametrize(
("fmt", "password"),
[
[serialization.PrivateFormat.PKCS8, b"s"],
[serialization.PrivateFormat.PKCS8, b"longerpassword"],
[serialization.PrivateFormat.PKCS8, b"!*$&(@#$*&($T@%_somesymbol"],
[serialization.PrivateFormat.PKCS8, b"\x01" * 1000],
],
)
def test_private_bytes_encrypted_der(self, backend, fmt, password):
_skip_curve_unsupported(backend, ec.SECP256R1())
key_bytes = load_vectors_from_file(
os.path.join("asymmetric", "PKCS8", "ec_private_key.pem"),
lambda pemfile: pemfile.read().encode(),
)
key = serialization.load_pem_private_key(key_bytes, None, backend)
assert isinstance(key, ec.EllipticCurvePrivateKey)
serialized = key.private_bytes(
serialization.Encoding.DER,
fmt,
serialization.BestAvailableEncryption(password),
)
loaded_key = serialization.load_der_private_key(
serialized, password, backend
)
assert isinstance(loaded_key, ec.EllipticCurvePrivateKey)
loaded_priv_num = loaded_key.private_numbers()
priv_num = key.private_numbers()
assert loaded_priv_num == priv_num
@pytest.mark.parametrize(
("encoding", "fmt", "loader_func"),
[
[
serialization.Encoding.PEM,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.load_pem_private_key,
],
[
serialization.Encoding.DER,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.load_der_private_key,
],
[
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
serialization.load_pem_private_key,
],
[
serialization.Encoding.DER,
serialization.PrivateFormat.PKCS8,
serialization.load_der_private_key,
],
],
)
def test_private_bytes_unencrypted(
self, backend, encoding, fmt, loader_func
):
_skip_curve_unsupported(backend, ec.SECP256R1())
key_bytes = load_vectors_from_file(
os.path.join("asymmetric", "PKCS8", "ec_private_key.pem"),
lambda pemfile: pemfile.read().encode(),
)
key = serialization.load_pem_private_key(key_bytes, None, backend)
assert isinstance(key, ec.EllipticCurvePrivateKey)
serialized = key.private_bytes(
encoding, fmt, serialization.NoEncryption()
)
loaded_key = loader_func(serialized, None, backend)
assert isinstance(loaded_key, ec.EllipticCurvePrivateKey)
loaded_priv_num = loaded_key.private_numbers()
priv_num = key.private_numbers()
assert loaded_priv_num == priv_num
@pytest.mark.skip_fips(
reason="Traditional OpenSSL key format is not supported in FIPS mode."
)
@pytest.mark.parametrize(
("key_path", "encoding", "loader_func"),
[
[
os.path.join(
"asymmetric", "PEM_Serialization", "ec_private_key.pem"
),
serialization.Encoding.PEM,
serialization.load_pem_private_key,
],
[
os.path.join(
"asymmetric", "DER_Serialization", "ec_private_key.der"
),
serialization.Encoding.DER,
serialization.load_der_private_key,
],
],
)
def test_private_bytes_traditional_openssl_unencrypted(
self, backend, key_path, encoding, loader_func
):
_skip_curve_unsupported(backend, ec.SECP256R1())
key_bytes = load_vectors_from_file(
key_path, lambda pemfile: pemfile.read(), mode="rb"
)
key = loader_func(key_bytes, None, backend)
serialized = key.private_bytes(
encoding,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.NoEncryption(),
)
assert serialized == key_bytes
def test_private_bytes_traditional_der_encrypted_invalid(self, backend):
_skip_curve_unsupported(backend, ec.SECP256R1())
key = load_vectors_from_file(
os.path.join("asymmetric", "PKCS8", "ec_private_key.pem"),
lambda pemfile: serialization.load_pem_private_key(
pemfile.read().encode(), None, backend
),
)
with pytest.raises(ValueError):
key.private_bytes(
serialization.Encoding.DER,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.BestAvailableEncryption(b"password"),
)
with pytest.raises(ValueError):
key.private_bytes(
serialization.Encoding.SMIME,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.NoEncryption(),
)
def test_private_bytes_invalid_encoding(self, backend):
_skip_curve_unsupported(backend, ec.SECP256R1())
key = load_vectors_from_file(
os.path.join("asymmetric", "PKCS8", "ec_private_key.pem"),
lambda pemfile: serialization.load_pem_private_key(
pemfile.read().encode(), None, backend
),
)
with pytest.raises(TypeError):
key.private_bytes(
"notencoding", # type: ignore[arg-type]
serialization.PrivateFormat.PKCS8,
serialization.NoEncryption(),
)
def test_private_bytes_invalid_format(self, backend):
_skip_curve_unsupported(backend, ec.SECP256R1())
key = load_vectors_from_file(
os.path.join("asymmetric", "PKCS8", "ec_private_key.pem"),
lambda pemfile: serialization.load_pem_private_key(
pemfile.read().encode(), None, backend
),
)
with pytest.raises(TypeError):
key.private_bytes(
serialization.Encoding.PEM,
"invalidformat", # type: ignore[arg-type]
serialization.NoEncryption(),
)
def test_private_bytes_invalid_encryption_algorithm(self, backend):
_skip_curve_unsupported(backend, ec.SECP256R1())
key = load_vectors_from_file(
os.path.join("asymmetric", "PKCS8", "ec_private_key.pem"),
lambda pemfile: serialization.load_pem_private_key(
pemfile.read().encode(), None, backend
),
)
with pytest.raises(TypeError):
key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.TraditionalOpenSSL,
"notanencalg", # type: ignore[arg-type]
)
def test_private_bytes_unsupported_encryption_type(self, backend):
_skip_curve_unsupported(backend, ec.SECP256R1())
key = load_vectors_from_file(
os.path.join("asymmetric", "PKCS8", "ec_private_key.pem"),
lambda pemfile: serialization.load_pem_private_key(
pemfile.read().encode(), None, backend
),
)
with pytest.raises(ValueError):
key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.TraditionalOpenSSL,
DummyKeySerializationEncryption(),
)
def test_public_bytes_from_derived_public_key(self, backend):
_skip_curve_unsupported(backend, ec.SECP256R1())
key = load_vectors_from_file(
os.path.join("asymmetric", "PKCS8", "ec_private_key.pem"),
lambda pemfile: serialization.load_pem_private_key(
pemfile.read().encode(), None, backend
),
)
public = key.public_key()
pem = public.public_bytes(
serialization.Encoding.PEM,
serialization.PublicFormat.SubjectPublicKeyInfo,
)
parsed_public = serialization.load_pem_public_key(pem, backend)
assert parsed_public
def test_load_private_key_unsupported_explicit_parameters(self):
# This vector is P256 except the prime field value is wrong
with pytest.raises(
exceptions.UnsupportedAlgorithm, match="explicit parameters"
):
load_vectors_from_file(
os.path.join(
"asymmetric", "EC", "explicit_parameters_private_key.pem"
),
lambda pemfile: serialization.load_pem_private_key(
pemfile.read(), password=None
),
mode="rb",
)
with pytest.raises(
exceptions.UnsupportedAlgorithm, match="explicit parameters"
):
# This vector encodes SECT233R1 explicitly
load_vectors_from_file(
os.path.join(
"asymmetric",
"EC",
"explicit_parameters_wap_wsg_idm_ecid_wtls11_private_key.pem",
),
lambda pemfile: serialization.load_pem_private_key(
pemfile.read(), password=None
),
mode="rb",
)
@pytest.mark.parametrize(
("curve", "file"),
[
(ec.SECP256R1, "secp256r1-explicit-seed.pem"),
(ec.SECP256R1, "secp256r1-explicit-no-seed.pem"),
(ec.SECP384R1, "secp384r1-explicit-seed.pem"),
(ec.SECP384R1, "secp384r1-explicit-no-seed.pem"),
(ec.SECP521R1, "secp521r1-explicit-seed.pem"),
(ec.SECP521R1, "secp521r1-explicit-no-seed.pem"),
],
)
def test_load_private_key_explicit_parameters(self, curve, file, backend):
_skip_curve_unsupported(backend, curve())
key = load_vectors_from_file(
os.path.join("asymmetric", "EC", file),
lambda pemfile: serialization.load_pem_private_key(
pemfile.read(), password=None
),
mode="rb",
)
assert isinstance(key, ec.EllipticCurvePrivateKey)
assert isinstance(key.curve, curve)
@pytest.mark.parametrize(
("curve", "file"),
[
(ec.SECP256R1, "secp256r1-pub-explicit-seed.pem"),
(ec.SECP256R1, "secp256r1-pub-explicit-no-seed.pem"),
(ec.SECP384R1, "secp384r1-pub-explicit-seed.pem"),
(ec.SECP384R1, "secp384r1-pub-explicit-no-seed.pem"),
(ec.SECP521R1, "secp521r1-pub-explicit-seed.pem"),
(ec.SECP521R1, "secp521r1-pub-explicit-no-seed.pem"),
],
)
def test_load_public_key_explicit_parameters(self, curve, file, backend):
_skip_curve_unsupported(backend, curve())
key = load_vectors_from_file(
os.path.join("asymmetric", "EC", file),
lambda pemfile: serialization.load_pem_public_key(pemfile.read()),
mode="rb",
)
assert isinstance(key, ec.EllipticCurvePublicKey)
assert isinstance(key.curve, curve)
def test_load_private_key_unsupported_curve(self):
with pytest.raises((ValueError, exceptions.UnsupportedAlgorithm)):
load_vectors_from_file(
os.path.join("asymmetric", "EC", "secp128r1_private_key.pem"),
lambda pemfile: serialization.load_pem_private_key(
pemfile.read(), password=None
),
mode="rb",
)
@pytest.mark.parametrize(
("key_file", "curve"),
[
("sect163k1-spki.pem", ec.SECT163K1),
("sect163r2-spki.pem", ec.SECT163R2),
("sect233k1-spki.pem", ec.SECT233K1),
("sect233r1-spki.pem", ec.SECT233R1),
],
)
def test_load_public_keys(self, key_file, curve, backend):
_skip_curve_unsupported(backend, curve())
key = load_vectors_from_file(
os.path.join("asymmetric", "EC", key_file),
lambda pemfile: serialization.load_pem_public_key(
pemfile.read(),
),
mode="rb",
)
assert isinstance(key, ec.EllipticCurvePublicKey)
assert isinstance(key.curve, curve)
def test_pkcs8_inconsistent_curve(self):
# The curve can appear twice in a PKCS8 EC key, error if they're not
# consistent
data = load_vectors_from_file(
os.path.join("asymmetric", "PKCS8", "ec-inconsistent-curve.pem"),
lambda f: f.read(),
mode="rb",
)
with pytest.raises(ValueError):
serialization.load_pem_private_key(data, password=None)
data = load_vectors_from_file(
os.path.join("asymmetric", "PKCS8", "ec-inconsistent-curve2.pem"),
lambda f: f.read(),
mode="rb",
)
with pytest.raises(ValueError):
serialization.load_pem_private_key(data, password=None)
def test_pkcs8_consistent_curve(self):
# Like the above, but both the inner and outer curves match
key = load_vectors_from_file(
os.path.join("asymmetric", "PKCS8", "ec-consistent-curve.pem"),
lambda f: serialization.load_pem_private_key(
f.read(), password=None
),
mode="rb",
)
assert isinstance(key, EllipticCurvePrivateKey)
assert isinstance(key.curve, ec.SECP256R1)
def test_load_private_key_missing_curve(self):
data = load_vectors_from_file(
os.path.join("asymmetric", "EC", "ec-missing-curve.pem"),
lambda f: f.read(),
mode="rb",
)
with pytest.raises(ValueError):
serialization.load_pem_private_key(data, password=None)
def test_load_private_key_invalid_version(self):
data = load_vectors_from_file(
os.path.join("asymmetric", "PKCS8", "ec-invalid-version.pem"),
lambda f: f.read(),
mode="rb",
)
with pytest.raises(ValueError):
serialization.load_pem_private_key(data, password=None)
def test_private_bytes_high_private_key_bit_set(self):
data = load_vectors_from_file(
os.path.join("asymmetric", "EC", "high-bit-set.pem"),
lambda f: f.read(),
mode="rb",
)
key = serialization.load_pem_private_key(data, password=None)
assert isinstance(key, ec.EllipticCurvePrivateKey)
# The high bit is set in the private key. Ensure that it's not
# serialized with an additional leading 0, as you would if serializing
# an ASN.1 integer.
expected_private_key = (
0xA07AB72DF25722849DF17FCE9AF1D2AC02EFA32C3251D8E075C29EA868D9E2A2
)
assert key.private_numbers().private_value == expected_private_key
assert (
key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.NoEncryption(),
)
== data
)
def test_private_bytes_small_key(self):
key = ec.derive_private_key(private_value=1, curve=ec.SECP256R1())
der = key.private_bytes(
serialization.Encoding.DER,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.NoEncryption(),
)
# Ensure that serialized keys are always padded to the group order
# length.
assert (b"\x00" * 31 + b"\x01") in der
def test_load_private_key_short_key_warngs(self):
data = load_vectors_from_file(
os.path.join("asymmetric", "EC", "truncated-private-key.der"),
lambda f: f.read(),
mode="rb",
)
with pytest.raises(ValueError, match="private key value is too short"):
serialization.load_der_private_key(data, password=None)
| TestECSerialization |
python | sqlalchemy__sqlalchemy | test/aaa_profiling/test_orm.py | {
"start": 1100,
"end": 4299
} | class ____(NoCache, fixtures.MappedTest):
__requires__ = ("python_profiling_backend",)
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"parent",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(20)),
)
Table(
"child",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(20)),
Column(
"parent_id", Integer, ForeignKey("parent.id"), nullable=False
),
)
@classmethod
def setup_classes(cls):
class Parent(cls.Basic):
pass
class Child(cls.Basic):
pass
@classmethod
def setup_mappers(cls):
Child, Parent, parent, child = (
cls.classes.Child,
cls.classes.Parent,
cls.tables.parent,
cls.tables.child,
)
cls.mapper_registry.map_imperatively(
Parent,
parent,
properties={"children": relationship(Child, backref="parent")},
)
cls.mapper_registry.map_imperatively(Child, child)
@classmethod
def insert_data(cls, connection):
parent, child = cls.tables.parent, cls.tables.child
connection.execute(parent.insert(), {"id": 1, "data": "p1"})
connection.execute(
child.insert(), {"id": 1, "data": "p1c1", "parent_id": 1}
)
def test_merge_no_load(self):
Parent = self.classes.Parent
sess = fixture_session()
sess2 = fixture_session()
p1 = sess.get(Parent, 1)
p1.children
# down from 185 on this this is a small slice of a usually
# bigger operation so using a small variance
sess2.connection() # autobegin
@profiling.function_call_count(variance=0.20)
def go1():
return sess2.merge(p1, load=False)
p2 = go1()
# third call, merge object already present. almost no calls.
sess2.connection() # autobegin
@profiling.function_call_count(variance=0.10, warmup=1)
def go2():
return sess2.merge(p2, load=False)
go2()
def test_merge_load(self):
Parent = self.classes.Parent
sess = fixture_session()
sess2 = fixture_session()
p1 = sess.get(Parent, 1)
p1.children
# preloading of collection took this down from 1728 to 1192
# using sqlite3 the C extension took it back up to approx. 1257
# (py2.6)
sess2.connection() # autobegin
# use a huge variance on this because it really changes quite randomly
# on different Python verrsions / CPUs etc.
@profiling.function_call_count(variance=0.50)
def go():
sess2.merge(p1)
go()
# one more time, count the SQL
def go2():
sess2.merge(p1)
sess2 = sessionmaker(testing.db)()
self.assert_sql_count(testing.db, go2, 2)
| MergeTest |
python | fluentpython__example-code-2e | 15-more-types/cafeteria/covariant.py | {
"start": 132,
"end": 282
} | class ____(Juice):
"""Delicious juice from Brazilian oranges."""
# tag::BEVERAGE_TYPES[]
T_co = TypeVar('T_co', covariant=True) # <1>
| OrangeJuice |
python | facebook__pyre-check | client/commands/start.py | {
"start": 2450,
"end": 2986
} | class ____:
shared_memory_path: str
changed_files_path: Optional[str] = None
def serialize(self) -> Tuple[str, Dict[str, str]]:
return (
"load_from_file",
{
"shared_memory_path": self.shared_memory_path,
**(
{}
if self.changed_files_path is None
else {"changed_files_path": self.changed_files_path}
),
},
)
@dataclasses.dataclass(frozen=True)
| LoadSavedStateFromFile |
python | milvus-io__pymilvus | tests/test_grpc_handler_mutations.py | {
"start": 604,
"end": 5447
} | class ____:
def test_insert_rows(self, channel: Any, client_thread: Any) -> None:
handler = GrpcHandler(channel=channel)
# Mock the schema
with patch.object(handler, 'describe_collection') as mock_describe:
mock_describe.return_value = {
"fields": [
{"name": "id", "type": DataType.INT64},
{"name": "vector", "type": DataType.FLOAT_VECTOR, "params": {"dim": 128}}
],
"enable_dynamic_field": False,
"update_timestamp": 0
}
entities = [
{"id": 1, "vector": [0.1] * 128},
{"id": 2, "vector": [0.2] * 128}
]
insert_future = client_thread.submit(
handler.insert_rows,
collection_name="test_collection",
entities=entities,
timeout=10
)
(invocation_metadata, request, rpc) = channel.take_unary_unary(
descriptor.methods_by_name["Insert"]
)
rpc.send_initial_metadata(())
expected_result = milvus_pb2.MutationResult(
status=common_pb2.Status(code=0),
IDs=schema_pb2.IDs(int_id=schema_pb2.LongArray(data=[1, 2])),
insert_cnt=2,
timestamp=100
)
rpc.terminate(expected_result, (), grpc.StatusCode.OK, "")
result = insert_future.result()
log.warning(f"result = {result}, type={type(result)}")
assert isinstance(result, MutationResult)
def test_insert_rows_single_entity(self, channel: Any, client_thread: Any) -> None:
handler = GrpcHandler(channel=channel)
with patch.object(handler, 'describe_collection') as mock_describe:
mock_describe.return_value = {
"fields": [
{"name": "id", "type": DataType.INT64}
],
"enable_dynamic_field": False,
"update_timestamp": 0
}
entity = {"id": 1}
insert_future = client_thread.submit(
handler.insert_rows,
collection_name="test_collection",
entities=entity, # Single dict instead of list
timeout=10
)
(invocation_metadata, request, rpc) = channel.take_unary_unary(
descriptor.methods_by_name["Insert"]
)
rpc.send_initial_metadata(())
expected_result = milvus_pb2.MutationResult(
status=common_pb2.Status(code=0),
IDs=schema_pb2.IDs(int_id=schema_pb2.LongArray(data=[1])),
insert_cnt=1,
timestamp=100
)
rpc.terminate(expected_result, (), grpc.StatusCode.OK, "")
result = insert_future.result()
assert isinstance(result, MutationResult)
def test_insert_rows_schema_mismatch(self, channel: Any, client_thread: Any) -> None:
handler = GrpcHandler(channel=channel)
with patch.object(handler, 'describe_collection') as mock_describe:
mock_describe.return_value = {
"fields": [
{"name": "id", "type": DataType.INT64}
],
}
entities = [{"id": 1}]
insert_future = client_thread.submit(
handler.insert_rows,
collection_name="test_collection",
entities=entities,
timeout=10
)
(invocation_metadata, request, rpc) = channel.take_unary_unary(
descriptor.methods_by_name["Insert"]
)
rpc.send_initial_metadata(())
# Return schema mismatch error
expected_result = milvus_pb2.MutationResult(
status=common_pb2.Status(
error_code=common_pb2.SchemaMismatch,
reason="Schema mismatch"
)
)
rpc.terminate(expected_result, (), grpc.StatusCode.OK, "")
# Should trigger recursive call with updated schema
(invocation_metadata2, request2, rpc2) = channel.take_unary_unary(
descriptor.methods_by_name["Insert"]
)
rpc2.send_initial_metadata(())
expected_result2 = milvus_pb2.MutationResult(
status=common_pb2.Status(code=0),
IDs=schema_pb2.IDs(int_id=schema_pb2.LongArray(data=[1])),
insert_cnt=1,
timestamp=100
)
rpc2.terminate(expected_result2, (), grpc.StatusCode.OK, "")
result = insert_future.result()
assert isinstance(result, MutationResult)
| TestGrpcHandlerInsertOperations |
python | pytest-dev__pytest-cov | tests/contextful.py | {
"start": 195,
"end": 2014
} | class ____(unittest.TestCase):
items: ClassVar = []
@classmethod
def setUpClass(cls):
cls.items.append('hello') # s3
@classmethod
def tearDownClass(cls):
cls.items.pop() # t4
def setUp(self):
self.number = 1 # r3 r4
def tearDown(self):
self.number = None # r3 r4
def test_03(self):
assert self.number == 1 # r3
assert self.items[0] == 'hello' # r3
def test_04(self):
assert self.number == 1 # r4
assert self.items[0] == 'hello' # r4
@pytest.fixture
def some_data():
return [1, 2, 3] # s5 s6
def test_05(some_data):
assert len(some_data) == 3 # r5
@pytest.fixture
def more_data(some_data):
return [2 * x for x in some_data] # s6
def test_06(some_data, more_data):
assert len(some_data) == len(more_data) # r6
@pytest.fixture
def expensive_data():
return list(range(10)) # s7
def test_07(expensive_data):
assert len(expensive_data) == 10 # r7
def test_08(expensive_data):
assert len(expensive_data) == 10 # r8
@pytest.fixture(params=[1, 2, 3])
def parametrized_number(request):
return request.param # s9-1 s9-2 s9-3
def test_09(parametrized_number):
assert parametrized_number > 0 # r9-1 r9-2 r9-3
def test_10():
assert 1 == 1 # r10
@pytest.mark.parametrize(
('x', 'ans'),
[
(1, 101),
(2, 202),
],
)
def test_11(x, ans):
assert 100 * x + x == ans # r11-1 r11-2
@pytest.mark.parametrize(
('x', 'ans'),
[
(1, 101),
(2, 202),
],
ids=['one', 'two'],
)
def test_12(x, ans):
assert 100 * x + x == ans # r12-1 r12-2
@pytest.mark.parametrize('x', [1, 2])
@pytest.mark.parametrize('y', [3, 4])
def test_13(x, y):
assert x + y > 0 # r13-1 r13-2 r13-3 r13-4
| OldStyleTests |
python | protocolbuffers__protobuf | python/google/protobuf/descriptor.py | {
"start": 39594,
"end": 42670
} | class ____(DescriptorBase):
"""Descriptor for a method in a service.
Attributes:
name (str): Name of the method within the service.
full_name (str): Full name of method.
index (int): 0-indexed index of the method inside the service.
containing_service (ServiceDescriptor): The service that contains this
method.
input_type (Descriptor): The descriptor of the message that this method
accepts.
output_type (Descriptor): The descriptor of the message that this method
returns.
client_streaming (bool): Whether this method uses client streaming.
server_streaming (bool): Whether this method uses server streaming.
options (descriptor_pb2.MethodOptions or None): Method options message, or
None to use default method options.
"""
if _USE_C_DESCRIPTORS:
_C_DESCRIPTOR_CLASS = _message.MethodDescriptor
def __new__(
cls,
name,
full_name,
index,
containing_service,
input_type,
output_type,
client_streaming=False,
server_streaming=False,
options=None,
serialized_options=None,
create_key=None,
):
_message.Message._CheckCalledFromGeneratedFile() # pylint: disable=protected-access
return _message.default_pool.FindMethodByName(full_name)
def __init__(
self,
name,
full_name,
index,
containing_service,
input_type,
output_type,
client_streaming=False,
server_streaming=False,
options=None,
serialized_options=None,
create_key=None,
):
"""The arguments are as described in the description of MethodDescriptor
attributes above.
Note that containing_service may be None, and may be set later if necessary.
"""
if create_key is not _internal_create_key:
_Deprecated('create function MethodDescriptor()')
super(MethodDescriptor, self).__init__(
containing_service.file if containing_service else None,
options,
serialized_options,
'MethodOptions',
)
self.name = name
self.full_name = full_name
self.index = index
self.containing_service = containing_service
self.input_type = input_type
self.output_type = output_type
self.client_streaming = client_streaming
self.server_streaming = server_streaming
@property
def _parent(self):
return self.containing_service
def CopyToProto(self, proto):
"""Copies this to a descriptor_pb2.MethodDescriptorProto.
Args:
proto (descriptor_pb2.MethodDescriptorProto): An empty descriptor proto.
Raises:
Error: If self couldn't be serialized, due to too few constructor
arguments.
"""
if self.containing_service is not None:
from google.protobuf import descriptor_pb2
service_proto = descriptor_pb2.ServiceDescriptorProto()
self.containing_service.CopyToProto(service_proto)
proto.CopyFrom(service_proto.method[self.index])
else:
raise Error('Descriptor does not contain a service.')
| MethodDescriptor |
python | great-expectations__great_expectations | tests/core/test_expectation_suite.py | {
"start": 27423,
"end": 31407
} | class ____:
"""Tests around the suite_parameter_options property of ExpectationSuites.
Note: suite_parameter_options is currently a sorted tuple, but doesn't necessarily have to be
"""
SUITE_PARAMETER_VALUE_SET = "my_value_set"
SUITE_PARAMETER_MIN = "my_min"
SUITE_PARAMETER_MAX = "my_max"
@pytest.fixture
def expectation_suite(self) -> ExpectationSuite:
get_context(mode="ephemeral")
return ExpectationSuite("test-suite")
@pytest.fixture
def expectation_with_suite_parameter(
self,
) -> Expectation:
return gxe.ExpectColumnDistinctValuesToBeInSet(
column="a", value_set={"$PARAMETER": self.SUITE_PARAMETER_VALUE_SET}
)
@pytest.fixture
def expectation_with_duplicate_suite_parameter(
self,
) -> Expectation:
return gxe.ExpectColumnDistinctValuesToContainSet(
column="a", value_set={"$PARAMETER": self.SUITE_PARAMETER_VALUE_SET}
)
@pytest.fixture
def expectation_with_multiple_suite_parameters(
self,
) -> Expectation:
return gxe.ExpectColumnValuesToBeBetween(
column="c",
min_value={"$PARAMETER": self.SUITE_PARAMETER_MIN},
max_value={"$PARAMETER": self.SUITE_PARAMETER_MAX},
)
@pytest.fixture
def expectation_without_suite_parameters(
self,
) -> Expectation:
return gxe.ExpectColumnDistinctValuesToBeInSet(column="d", value_set=[1, 2])
@pytest.mark.unit
def test_empty_suite(self, expectation_suite: ExpectationSuite):
assert expectation_suite.suite_parameter_options == tuple()
@pytest.mark.unit
def test_expectations_but_no_evaluation_params(
self,
expectation_suite: ExpectationSuite,
expectation_without_suite_parameters: Expectation,
):
expectation_suite.add_expectation(expectation_without_suite_parameters)
assert expectation_suite.suite_parameter_options == tuple()
@pytest.mark.unit
def test_expectation_with_suite_parameter(
self,
expectation_suite: ExpectationSuite,
expectation_with_suite_parameter: Expectation,
):
expectation_suite.add_expectation(expectation_with_suite_parameter)
assert expectation_suite.suite_parameter_options == (self.SUITE_PARAMETER_VALUE_SET,)
@pytest.mark.unit
def test_duplicate_suite_parameters_only_show_once(
self,
expectation_suite: ExpectationSuite,
expectation_with_suite_parameter: Expectation,
expectation_with_duplicate_suite_parameter: Expectation,
):
expectation_suite.add_expectation(expectation_with_suite_parameter)
expectation_suite.add_expectation(expectation_with_duplicate_suite_parameter)
assert expectation_suite.suite_parameter_options == (self.SUITE_PARAMETER_VALUE_SET,)
@pytest.mark.unit
def test_multiple_suite_parameters_on_one_expectation(
self,
expectation_suite: ExpectationSuite,
expectation_with_multiple_suite_parameters: Expectation,
):
expectation_suite.add_expectation(expectation_with_multiple_suite_parameters)
assert expectation_suite.suite_parameter_options == (
self.SUITE_PARAMETER_MAX,
self.SUITE_PARAMETER_MIN,
)
@pytest.mark.unit
def test_multiple_suite_parameters_across_multiple_expectation(
self,
expectation_suite: ExpectationSuite,
expectation_with_suite_parameter: Expectation,
expectation_with_multiple_suite_parameters: Expectation,
):
expectation_suite.add_expectation(expectation_with_suite_parameter)
expectation_suite.add_expectation(expectation_with_multiple_suite_parameters)
assert expectation_suite.suite_parameter_options == (
self.SUITE_PARAMETER_MAX,
self.SUITE_PARAMETER_MIN,
self.SUITE_PARAMETER_VALUE_SET,
)
| TestSuiteParameterOptions |
python | numba__numba | numba/tests/test_recarray_usecases.py | {
"start": 1262,
"end": 3839
} | class ____(TestCase):
def setUp(self):
fields = [('f1', '<f8'), ('s1', '|S3'), ('f2', '<f8')]
self.unaligned_dtype = np.dtype(fields)
self.aligned_dtype = np.dtype(fields, align=True)
def test_usecase1(self):
pyfunc = usecase1
# This is an unaligned dtype
mystruct_dt = np.dtype([('p', np.float64),
('row', np.float64),
('col', np.float64)])
mystruct = numpy_support.from_dtype(mystruct_dt)
cfunc = njit((mystruct[:], mystruct[:]))(pyfunc)
st1 = np.recarray(3, dtype=mystruct_dt)
st2 = np.recarray(3, dtype=mystruct_dt)
st1.p = np.arange(st1.size) + 1
st1.row = np.arange(st1.size) + 1
st1.col = np.arange(st1.size) + 1
st2.p = np.arange(st2.size) + 1
st2.row = np.arange(st2.size) + 1
st2.col = np.arange(st2.size) + 1
expect1 = st1.copy()
expect2 = st2.copy()
got1 = expect1.copy()
got2 = expect2.copy()
pyfunc(expect1, expect2)
cfunc(got1, got2)
np.testing.assert_equal(expect1, got1)
np.testing.assert_equal(expect2, got2)
def _setup_usecase2to5(self, dtype):
N = 5
a = np.recarray(N, dtype=dtype)
a.f1 = np.arange(N)
a.f2 = np.arange(2, N + 2)
a.s1 = np.array(['abc'] * a.shape[0], dtype='|S3')
return a
def _test_usecase2to5(self, pyfunc, dtype):
array = self._setup_usecase2to5(dtype)
record_type = numpy_support.from_dtype(dtype)
cfunc = njit((record_type[:], types.intp))(pyfunc)
with captured_stdout():
pyfunc(array, len(array))
expect = sys.stdout.getvalue()
with captured_stdout():
cfunc(array, len(array))
got = sys.stdout.getvalue()
self.assertEqual(expect, got)
def test_usecase2(self):
self._test_usecase2to5(usecase2, self.unaligned_dtype)
self._test_usecase2to5(usecase2, self.aligned_dtype)
def test_usecase3(self):
self._test_usecase2to5(usecase3, self.unaligned_dtype)
self._test_usecase2to5(usecase3, self.aligned_dtype)
def test_usecase4(self):
self._test_usecase2to5(usecase4, self.unaligned_dtype)
self._test_usecase2to5(usecase4, self.aligned_dtype)
def test_usecase5(self):
self._test_usecase2to5(usecase5, self.unaligned_dtype)
self._test_usecase2to5(usecase5, self.aligned_dtype)
if __name__ == '__main__':
unittest.main()
| TestRecordUsecase |
python | allegroai__clearml | clearml/backend_api/services/v2_20/models.py | {
"start": 23856,
"end": 24953
} | class ____(Request):
"""
Archive models
:param ids: IDs of the models to archive
:type ids: Sequence[str]
"""
_service = "models"
_action = "archive_many"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"ids": {
"description": "IDs of the models to archive",
"items": {"type": "string"},
"type": "array",
}
},
"required": ["ids"],
"type": "object",
}
def __init__(self, ids: List[str], **kwargs: Any) -> None:
super(ArchiveManyRequest, self).__init__(**kwargs)
self.ids = ids
@schema_property("ids")
def ids(self) -> List[str]:
return self._property_ids
@ids.setter
def ids(self, value: List[str]) -> None:
if value is None:
self._property_ids = None
return
self.assert_isinstance(value, "ids", (list, tuple))
self.assert_isinstance(value, "ids", six.string_types, is_array=True)
self._property_ids = value
| ArchiveManyRequest |
python | google__pytype | build_scripts/test_module.py | {
"start": 1283,
"end": 2188
} | class ____:
"""A class which collects stats while running tests."""
def __init__(self, options):
self._options = options
self.class_count = 0
self.method_count = 0
self.error_count = 0
self.fail_count = 0
self.unexpected_success_count = 0
def add_class(self):
self.class_count += 1
def add_method(self, test_result):
self.method_count += 1
self.error_count += len(test_result.errors)
self.fail_count += len(test_result.failures)
self.unexpected_success_count += len(test_result.unexpectedSuccesses)
def report(self):
msg = (
f"\nRan {self.method_count} methods from {self.class_count} classes.\n"
)
msg += "Found %d errors\n" % self.error_count
msg += "Found %d failures\n" % self.fail_count
msg += f"Found {self.unexpected_success_count} unexpected successes\n"
print_messages(self._options, msg, msg)
| StatsCollector |
python | getsentry__sentry | src/sentry/apidocs/parameters.py | {
"start": 24964,
"end": 25694
} | class ____:
DETAILED = OpenApiParameter(
name="detailed",
location="query",
required=False,
type=str,
description="""
Specify `"0"` to return team details that do not include projects.
""",
)
COLLAPSE = OpenApiParameter(
name="collapse",
location="query",
required=False,
type=str,
description="""
List of strings to opt out of certain pieces of data. Supports `organization`.
""",
)
EXPAND = OpenApiParameter(
name="expand",
location="query",
required=False,
type=str,
description="""
List of strings to opt in to additional data. Supports `projects`, `externalTeams`.
""",
)
| TeamParams |
python | wandb__wandb | wandb/sdk/artifacts/storage_handler.py | {
"start": 443,
"end": 1693
} | class ____(ABC):
@abstractmethod
def load_path(
self,
manifest_entry: ArtifactManifestEntry,
local: bool = False,
) -> URIStr | FilePathStr:
"""Load a file or directory given the corresponding index entry.
Args:
manifest_entry: The index entry to load
local: Whether to load the file locally or not
Returns:
A path to the file represented by `index_entry`
"""
raise NotImplementedError
@abstractmethod
def store_path(
self,
artifact: Artifact,
path: URIStr | FilePathStr,
name: str | None = None,
checksum: bool = True,
max_objects: int | None = None,
) -> list[ArtifactManifestEntry]:
"""Store the file or directory at the given path to the specified artifact.
Args:
path: The path to store
name: If specified, the logical name that should map to `path`
checksum: Whether to compute the checksum of the file
max_objects: The maximum number of objects to store
Returns:
A list of manifest entries to store within the artifact
"""
raise NotImplementedError
| _BaseStorageHandler |
python | openai__gym | tests/envs/utils_envs.py | {
"start": 592,
"end": 888
} | class ____(gym.Env):
"""Environment that does not have human-rendering."""
metadata = {"render_modes": ["rgb_array_list"], "render_fps": 4}
def __init__(self, render_mode=None):
assert render_mode in self.metadata["render_modes"]
self.render_mode = render_mode
| NoHuman |
python | scipy__scipy | scipy/special/tests/test_legendre.py | {
"start": 1101,
"end": 2307
} | class ____:
@pytest.mark.parametrize("shape", [(10,), (4, 9), (3, 5, 7)])
def test_ode(self, shape):
rng = np.random.default_rng(1234)
n = rng.integers(0, 100, shape)
x = rng.uniform(-1, 1, shape)
p, p_jac, p_hess = legendre_p(n, x, diff_n=2)
assert p.shape == shape
assert p_jac.shape == p.shape
assert p_hess.shape == p_jac.shape
err = (1 - x * x) * p_hess - 2 * x * p_jac + n * (n + 1) * p
np.testing.assert_allclose(err, 0, atol=1e-10)
@pytest.mark.parametrize("n_max", [1, 2, 4, 8, 16, 32])
@pytest.mark.parametrize("x_shape", [(10,), (4, 9), (3, 5, 7)])
def test_all_ode(self, n_max, x_shape):
rng = np.random.default_rng(1234)
x = rng.uniform(-1, 1, x_shape)
p, p_jac, p_hess = legendre_p_all(n_max, x, diff_n=2)
n = np.arange(n_max + 1)
n = np.expand_dims(n, axis = tuple(range(1, x.ndim + 1)))
assert p.shape == (len(n),) + x.shape
assert p_jac.shape == p.shape
assert p_hess.shape == p_jac.shape
err = (1 - x * x) * p_hess - 2 * x * p_jac + n * (n + 1) * p
np.testing.assert_allclose(err, 0, atol=1e-10)
| TestLegendreP |
python | prabhupant__python-ds | data_structures/bst/second_largest_in_bst.py | {
"start": 302,
"end": 1392
} | class ____:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def find_largest(root):
curr = root
while curr:
if not curr.right:
return curr.val
curr = curr.right
def second_largest(root):
if not root or (not root.left and not root.right):
return "BST should have atleast 2 nodes"
curr = root
while curr:
if curr.left and not curr.right:
return find_largest(curr.left)
if curr.right and not curr.right.left and not curr.right.right:
return curr.val
curr = curr.right
def insert(root, key):
if root == None:
return Node(key)
if key < root.val:
root.left = insert(root.left, key)
elif key > root.val:
root.right = insert(root.right, key)
return root
if __name__ == '__main__':
root = Node(6)
insert(root, 5)
insert(root, 3)
insert(root, 10)
insert(root, 4)
insert(root, 11)
insert(root, 14)
insert(root, 1)
print(second_largest(root))
| Node |
python | huggingface__transformers | tests/models/colpali/test_modeling_colpali.py | {
"start": 1410,
"end": 6382
} | class ____:
def __init__(
self,
parent,
ignore_index=-100,
image_token_index=0,
projector_hidden_act="gelu",
seq_length=25,
vision_feature_select_strategy="default",
vision_feature_layer=-1,
projection_dim=32,
text_config={
"model_type": "gemma",
"seq_length": 128,
"is_training": True,
"use_token_type_ids": False,
"use_labels": True,
"vocab_size": 99,
"hidden_size": 32,
"num_hidden_layers": 2,
"num_attention_heads": 4,
"num_key_value_heads": 1,
"head_dim": 8,
"intermediate_size": 37,
"hidden_activation": "gelu_pytorch_tanh",
"hidden_dropout_prob": 0.1,
"attention_probs_dropout_prob": 0.1,
"max_position_embeddings": 512,
"type_vocab_size": 16,
"type_sequence_label_size": 2,
"initializer_range": 0.02,
"num_labels": 3,
"num_choices": 4,
"pad_token_id": 1,
},
is_training=False,
vision_config={
"use_labels": True,
"image_size": 20,
"patch_size": 5,
"num_image_tokens": 4,
"num_channels": 3,
"is_training": True,
"hidden_size": 32,
"projection_dim": 32,
"num_key_value_heads": 1,
"num_hidden_layers": 2,
"num_attention_heads": 4,
"intermediate_size": 37,
"dropout": 0.1,
"attention_dropout": 0.1,
"initializer_range": 0.02,
},
use_cache=False,
embedding_dim=128,
):
self.parent = parent
self.ignore_index = ignore_index
# `image_token_index` is set to 0 to pass "resize_embeddings" test, do not modify
self.image_token_index = image_token_index
self.projector_hidden_act = projector_hidden_act
self.vision_feature_select_strategy = vision_feature_select_strategy
self.vision_feature_layer = vision_feature_layer
self.text_config = text_config
self.vision_config = vision_config
self.seq_length = seq_length
self.projection_dim = projection_dim
self.pad_token_id = text_config["pad_token_id"]
self.num_hidden_layers = text_config["num_hidden_layers"]
self.vocab_size = text_config["vocab_size"]
self.hidden_size = text_config["hidden_size"]
self.num_attention_heads = text_config["num_attention_heads"]
self.is_training = is_training
self.batch_size = 3
self.num_channels = vision_config["num_channels"]
self.image_size = vision_config["image_size"]
self.encoder_seq_length = seq_length
self.use_cache = use_cache
self.embedding_dim = embedding_dim
self.vlm_config = {
"model_type": "paligemma",
"text_config": self.text_config,
"vision_config": self.vision_config,
"ignore_index": self.ignore_index,
"image_token_index": self.image_token_index,
"projector_hidden_act": self.projector_hidden_act,
"projection_dim": self.projection_dim,
"vision_feature_select_strategy": self.vision_feature_select_strategy,
"vision_feature_layer": self.vision_feature_layer,
}
def get_config(self):
return ColPaliConfig(
vlm_config=self.vlm_config,
embedding_dim=self.embedding_dim,
)
def prepare_config_and_inputs(self):
pixel_values = floats_tensor(
[
self.batch_size,
self.vision_config["num_channels"],
self.vision_config["image_size"],
self.vision_config["image_size"],
]
)
config = self.get_config()
return config, pixel_values
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
input_ids = ids_tensor([self.batch_size, self.seq_length], config.vlm_config.text_config.vocab_size - 1) + 1
attention_mask = input_ids.ne(1).to(torch_device)
# set the 16 first tokens to be image, and ensure that no other tokens are image tokens
# do not change this unless you modified image size or patch size
input_ids[input_ids == config.vlm_config.image_token_index] = self.pad_token_id
input_ids[:, :16] = config.vlm_config.image_token_index
inputs_dict = {
"pixel_values": pixel_values,
"input_ids": input_ids,
"attention_mask": attention_mask,
"labels": input_ids,
"token_type_ids": torch.zeros_like(input_ids),
}
return config, inputs_dict
@require_torch
| ColPaliForRetrievalModelTester |
python | huggingface__transformers | src/transformers/models/arcee/modeling_arcee.py | {
"start": 12974,
"end": 14756
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: ArceeConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = ArceeAttention(config=config, layer_idx=layer_idx)
self.mlp = ArceeMLP(config)
self.input_layernorm = ArceeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = ArceeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
@auto_docstring
| ArceeDecoderLayer |
python | ray-project__ray | doc/source/serve/doc_code/aws_neuron_core_inference_serve.py | {
"start": 1627,
"end": 2972
} | class ____:
def __init__(self):
import torch, torch_neuronx # noqa
from transformers import AutoTokenizer
self.model = torch.jit.load(neuron_model)
self.tokenizer = AutoTokenizer.from_pretrained(hf_model)
self.classmap = {
0: "anger",
1: "disgust",
2: "fear",
3: "joy",
4: "neutral",
5: "sadness",
6: "surprise",
}
def infer(self, sentence: str):
inputs = self.tokenizer.encode_plus(
sentence,
return_tensors="pt",
padding="max_length",
truncation=True,
max_length=128,
)
output = self.model(*(inputs["input_ids"], inputs["attention_mask"]))
class_id = torch.argmax(output["logits"], dim=1).item()
return self.classmap[class_id]
entrypoint = APIIngress.bind(BertBaseModel.bind())
# __neuron_serve_code_end__
if __name__ == "__main__":
import requests
import ray
# On inf2.8xlarge instance, there will be 2 neuron cores.
ray.init(resources={"neuron_cores": 2})
serve.run(entrypoint)
prompt = "Ray is super cool."
resp = requests.get(f"http://127.0.0.1:8000/infer?sentence={prompt}")
print(resp.status_code, resp.json())
assert resp.status_code == 200
| BertBaseModel |
python | mlflow__mlflow | mlflow/genai/agent_server/validator.py | {
"start": 2057,
"end": 2639
} | class ____(BaseAgentValidator):
def validate_and_convert_request(self, data: dict[str, Any]) -> ResponsesAgentRequest:
self.validate_pydantic(ResponsesAgentRequest, data)
return ResponsesAgentRequest(**data)
def validate_and_convert_result(self, result: Any, stream: bool = False) -> dict[str, Any]:
if stream:
self.validate_pydantic(ResponsesAgentStreamEvent, result)
else:
self.validate_pydantic(ResponsesAgentResponse, result)
return super().validate_and_convert_result(result, stream)
| ResponsesAgentValidator |
python | django__django | tests/test_client_regress/tests.py | {
"start": 10403,
"end": 14618
} | class ____(TestDataMixin, TestCase):
def test_no_context(self):
"Template usage assertions work then templates aren't in use"
response = self.client.get("/no_template_view/")
# The no template case doesn't mess with the template assertions
self.assertTemplateNotUsed(response, "GET Template")
try:
self.assertTemplateUsed(response, "GET Template")
except AssertionError as e:
self.assertIn("No templates used to render the response", str(e))
try:
self.assertTemplateUsed(response, "GET Template", msg_prefix="abc")
except AssertionError as e:
self.assertIn("abc: No templates used to render the response", str(e))
msg = "No templates used to render the response"
with self.assertRaisesMessage(AssertionError, msg):
self.assertTemplateUsed(response, "GET Template", count=2)
def test_single_context(self):
"Template assertions work when there is a single context"
response = self.client.get("/post_view/", {})
msg = (
": Template 'Empty GET Template' was used unexpectedly in "
"rendering the response"
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertTemplateNotUsed(response, "Empty GET Template")
with self.assertRaisesMessage(AssertionError, "abc" + msg):
self.assertTemplateNotUsed(response, "Empty GET Template", msg_prefix="abc")
msg = (
": Template 'Empty POST Template' was not a template used to "
"render the response. Actual template(s) used: Empty GET Template"
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertTemplateUsed(response, "Empty POST Template")
with self.assertRaisesMessage(AssertionError, "abc" + msg):
self.assertTemplateUsed(response, "Empty POST Template", msg_prefix="abc")
msg = (
": Template 'Empty GET Template' was expected to be rendered 2 "
"time(s) but was actually rendered 1 time(s)."
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertTemplateUsed(response, "Empty GET Template", count=2)
with self.assertRaisesMessage(AssertionError, "abc" + msg):
self.assertTemplateUsed(
response, "Empty GET Template", msg_prefix="abc", count=2
)
def test_multiple_context(self):
"Template assertions work when there are multiple contexts"
post_data = {
"text": "Hello World",
"email": "foo@example.com",
"value": 37,
"single": "b",
"multi": ("b", "c", "e"),
}
response = self.client.post("/form_view_with_template/", post_data)
self.assertContains(response, "POST data OK")
msg = "Template '%s' was used unexpectedly in rendering the response"
with self.assertRaisesMessage(AssertionError, msg % "form_view.html"):
self.assertTemplateNotUsed(response, "form_view.html")
with self.assertRaisesMessage(AssertionError, msg % "base.html"):
self.assertTemplateNotUsed(response, "base.html")
msg = (
"Template 'Valid POST Template' was not a template used to render "
"the response. Actual template(s) used: form_view.html, base.html"
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertTemplateUsed(response, "Valid POST Template")
msg = (
"Template 'base.html' was expected to be rendered 2 time(s) but "
"was actually rendered 1 time(s)."
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertTemplateUsed(response, "base.html", count=2)
def test_template_rendered_multiple_times(self):
"""
Template assertions work when a template is rendered multiple times.
"""
response = self.client.get("/render_template_multiple_times/")
self.assertTemplateUsed(response, "base.html", count=2)
@override_settings(ROOT_URLCONF="test_client_regress.urls")
| AssertTemplateUsedTests |
python | pytest-dev__pytest | src/_pytest/mark/expression.py | {
"start": 1682,
"end": 8049
} | class ____:
__slots__ = ("current", "input", "tokens")
def __init__(self, input: str) -> None:
self.input = input
self.tokens = self.lex(input)
self.current = next(self.tokens)
def lex(self, input: str) -> Iterator[Token]:
pos = 0
while pos < len(input):
if input[pos] in (" ", "\t"):
pos += 1
elif input[pos] == "(":
yield Token(TokenType.LPAREN, "(", pos)
pos += 1
elif input[pos] == ")":
yield Token(TokenType.RPAREN, ")", pos)
pos += 1
elif input[pos] == "=":
yield Token(TokenType.EQUAL, "=", pos)
pos += 1
elif input[pos] == ",":
yield Token(TokenType.COMMA, ",", pos)
pos += 1
elif (quote_char := input[pos]) in ("'", '"'):
end_quote_pos = input.find(quote_char, pos + 1)
if end_quote_pos == -1:
raise SyntaxError(
f'closing quote "{quote_char}" is missing',
(FILE_NAME, 1, pos + 1, input),
)
value = input[pos : end_quote_pos + 1]
if (backslash_pos := input.find("\\")) != -1:
raise SyntaxError(
r'escaping with "\" not supported in marker expression',
(FILE_NAME, 1, backslash_pos + 1, input),
)
yield Token(TokenType.STRING, value, pos)
pos += len(value)
else:
match = re.match(r"(:?\w|:|\+|-|\.|\[|\]|\\|/)+", input[pos:])
if match:
value = match.group(0)
if value == "or":
yield Token(TokenType.OR, value, pos)
elif value == "and":
yield Token(TokenType.AND, value, pos)
elif value == "not":
yield Token(TokenType.NOT, value, pos)
else:
yield Token(TokenType.IDENT, value, pos)
pos += len(value)
else:
raise SyntaxError(
f'unexpected character "{input[pos]}"',
(FILE_NAME, 1, pos + 1, input),
)
yield Token(TokenType.EOF, "", pos)
@overload
def accept(self, type: TokenType, *, reject: Literal[True]) -> Token: ...
@overload
def accept(
self, type: TokenType, *, reject: Literal[False] = False
) -> Token | None: ...
def accept(self, type: TokenType, *, reject: bool = False) -> Token | None:
if self.current.type is type:
token = self.current
if token.type is not TokenType.EOF:
self.current = next(self.tokens)
return token
if reject:
self.reject((type,))
return None
def reject(self, expected: Sequence[TokenType]) -> NoReturn:
raise SyntaxError(
"expected {}; got {}".format(
" OR ".join(type.value for type in expected),
self.current.type.value,
),
(FILE_NAME, 1, self.current.pos + 1, self.input),
)
# True, False and None are legal match expression identifiers,
# but illegal as Python identifiers. To fix this, this prefix
# is added to identifiers in the conversion to Python AST.
IDENT_PREFIX = "$"
def expression(s: Scanner) -> ast.Expression:
if s.accept(TokenType.EOF):
ret: ast.expr = ast.Constant(False)
else:
ret = expr(s)
s.accept(TokenType.EOF, reject=True)
return ast.fix_missing_locations(ast.Expression(ret))
def expr(s: Scanner) -> ast.expr:
ret = and_expr(s)
while s.accept(TokenType.OR):
rhs = and_expr(s)
ret = ast.BoolOp(ast.Or(), [ret, rhs])
return ret
def and_expr(s: Scanner) -> ast.expr:
ret = not_expr(s)
while s.accept(TokenType.AND):
rhs = not_expr(s)
ret = ast.BoolOp(ast.And(), [ret, rhs])
return ret
def not_expr(s: Scanner) -> ast.expr:
if s.accept(TokenType.NOT):
return ast.UnaryOp(ast.Not(), not_expr(s))
if s.accept(TokenType.LPAREN):
ret = expr(s)
s.accept(TokenType.RPAREN, reject=True)
return ret
ident = s.accept(TokenType.IDENT)
if ident:
name = ast.Name(IDENT_PREFIX + ident.value, ast.Load())
if s.accept(TokenType.LPAREN):
ret = ast.Call(func=name, args=[], keywords=all_kwargs(s))
s.accept(TokenType.RPAREN, reject=True)
else:
ret = name
return ret
s.reject((TokenType.NOT, TokenType.LPAREN, TokenType.IDENT))
BUILTIN_MATCHERS = {"True": True, "False": False, "None": None}
def single_kwarg(s: Scanner) -> ast.keyword:
keyword_name = s.accept(TokenType.IDENT, reject=True)
if not keyword_name.value.isidentifier():
raise SyntaxError(
f"not a valid python identifier {keyword_name.value}",
(FILE_NAME, 1, keyword_name.pos + 1, s.input),
)
if keyword.iskeyword(keyword_name.value):
raise SyntaxError(
f"unexpected reserved python keyword `{keyword_name.value}`",
(FILE_NAME, 1, keyword_name.pos + 1, s.input),
)
s.accept(TokenType.EQUAL, reject=True)
if value_token := s.accept(TokenType.STRING):
value: str | int | bool | None = value_token.value[1:-1] # strip quotes
else:
value_token = s.accept(TokenType.IDENT, reject=True)
if (number := value_token.value).isdigit() or (
number.startswith("-") and number[1:].isdigit()
):
value = int(number)
elif value_token.value in BUILTIN_MATCHERS:
value = BUILTIN_MATCHERS[value_token.value]
else:
raise SyntaxError(
f'unexpected character/s "{value_token.value}"',
(FILE_NAME, 1, value_token.pos + 1, s.input),
)
ret = ast.keyword(keyword_name.value, ast.Constant(value))
return ret
def all_kwargs(s: Scanner) -> list[ast.keyword]:
ret = [single_kwarg(s)]
while s.accept(TokenType.COMMA):
ret.append(single_kwarg(s))
return ret
| Scanner |
python | Lightning-AI__lightning | src/lightning/pytorch/cli.py | {
"start": 2406,
"end": 3465
} | class ____(torch.optim.lr_scheduler.ReduceLROnPlateau):
"""Custom ReduceLROnPlateau scheduler that extends PyTorch's ReduceLROnPlateau.
This class adds a `monitor` attribute to the standard PyTorch ReduceLROnPlateau to specify which metric should be
tracked for learning rate adjustment.
"""
def __init__(self, optimizer: Optimizer, monitor: str, *args: Any, **kwargs: Any) -> None:
super().__init__(optimizer, *args, **kwargs)
self.monitor = monitor
# LightningCLI requires the ReduceLROnPlateau defined here, thus it shouldn't accept the one from pytorch:
LRSchedulerTypeTuple = (LRScheduler, ReduceLROnPlateau)
LRSchedulerTypeUnion = Union[LRScheduler, ReduceLROnPlateau]
LRSchedulerType = Union[type[LRScheduler], type[ReduceLROnPlateau]]
# Type aliases intended for convenience of CLI developers
ArgsType = Optional[Union[list[str], dict[str, Any], Namespace]]
OptimizerCallable = Callable[[Iterable], Optimizer]
LRSchedulerCallable = Callable[[Optimizer], Union[LRScheduler, ReduceLROnPlateau]]
| ReduceLROnPlateau |
python | wandb__wandb | wandb/errors/errors.py | {
"start": 889,
"end": 981
} | class ____(Error):
"""Raised when wandb core is not available."""
| WandbCoreNotAvailableError |
python | ray-project__ray | doc/source/serve/doc_code/stable_diffusion.py | {
"start": 1021,
"end": 2363
} | class ____:
def __init__(self):
from diffusers import DiffusionPipeline
model_id = "stabilityai/stable-diffusion-xl-base-1.0"
self.pipe = DiffusionPipeline.from_pretrained(
model_id, torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
self.pipe = self.pipe.to("cuda")
def generate(self, prompt: str, img_size: int = 512):
assert len(prompt), "prompt parameter cannot be empty"
with torch.autocast("cuda"):
image = self.pipe(prompt, height=img_size, width=img_size).images[0]
return image
entrypoint = APIIngress.bind(StableDiffusionXL.bind())
# __example_code_end__
if __name__ == "__main__":
import ray
import os
import requests
ray.init(
runtime_env={
"pip": [
"diffusers==0.33.1",
"transformers==4.51.3",
]
}
)
handle = serve.run(entrypoint)
handle.generate.remote("hi").result()
prompt = "a cute cat is dancing on the grass."
prompt_query = "%20".join(prompt.split(" "))
resp = requests.get(f"http://127.0.0.1:8000/imagine?prompt={prompt_query}")
with open("output.png", "wb") as f:
f.write(resp.content)
assert os.path.exists("output.png")
os.remove("output.png")
| StableDiffusionXL |
python | wireservice__csvkit | tests/utils.py | {
"start": 2805,
"end": 3370
} | class ____:
def test_names(self):
output = self.get_output_as_io(['-n', 'examples/dummy.csv'])
self.assertEqual(next(output), ' 1: a\n')
self.assertEqual(next(output), ' 2: b\n')
self.assertEqual(next(output), ' 3: c\n')
def test_invalid_options(self):
args = ['-n', '--no-header-row', 'examples/dummy.csv']
output_file = io.StringIO()
utility = self.Utility(args, output_file)
with self.assertRaises(RequiredHeaderError):
utility.run()
output_file.close()
| NamesTests |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/oracle/base.py | {
"start": 40168,
"end": 45966
} | class ____(compiler.GenericTypeCompiler):
# Note:
# Oracle DATE == DATETIME
# Oracle does not allow milliseconds in DATE
# Oracle does not support TIME columns
def visit_datetime(self, type_, **kw):
return self.visit_DATE(type_, **kw)
def visit_float(self, type_, **kw):
return self.visit_FLOAT(type_, **kw)
def visit_double(self, type_, **kw):
return self.visit_DOUBLE_PRECISION(type_, **kw)
def visit_unicode(self, type_, **kw):
if self.dialect._use_nchar_for_unicode:
return self.visit_NVARCHAR2(type_, **kw)
else:
return self.visit_VARCHAR2(type_, **kw)
def visit_INTERVAL(self, type_, **kw):
return "INTERVAL DAY%s TO SECOND%s" % (
type_.day_precision is not None
and "(%d)" % type_.day_precision
or "",
type_.second_precision is not None
and "(%d)" % type_.second_precision
or "",
)
def visit_LONG(self, type_, **kw):
return "LONG"
def visit_TIMESTAMP(self, type_, **kw):
if getattr(type_, "local_timezone", False):
return "TIMESTAMP WITH LOCAL TIME ZONE"
elif type_.timezone:
return "TIMESTAMP WITH TIME ZONE"
else:
return "TIMESTAMP"
def visit_DOUBLE_PRECISION(self, type_, **kw):
return self._generate_numeric(type_, "DOUBLE PRECISION", **kw)
def visit_BINARY_DOUBLE(self, type_, **kw):
return self._generate_numeric(type_, "BINARY_DOUBLE", **kw)
def visit_BINARY_FLOAT(self, type_, **kw):
return self._generate_numeric(type_, "BINARY_FLOAT", **kw)
def visit_FLOAT(self, type_, **kw):
kw["_requires_binary_precision"] = True
return self._generate_numeric(type_, "FLOAT", **kw)
def visit_NUMBER(self, type_, **kw):
return self._generate_numeric(type_, "NUMBER", **kw)
def _generate_numeric(
self,
type_,
name,
precision=None,
scale=None,
_requires_binary_precision=False,
**kw,
):
if precision is None:
precision = getattr(type_, "precision", None)
if _requires_binary_precision:
binary_precision = getattr(type_, "binary_precision", None)
if precision and binary_precision is None:
# https://www.oracletutorial.com/oracle-basics/oracle-float/
estimated_binary_precision = int(precision / 0.30103)
raise exc.ArgumentError(
"Oracle Database FLOAT types use 'binary precision', "
"which does not convert cleanly from decimal "
"'precision'. Please specify "
"this type with a separate Oracle Database variant, such "
f"as {type_.__class__.__name__}(precision={precision})."
f"with_variant(oracle.FLOAT"
f"(binary_precision="
f"{estimated_binary_precision}), 'oracle'), so that the "
"Oracle Database specific 'binary_precision' may be "
"specified accurately."
)
else:
precision = binary_precision
if scale is None:
scale = getattr(type_, "scale", None)
if precision is None:
return name
elif scale is None:
n = "%(name)s(%(precision)s)"
return n % {"name": name, "precision": precision}
else:
n = "%(name)s(%(precision)s, %(scale)s)"
return n % {"name": name, "precision": precision, "scale": scale}
def visit_string(self, type_, **kw):
return self.visit_VARCHAR2(type_, **kw)
def visit_VARCHAR2(self, type_, **kw):
return self._visit_varchar(type_, "", "2")
def visit_NVARCHAR2(self, type_, **kw):
return self._visit_varchar(type_, "N", "2")
visit_NVARCHAR = visit_NVARCHAR2
def visit_VARCHAR(self, type_, **kw):
return self._visit_varchar(type_, "", "")
def _visit_varchar(self, type_, n, num):
if not type_.length:
return "%(n)sVARCHAR%(two)s" % {"two": num, "n": n}
elif not n and self.dialect._supports_char_length:
varchar = "VARCHAR%(two)s(%(length)s CHAR)"
return varchar % {"length": type_.length, "two": num}
else:
varchar = "%(n)sVARCHAR%(two)s(%(length)s)"
return varchar % {"length": type_.length, "two": num, "n": n}
def visit_text(self, type_, **kw):
return self.visit_CLOB(type_, **kw)
def visit_unicode_text(self, type_, **kw):
if self.dialect._use_nchar_for_unicode:
return self.visit_NCLOB(type_, **kw)
else:
return self.visit_CLOB(type_, **kw)
def visit_large_binary(self, type_, **kw):
return self.visit_BLOB(type_, **kw)
def visit_big_integer(self, type_, **kw):
return self.visit_NUMBER(type_, precision=19, **kw)
def visit_boolean(self, type_, **kw):
return self.visit_SMALLINT(type_, **kw)
def visit_RAW(self, type_, **kw):
if type_.length:
return "RAW(%(length)s)" % {"length": type_.length}
else:
return "RAW"
def visit_ROWID(self, type_, **kw):
return "ROWID"
def visit_VECTOR(self, type_, **kw):
dim = type_.dim if type_.dim is not None else "*"
storage_format = (
type_.storage_format.value
if type_.storage_format is not None
else "*"
)
storage_type = (
type_.storage_type.value if type_.storage_type is not None else "*"
)
return f"VECTOR({dim},{storage_format},{storage_type})"
| OracleTypeCompiler |
python | jazzband__django-polymorphic | src/polymorphic/tests/models.py | {
"start": 449,
"end": 524
} | class ____(models.Model):
field1 = models.CharField(max_length=30)
| PlainA |
python | bokeh__bokeh | src/bokeh/core/types.py | {
"start": 1914,
"end": 2033
} | class ____(TypedDict):
type: Literal["span"]
direction: Literal["h", "v"]
sx: float
sy: float
| SpanGeometry |
python | getsentry__sentry | tests/sentry/issue_detection/test_n_plus_one_api_calls_detector.py | {
"start": 867,
"end": 16388
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self._settings = get_detection_settings()
def find_problems(self, event: dict[str, Any]) -> list[PerformanceProblem]:
detector = NPlusOneAPICallsDetector(self._settings, event)
run_detector_on_data(detector, event)
return list(detector.stored_problems.values())
def create_event(self, description_maker: Callable[[int], str]) -> dict[str, Any]:
total_duration = self._settings[DetectorType.N_PLUS_ONE_API_CALLS]["total_duration"] + 1
count = self._settings[DetectorType.N_PLUS_ONE_API_CALLS]["count"] + 1
hash = uuid4().hex[:16]
return create_event(
[
create_span(
"http.client",
total_duration / count,
description_maker(i),
hash=hash,
)
for i in range(count)
]
)
def create_eligible_spans(self, duration: float, count: int) -> list[Span]:
spans = []
for i in range(count):
spans.append(
create_span(
"http.client",
duration,
f"GET /api/0/organizations/books?book_id={i}",
f"hash{i}",
)
)
return spans
def test_detects_problems_with_many_concurrent_calls_to_same_url(self) -> None:
event = get_event("n-plus-one-api-calls/n-plus-one-api-calls-in-issue-stream")
problems = self.find_problems(event)
assert self.find_problems(event) == [
PerformanceProblem(
fingerprint="1-1010-d750ce46bb1b13dd5780aac48098d5e20eea682c",
op="http.client",
type=PerformanceNPlusOneAPICallsGroupType,
desc="GET /api/0/organizations/sentry/events/?field=replayId&field=count%28%29&per_page=50&query=issue.id%3A",
parent_span_ids=["a0c39078d1570b00"],
cause_span_ids=[],
offender_span_ids=[
"ba198ace55bdb20f",
"8a20c71faa0fb6a7",
"9269c825d935b33a",
"9ea82f759505e0f3",
"8c55019639e94ab3",
"9b86746e9cc7fbf0",
"806aa31fe1874495",
"bf409b62d9c30197",
"896ac7d28addb37f",
"9c859aeaf6bfaea9",
"950d8f569bbe3d9e",
"b19a2811b457e87a",
"b566d4ce5b46d4f0",
"b33e9da4441a4800",
"8b68818410aa45d8",
"8ac4e73b53fc2077",
"9fe4a1aff019e39e",
"b29cd0c0cd85ae85",
"b3ff0062caa3ea51",
"a3fde2e38a66cc2c",
"b78802cd80762f57",
"9e2ea4d33b1c1bc6",
"bb827dc7a11085f4",
"a34089b08b6d0646",
"950801c0d7576650",
],
evidence_data={
"op": "http.client",
"parent_span_ids": ["a0c39078d1570b00"],
"cause_span_ids": [],
"offender_span_ids": [
"ba198ace55bdb20f",
"8a20c71faa0fb6a7",
"9269c825d935b33a",
"9ea82f759505e0f3",
"8c55019639e94ab3",
"9b86746e9cc7fbf0",
"806aa31fe1874495",
"bf409b62d9c30197",
"896ac7d28addb37f",
"9c859aeaf6bfaea9",
"950d8f569bbe3d9e",
"b19a2811b457e87a",
"b566d4ce5b46d4f0",
"b33e9da4441a4800",
"8b68818410aa45d8",
"8ac4e73b53fc2077",
"9fe4a1aff019e39e",
"b29cd0c0cd85ae85",
"b3ff0062caa3ea51",
"a3fde2e38a66cc2c",
"b78802cd80762f57",
"9e2ea4d33b1c1bc6",
"bb827dc7a11085f4",
"a34089b08b6d0646",
"950801c0d7576650",
],
},
evidence_display=[],
)
]
assert problems[0].title == "N+1 API Call"
def test_does_not_detect_problems_with_low_total_duration_of_spans(self) -> None:
event = get_event("n-plus-one-api-calls/n-plus-one-api-calls-in-issue-stream")
event["spans"] = self.create_eligible_spans(
100, 10
) # total duration is 1s, greater than default
problems = self.find_problems(event)
assert len(problems) == 1
event["spans"] = self.create_eligible_spans(
10, 5
) # total duration is 50ms, lower than default
problems = self.find_problems(event)
assert problems == []
def test_detects_problems_with_low_span_duration_high_total_duration(self) -> None:
event = get_event("n-plus-one-api-calls/n-plus-one-api-calls-in-issue-stream")
event["spans"] = self.create_eligible_spans(100, 10) # total duration is 1s
problems = self.find_problems(event)
assert len(problems) == 1
event["spans"] = self.create_eligible_spans(10, 50) # total duration is 500ms
problems = self.find_problems(event)
assert len(problems) == 1
def test_does_not_detect_problems_with_low_span_count(self) -> None:
event = get_event("n-plus-one-api-calls/n-plus-one-api-calls-in-issue-stream")
event["spans"] = self.create_eligible_spans(
1000, self._settings[DetectorType.N_PLUS_ONE_API_CALLS]["count"]
)
problems = self.find_problems(event)
assert len(problems) == 1
event["spans"] = self.create_eligible_spans(
1000, self._settings[DetectorType.N_PLUS_ONE_API_CALLS]["count"] - 1
)
problems = self.find_problems(event)
assert problems == []
def test_does_not_detect_problem_with_unparameterized_urls(self) -> None:
event = get_event("n-plus-one-api-calls/n-plus-one-api-calls-in-weather-app")
assert self.find_problems(event) == []
def test_does_not_detect_problem_with_concurrent_calls_to_different_urls(self) -> None:
event = get_event("n-plus-one-api-calls/not-n-plus-one-api-calls")
assert self.find_problems(event) == []
def test_fingerprints_events(self) -> None:
event = self.create_event(lambda i: "GET /clients/11/info")
[problem] = self.find_problems(event)
assert problem.fingerprint == "1-1010-e9daac10ea509a0bf84a8b8da45d36394868ad67"
def test_fingerprints_identical_relative_urls_together(self) -> None:
event1 = self.create_event(lambda i: "GET /clients/11/info")
[problem1] = self.find_problems(event1)
event2 = self.create_event(lambda i: "GET /clients/11/info")
[problem2] = self.find_problems(event2)
assert problem1.fingerprint == problem2.fingerprint
def test_fingerprints_same_relative_urls_together(self) -> None:
event1 = self.create_event(lambda i: f"GET /clients/42/info?id={i}")
[problem1] = self.find_problems(event1)
event2 = self.create_event(lambda i: f"GET /clients/42/info?id={i*2}")
[problem2] = self.find_problems(event2)
assert problem1.fingerprint == problem2.fingerprint
def test_fingerprints_same_parameterized_integer_relative_urls_together(self) -> None:
event1 = self.create_event(lambda i: f"GET /clients/17/info?id={i}")
[problem1] = self.find_problems(event1)
event2 = self.create_event(lambda i: f"GET /clients/16/info?id={i*2}")
[problem2] = self.find_problems(event2)
assert problem1.fingerprint == problem2.fingerprint
def test_fingerprints_different_relative_url_separately(self) -> None:
event1 = self.create_event(lambda i: f"GET /clients/11/info?id={i}")
[problem1] = self.find_problems(event1)
event2 = self.create_event(lambda i: f"GET /projects/11/details?pid={i}")
[problem2] = self.find_problems(event2)
assert problem1.fingerprint != problem2.fingerprint
def test_ignores_hostname_for_fingerprinting(self) -> None:
event1 = self.create_event(lambda i: f"GET http://service.io/clients/42/info?id={i}")
[problem1] = self.find_problems(event1)
event2 = self.create_event(lambda i: f"GET /clients/42/info?id={i}")
[problem2] = self.find_problems(event2)
assert problem1.fingerprint == problem2.fingerprint
@pytest.mark.parametrize(
"url,parameterized_url",
[
(
"",
"",
),
(
"http://service.io",
"http://service.io",
),
(
"https://www.service.io/resources/11",
"https://www.service.io/resources/*",
),
(
"https://www.service.io/resources/11/details",
"https://www.service.io/resources/*/details",
),
(
"https://www.service.io/resources/11/details?id=1&sort=down",
"https://www.service.io/resources/*/details?id=*&sort=*",
),
(
"https://www.service.io/resources/11/details?sort=down&id=1",
"https://www.service.io/resources/*/details?id=*&sort=*",
),
(
"https://service.io/clients/somecord/details?id=17",
"https://service.io/clients/somecord/details?id=*",
),
(
"/clients/11/project/1343",
"/clients/*/project/*",
),
(
"/clients/11/project/1343-turtles",
"/clients/*/project/*",
),
(
"/clients/11/project/1343turtles",
"/clients/*/project/1343turtles",
),
(
"/clients/563712f9722fb0996ac8f3905b40786f/project/1343", # md5
"/clients/*/project/*",
),
(
"/clients/563712f9722fb0996z/project/", # md5-like
"/clients/563712f9722fb0996z/project/",
),
(
"/clients/403926033d001b5279df37cbbe5287b7c7c267fa/project/1343", # sha1
"/clients/*/project/*",
),
(
"/clients/8ff81d74-606d-4c75-ac5e-cee65cbbc866/project/1343", # uuid
"/clients/*/project/*",
),
(
"/clients/hello-123s/project/1343", # uuid-like
"/clients/hello-123s/project/*",
),
(
"/item/5c9b9b609c172be2a013f534/details", # short hash
"/item/*/details",
),
(
"/item/be9a25322d/details", # shorter short hash
"/item/*/details",
),
(
"/item/defaced12/details", # false short hash
"/item/defaced12/details",
),
(
"/item/defaced12-abba/details", # false short hash 2
"/item/defaced12-abba/details",
),
],
)
def test_parameterizes_url(url: str, parameterized_url: str) -> None:
r = parameterize_url(url)
assert r == parameterized_url
@pytest.mark.parametrize(
"span",
[
{
"span_id": "a",
"op": "http.client",
"hash": "b",
"description": "GET http://service.io/resource",
},
{
"span_id": "a",
"op": "http.client",
"description": "GET http://service.io/resource",
"hash": "a",
"data": {
"url": "/resource",
},
},
{
"span_id": "a",
"op": "http.client",
"description": "GET http://service.io/resource",
"hash": "a",
"data": {
"url": {
"pathname": "/resource",
}
},
},
{
"span_id": "a",
"op": "http.client",
"description": "GET http://service.io/resource.json?param=something",
"hash": "a",
},
],
)
@pytest.mark.django_db
def test_allows_eligible_spans(span: Span) -> None:
detector = NPlusOneAPICallsDetector(get_detection_settings(), {})
assert detector._is_span_eligible(span)
@pytest.mark.parametrize(
"span",
[
{"span_id": "a", "op": None},
{"op": "http.client"},
{
"span_id": "a",
"op": "http.client",
"hash": "a",
"description": "POST http://service.io/resource",
},
{
"span_id": "a",
"op": "http.client",
"description": "GET http://service.io/resource.js",
"hash": "a",
},
{
"span_id": "a",
"op": "http.client",
"description": "GET /resource.js",
"hash": "a",
"data": {"url": "/resource.js"},
},
{
"span_id": "a",
"op": "http.client",
"description": "GET http://service.io/resource?graphql=somequery",
"hash": "a",
},
{
"span_id": "a",
"op": "http.client",
"description": "GET http://service.io/resource", # New JS SDK removes query string from description
"hash": "a",
"data": {
"http.query": "graphql=somequery",
"url": "http://service.io/resource",
},
},
{
"span_id": "a",
"op": "http.client",
"hash": "b",
"description": "GET /_next/data/LjdprRSkUtLP0bMUoWLur/items.json?collection=hello",
},
{
"span_id": "a",
"op": "http.client",
"hash": "b",
"description": "GET /__nextjs_original-stack-frame?isServerSide=false&file=webpack-internal%3A%2F%2F%2F.%2Fnode_modules%2Freact-dom%2Fcjs%2Freact-dom.development.js&methodName=Object.invokeGuardedCallbackDev&arguments=&lineNumber=73&column=3`",
},
],
)
@pytest.mark.django_db
def test_rejects_ineligible_spans(span: Span) -> None:
detector = NPlusOneAPICallsDetector(get_detection_settings(), {})
assert not detector._is_span_eligible(span)
@pytest.mark.parametrize(
"url,url_without_query",
[
("", ""),
("http://service.io", "http://service.io"),
("http://service.io/resource", "http://service.io/resource"),
("/resource?id=1", "/resource"),
("/resource?id=1&sort=down", "/resource"),
],
)
def test_removes_query_params(url: str, url_without_query: str) -> None:
assert without_query_params(url) == url_without_query
@pytest.mark.parametrize(
"event",
[get_event("n-plus-one-api-calls/not-n-plus-one-api-calls")],
)
def test_allows_eligible_events(event: dict[str, Any]) -> None:
assert NPlusOneAPICallsDetector.is_event_eligible(event)
@pytest.mark.parametrize(
"event",
[
{"contexts": {"trace": {"op": "task"}}},
],
)
def test_rejects_ineligible_events(event: dict[str, Any]) -> None:
assert not NPlusOneAPICallsDetector.is_event_eligible(event)
| NPlusOneAPICallsDetectorTest |
python | modin-project__modin | modin/config/envvars.py | {
"start": 2597,
"end": 6151
} | class ____(
EnvironmentVariable,
# 'type' is a mandatory parameter for '__init_subclasses__', so we have to pass something here,
# this doesn't force child classes to have 'str' type though, they actually can be any type
type=str,
):
"""Ensure values synchronization between sibling parameters."""
_update_sibling = True
@classmethod
def _sibling(cls) -> type["EnvWithSibilings"]:
"""Return a sibling parameter."""
raise NotImplementedError()
@classmethod
def get(cls) -> Any:
"""
Get parameter's value and ensure that it's equal to the sibling's value.
Returns
-------
Any
"""
sibling = cls._sibling()
if sibling._value is _UNSET and cls._value is _UNSET:
super().get()
with warnings.catch_warnings():
# filter warnings that can potentially come from the potentially deprecated sibling
warnings.filterwarnings("ignore", category=FutureWarning)
super(EnvWithSibilings, sibling).get()
if (
cls._value_source
== sibling._value_source
== ValueSource.GOT_FROM_CFG_SOURCE
):
raise ValueError(
f"Configuration is ambiguous. You cannot set '{cls.varname}' and '{sibling.varname}' at the same time."
)
# further we assume that there are only two valid sources for the variables: 'GOT_FROM_CFG' and 'DEFAULT',
# as otherwise we wouldn't ended-up in this branch at all, because all other ways of setting a value
# changes the '._value' attribute from '_UNSET' to something meaningful
from modin.error_message import ErrorMessage
if cls._value_source == ValueSource.GOT_FROM_CFG_SOURCE:
ErrorMessage.catch_bugs_and_request_email(
failure_condition=sibling._value_source != ValueSource.DEFAULT
)
sibling._value = cls._value
sibling._value_source = ValueSource.GOT_FROM_CFG_SOURCE
elif sibling._value_source == ValueSource.GOT_FROM_CFG_SOURCE:
ErrorMessage.catch_bugs_and_request_email(
failure_condition=cls._value_source != ValueSource.DEFAULT
)
cls._value = sibling._value
cls._value_source = ValueSource.GOT_FROM_CFG_SOURCE
else:
ErrorMessage.catch_bugs_and_request_email(
failure_condition=cls._value_source != ValueSource.DEFAULT
or sibling._value_source != ValueSource.DEFAULT
)
# propagating 'cls' default value to the sibling
sibling._value = cls._value
return super().get()
@classmethod
def put(cls, value: Any) -> None:
"""
Set a new value to this parameter as well as to its sibling.
Parameters
----------
value : Any
"""
super().put(value)
# avoid getting into an infinite recursion
if cls._update_sibling:
cls._update_sibling = False
try:
with warnings.catch_warnings():
# filter potential future warnings of the sibling
warnings.filterwarnings("ignore", category=FutureWarning)
cls._sibling().put(value)
finally:
cls._update_sibling = True
| EnvWithSibilings |
python | ray-project__ray | rllib/env/single_agent_env_runner.py | {
"start": 2332,
"end": 37311
} | class ____(EnvRunner, Checkpointable):
"""The generic environment runner for the single agent case."""
@override(EnvRunner)
def __init__(self, *, config: AlgorithmConfig, **kwargs):
"""Initializes a SingleAgentEnvRunner instance.
Args:
config: An `AlgorithmConfig` object containing all settings needed to
build this `EnvRunner` class.
"""
super().__init__(config=config, **kwargs)
self.tune_trial_id: str = kwargs.get("tune_trial_id")
self.spaces = kwargs.get("spaces", {})
# Create our callbacks object.
self._callbacks: List[RLlibCallback] = [
cls() for cls in force_list(self.config.callbacks_class)
]
# Set device.
self._device = get_device(
self.config,
0 if not self.worker_index else self.config.num_gpus_per_env_runner,
)
# Create the vectorized gymnasium env.
self.env: Optional[gym.vector.VectorEnv] = None
self.num_envs: int = 0
if (
self.worker_index is None
or self.worker_index > 0
or self.config.create_env_on_local_worker
or self.config.num_env_runners == 0
):
self.make_env()
# Create the env-to-module connector pipeline.
self._env_to_module = self.config.build_env_to_module_connector(
env=self.env, spaces=self.spaces, device=self._device
)
# Cached env-to-module results taken at the end of a `_sample_timesteps()`
# call to make sure the final observation (before an episode cut) gets properly
# processed (and maybe postprocessed and re-stored into the episode).
# For example, if we had a connector that normalizes observations and directly
# re-inserts these new obs back into the episode, the last observation in each
# sample call would NOT be processed, which could be very harmful in cases,
# in which value function bootstrapping of those (truncation) observations is
# required in the learning step.
self._cached_to_module = None
# Create the RLModule.
self.module: Optional[RLModule] = None
self.make_module()
# Create the module-to-env connector pipeline.
self._module_to_env = self.config.build_module_to_env_connector(
env=self.env, spaces=self.spaces
)
# This should be the default.
self._needs_initial_reset: bool = True
self._episodes: List[Optional[SingleAgentEpisode]] = [
None for _ in range(self.num_envs)
]
self._shared_data = None
self._done_episodes_for_metrics: List[SingleAgentEpisode] = []
self._ongoing_episodes_for_metrics: DefaultDict[
EpisodeID, List[SingleAgentEpisode]
] = defaultdict(list)
self._weights_seq_no: int = 0
# Measures the time passed between returning from `sample()`
# and receiving the next `sample()` request from the user.
self._time_after_sampling = None
# Save whether to convert episodes to numpy during sample
# In `OfflineSingleAgentEnvRunner`, this result is set to False
# during initialisation
self.episodes_to_numpy = self.config.episodes_to_numpy
@override(EnvRunner)
def sample(
self,
*,
num_timesteps: int = None,
num_episodes: int = None,
explore: bool = None,
random_actions: bool = False,
force_reset: bool = False,
) -> List[SingleAgentEpisode]:
"""Runs and returns a sample (n timesteps or m episodes) on the env(s).
If neither `num_timesteps` nor `num_episodes` are provided and the config
`batch_mode` is "truncate_episodes" then
`config.get_rollout_fragment_length(self.worker_index) * self.num_envs`
timesteps will be sampled.
Args:
num_timesteps: The number of timesteps to sample during this call.
The episodes returned will contain the total timesteps greater than or
equal to num_timesteps and less than num_timesteps + num_envs_per_env_runner.
Note that only one of `num_timesteps` or `num_episodes` may be provided.
num_episodes: The number of episodes to sample during this call.
Note that only one of `num_timesteps` or `num_episodes` may be provided.
explore: If True, will use the RLModule's `forward_exploration()`
method to compute actions. If False, will use the RLModule's
`forward_inference()` method. If None (default), will use the `explore`
boolean setting from `self.config` passed into this EnvRunner's
constructor. You can change this setting in your config via
`config.env_runners(explore=True|False)`.
random_actions: If True, actions will be sampled randomly (from the action
space of the environment). If False (default), actions or action
distribution parameters are computed by the RLModule.
force_reset: Whether to force-reset all vectorized environments before
sampling. Useful if you would like to collect a clean slate of new
episodes via this call. Note that when sampling n episodes
(`num_episodes != None`), this is fixed to True.
Returns:
A list of `SingleAgentEpisode` instances, carrying the sampled data.
"""
if self.env is None:
raise ValueError(
f"{self} doesn't have an env! Can't call `sample()` on it."
)
assert not (num_timesteps is not None and num_episodes is not None)
# Log time between `sample()` requests.
if self._time_after_sampling is not None:
self.metrics.log_value(
key=TIME_BETWEEN_SAMPLING,
value=time.perf_counter() - self._time_after_sampling,
)
# Log current weight seq no.
self.metrics.log_value(
key=WEIGHTS_SEQ_NO,
value=self._weights_seq_no,
window=1,
)
with self.metrics.log_time(SAMPLE_TIMER):
# If no execution details are provided, use the config to try to infer the
# desired timesteps/episodes to sample and exploration behavior.
if explore is None:
explore = self.config.explore
if (
num_timesteps is None
and num_episodes is None
and self.config.batch_mode == "truncate_episodes"
):
num_timesteps = (
self.config.get_rollout_fragment_length(self.worker_index)
* self.num_envs
)
# Sample n timesteps.
if num_timesteps is not None:
assert num_timesteps >= 0
samples = self._sample(
num_timesteps=num_timesteps,
explore=explore,
random_actions=random_actions,
force_reset=force_reset,
)
# Sample m episodes.
elif num_episodes is not None:
assert num_episodes >= 0
samples = self._sample(
num_episodes=num_episodes,
explore=explore,
random_actions=random_actions,
)
# For complete episodes mode, sample as long as the number of timesteps
# done is smaller than the `train_batch_size`.
else:
samples = self._sample(
num_episodes=self.num_envs,
explore=explore,
random_actions=random_actions,
)
# Make the `on_sample_end` callback.
make_callback(
"on_sample_end",
callbacks_objects=self._callbacks,
callbacks_functions=self.config.callbacks_on_sample_end,
kwargs=dict(
env_runner=self,
metrics_logger=self.metrics,
samples=samples,
),
)
self._time_after_sampling = time.perf_counter()
return samples
def _sample(
self,
*,
num_timesteps: Optional[int] = None,
num_episodes: Optional[int] = None,
explore: bool,
random_actions: bool = False,
force_reset: bool = False,
) -> List[SingleAgentEpisode]:
"""Helper method to sample n timesteps or m episodes."""
done_episodes_to_return: List[SingleAgentEpisode] = []
# Have to reset the env (on all vector sub_envs).
if force_reset or num_episodes is not None or self._needs_initial_reset:
episodes = self._episodes = [None for _ in range(self.num_envs)]
shared_data = self._shared_data = {}
self._reset_envs(episodes, shared_data, explore)
# We just reset the env. Don't have to force this again in the next
# call to `self._sample_timesteps()`.
self._needs_initial_reset = False
else:
episodes = self._episodes
shared_data = self._shared_data
if num_episodes is not None:
self._needs_initial_reset = True
# Loop through `num_timesteps` timesteps or `num_episodes` episodes.
ts = 0
eps = 0
while (
(ts < num_timesteps) if num_timesteps is not None else (eps < num_episodes)
):
# Act randomly.
if random_actions:
to_env = {
Columns.ACTIONS: self.env.action_space.sample(),
}
# Compute an action using the RLModule.
else:
# Env-to-module connector (already cached).
to_module = self._cached_to_module
assert to_module is not None
self._cached_to_module = None
# RLModule forward pass: Explore or not.
if explore:
# Global env steps sampled are (roughly) this EnvRunner's lifetime
# count times the number of env runners in the algo.
global_env_steps_lifetime = (
self.metrics.peek(NUM_ENV_STEPS_SAMPLED_LIFETIME, default=0)
+ ts
) * (self.config.num_env_runners or 1)
with self.metrics.log_time(RLMODULE_INFERENCE_TIMER):
to_env = self.module.forward_exploration(
to_module, t=global_env_steps_lifetime
)
else:
with self.metrics.log_time(RLMODULE_INFERENCE_TIMER):
to_env = self.module.forward_inference(to_module)
# Module-to-env connector.
to_env = self._module_to_env(
rl_module=self.module,
batch=to_env,
episodes=episodes,
explore=explore,
shared_data=shared_data,
metrics=self.metrics,
metrics_prefix_key=(MODULE_TO_ENV_CONNECTOR,),
)
# Extract the (vectorized) actions (to be sent to the env) from the
# module/connector output. Note that these actions are fully ready (e.g.
# already unsquashed/clipped) to be sent to the environment and might not
# be identical to the actions produced by the RLModule/distribution, which
# are the ones stored permanently in the episode objects.
actions = to_env.pop(Columns.ACTIONS)
actions_for_env = to_env.pop(Columns.ACTIONS_FOR_ENV, actions)
# Try stepping the environment.
results = self._try_env_step(actions_for_env)
if results == ENV_STEP_FAILURE:
return self._sample(
num_timesteps=num_timesteps,
num_episodes=num_episodes,
explore=explore,
random_actions=random_actions,
force_reset=True,
)
observations, rewards, terminateds, truncateds, infos = results
observations, actions = unbatch(observations), unbatch(actions)
call_on_episode_start = set()
for env_index in range(self.num_envs):
extra_model_output = {k: v[env_index] for k, v in to_env.items()}
extra_model_output[WEIGHTS_SEQ_NO] = self._weights_seq_no
# Episode has no data in it yet -> Was just reset and needs to be called
# with its `add_env_reset()` method.
if not self._episodes[env_index].is_reset:
episodes[env_index].add_env_reset(
observation=observations[env_index],
infos=infos[env_index],
)
call_on_episode_start.add(env_index)
# Call `add_env_step()` method on episode.
else:
# Only increase ts when we actually stepped (not reset as a reset
# does not count as a timestep).
ts += 1
episodes[env_index].add_env_step(
observation=observations[env_index],
action=actions[env_index],
reward=rewards[env_index],
infos=infos[env_index],
terminated=terminateds[env_index],
truncated=truncateds[env_index],
extra_model_outputs=extra_model_output,
)
# Env-to-module connector pass cache results as we will do the RLModule
# forward pass only in the next `while`-iteration.
if self.module is not None:
self._cached_to_module = self._env_to_module(
batch={},
episodes=episodes,
explore=explore,
rl_module=self.module,
shared_data=shared_data,
metrics=self.metrics,
metrics_prefix_key=(ENV_TO_MODULE_CONNECTOR,),
)
for env_index in range(self.num_envs):
# Call `on_episode_start()` callback (always after reset).
if env_index in call_on_episode_start:
self._make_on_episode_callback(
"on_episode_start", env_index, episodes
)
# Make the `on_episode_step` callbacks.
else:
self._make_on_episode_callback(
"on_episode_step", env_index, episodes
)
# Episode is done.
if episodes[env_index].is_done:
eps += 1
# Make the `on_episode_end` callbacks (before finalizing the episode
# object).
self._make_on_episode_callback(
"on_episode_end", env_index, episodes
)
# Numpy'ize the episode.
if self.episodes_to_numpy:
# Any possibly compress observations.
done_episodes_to_return.append(episodes[env_index].to_numpy())
# Leave episode as lists of individual (obs, action, etc..) items.
else:
done_episodes_to_return.append(episodes[env_index])
# Also early-out if we reach the number of episodes within this
# for-loop.
if eps == num_episodes:
break
# Create a new episode object with no data in it and execute
# `on_episode_created` callback (before the `env.reset()` call).
self._new_episode(env_index, episodes)
# Return done episodes ...
self._done_episodes_for_metrics.extend(done_episodes_to_return)
# ... and all ongoing episode chunks.
# Also, make sure we start new episode chunks (continuing the ongoing episodes
# from the to-be-returned chunks).
ongoing_episodes_to_return = []
# Only if we are doing individual timesteps: We have to maybe cut an ongoing
# episode and continue building it on the next call to `sample()`.
if num_timesteps is not None:
ongoing_episodes_continuations = [
eps.cut(len_lookback_buffer=self.config.episode_lookback_horizon)
for eps in self._episodes
]
for eps in self._episodes:
# Just started episodes do not have to be returned. There is no data
# in them anyway.
if eps.t == 0:
continue
eps.validate()
self._ongoing_episodes_for_metrics[eps.id_].append(eps)
# Numpy'ize the episode.
if self.episodes_to_numpy:
# Any possibly compress observations.
ongoing_episodes_to_return.append(eps.to_numpy())
# Leave episode as lists of individual (obs, action, etc..) items.
else:
ongoing_episodes_to_return.append(eps)
# Continue collecting into the cut Episode chunks.
self._episodes = ongoing_episodes_continuations
# Ray metrics
self._log_env_steps(metric=self._metrics_num_env_steps_sampled, num_steps=ts)
self._increase_sampled_metrics(ts, len(done_episodes_to_return))
# Return collected episode data.
return done_episodes_to_return + ongoing_episodes_to_return
@override(EnvRunner)
def get_spaces(self):
if self.env is None:
return self.spaces
return {
INPUT_ENV_SPACES: (self.env.observation_space, self.env.action_space),
INPUT_ENV_SINGLE_SPACES: (
self.env.single_observation_space,
self.env.single_action_space,
),
DEFAULT_MODULE_ID: (
self._env_to_module.observation_space,
self.env.single_action_space,
),
}
    @override(EnvRunner)
    def get_metrics(self) -> ResultDict:
        """Computes per-episode metrics and returns the reduced metrics dict.

        For every episode completed since the last call, folds in the
        lengths/returns/durations of previously returned chunks of the same
        episode (tracked in `self._ongoing_episodes_for_metrics`), logs the
        combined per-episode stats, then clears the done-episodes cache and
        returns `self.metrics.reduce()`.
        """
        # Compute per-episode metrics (only on already completed episodes).
        for eps in self._done_episodes_for_metrics:
            assert eps.is_done
            episode_length = len(eps)
            episode_return = eps.get_return()
            episode_duration_s = eps.get_duration_s()
            # Don't forget about the already returned chunks of this episode.
            if eps.id_ in self._ongoing_episodes_for_metrics:
                for eps2 in self._ongoing_episodes_for_metrics[eps.id_]:
                    episode_length += len(eps2)
                    episode_return += eps2.get_return()
                    episode_duration_s += eps2.get_duration_s()
                # All chunks of this episode are accounted for -> drop the
                # cache entry to avoid a memory leak.
                del self._ongoing_episodes_for_metrics[eps.id_]
            self._log_episode_metrics(
                episode_length, episode_return, episode_duration_s
            )
        # Now that we have logged everything, clear cache of done episodes.
        self._done_episodes_for_metrics.clear()
        # Return reduced metrics.
        return self.metrics.reduce()
    @override(Checkpointable)
    def get_state(
        self,
        components: Optional[Union[str, Collection[str]]] = None,
        *,
        not_components: Optional[Union[str, Collection[str]]] = None,
        **kwargs,
    ) -> StateDict:
        """Returns this EnvRunner's state as a dict.

        Args:
            components: Optional name(s) of the sub-component(s) to include;
                None means "all components".
            not_components: Optional name(s) of sub-component(s) to explicitly
                exclude from the returned state.
            **kwargs: Forwarded to `self.module.get_state()`.

        Returns:
            A StateDict always containing the lifetime env-step counter and,
            depending on the component filters, the RLModule state (plus the
            current weights sequence number) and the two connector states.
        """
        state = {
            NUM_ENV_STEPS_SAMPLED_LIFETIME: (
                self.metrics.peek(NUM_ENV_STEPS_SAMPLED_LIFETIME, default=0)
            ),
        }
        if self._check_component(COMPONENT_RL_MODULE, components, not_components):
            state[COMPONENT_RL_MODULE] = self.module.get_state(
                components=self._get_subcomponents(COMPONENT_RL_MODULE, components),
                not_components=self._get_subcomponents(
                    COMPONENT_RL_MODULE, not_components
                ),
                **kwargs,
            )
            # The seq-no travels with the weights so receivers can decide
            # whether these weights are newer than their own.
            state[WEIGHTS_SEQ_NO] = self._weights_seq_no
        if self._check_component(
            COMPONENT_ENV_TO_MODULE_CONNECTOR, components, not_components
        ):
            state[COMPONENT_ENV_TO_MODULE_CONNECTOR] = self._env_to_module.get_state()
        if self._check_component(
            COMPONENT_MODULE_TO_ENV_CONNECTOR, components, not_components
        ):
            state[COMPONENT_MODULE_TO_ENV_CONNECTOR] = self._module_to_env.get_state()
        return state
    @override(Checkpointable)
    def set_state(self, state: StateDict) -> None:
        """Restores this EnvRunner's state from a (partial) state dict.

        Only the components present in `state` are updated. RLModule weights
        are applied only if they are not stale (based on WEIGHTS_SEQ_NO).

        Args:
            state: A StateDict as previously returned by `get_state()`. May be
                partial; missing components are left untouched.
        """
        if COMPONENT_ENV_TO_MODULE_CONNECTOR in state:
            self._env_to_module.set_state(state[COMPONENT_ENV_TO_MODULE_CONNECTOR])
        if COMPONENT_MODULE_TO_ENV_CONNECTOR in state:
            self._module_to_env.set_state(state[COMPONENT_MODULE_TO_ENV_CONNECTOR])
        # Update the RLModule state.
        if COMPONENT_RL_MODULE in state:
            # A missing value for WEIGHTS_SEQ_NO or a value of 0 means: Force the
            # update.
            weights_seq_no = state.get(WEIGHTS_SEQ_NO, 0)
            # Only update the weights, if this is the first synchronization or
            # if the weights of this `EnvRunner` lag behind the actual ones.
            if weights_seq_no == 0 or self._weights_seq_no < weights_seq_no:
                rl_module_state = state[COMPONENT_RL_MODULE]
                # The weights may arrive as a Ray object reference -> resolve.
                if isinstance(rl_module_state, ray.ObjectRef):
                    rl_module_state = ray.get(rl_module_state)
                # Unwrap a multi-module state dict down to the default module.
                if (
                    isinstance(rl_module_state, dict)
                    and DEFAULT_MODULE_ID in rl_module_state
                ):
                    rl_module_state = rl_module_state[DEFAULT_MODULE_ID]
                self.module.set_state(rl_module_state)
            # Update our weights_seq_no, if the new one is > 0.
            if weights_seq_no > 0:
                self._weights_seq_no = weights_seq_no
        # Update our lifetime counters.
        if NUM_ENV_STEPS_SAMPLED_LIFETIME in state:
            self.metrics.set_value(
                key=NUM_ENV_STEPS_SAMPLED_LIFETIME,
                value=state[NUM_ENV_STEPS_SAMPLED_LIFETIME],
                reduce="sum",
                with_throughput=True,
            )
@override(Checkpointable)
def get_ctor_args_and_kwargs(self):
return (
(), # *args
{"config": self.config}, # **kwargs
)
@override(Checkpointable)
def get_metadata(self):
metadata = Checkpointable.get_metadata(self)
metadata.update(
{
# TODO (sven): Maybe add serialized (JSON-writable) config here?
}
)
return metadata
@override(Checkpointable)
def get_checkpointable_components(self):
return [
(COMPONENT_RL_MODULE, self.module),
(COMPONENT_ENV_TO_MODULE_CONNECTOR, self._env_to_module),
(COMPONENT_MODULE_TO_ENV_CONNECTOR, self._module_to_env),
]
    @override(EnvRunner)
    def assert_healthy(self):
        """Checks that self.__init__() has been completed properly.

        Ensures that the instance has a `MultiRLModule` and an
        environment defined.

        Raises:
            AssertionError: If the EnvRunner Actor has NOT been properly initialized.
        """
        # Make sure we have built our gym.vector.Env and RLModule properly.
        # NOTE(review): `assert` is intentional here (callers expect an
        # AssertionError per the docstring), but it is stripped under `-O`.
        assert self.env and hasattr(self, "module")
    @override(EnvRunner)
    def make_env(self) -> None:
        """Creates a vectorized gymnasium env and stores it in `self.env`.

        Note that users can change the EnvRunner's config (e.g. change
        `self.config.env_config`) and then call this method to create new environments
        with the updated configuration.
        """
        # If an env already exists, try closing it first
        # to allow it to properly clean up.
        if self.env is not None:
            try:
                self.env.close()
            except Exception as e:
                # Best-effort close: log, but don't prevent re-creation.
                logger.warning(
                    "Tried closing the existing env, but failed with error: "
                    f"{e.args[0]}"
                )

        # Wrap the raw env_config dict in an EnvContext (adds worker info),
        # unless the user already passed one in.
        env_config = self.config.env_config
        if not isinstance(env_config, EnvContext):
            env_ctx = EnvContext(
                env_config,
                worker_index=self.worker_index,
                num_workers=self.num_workers,
                remote=self.config.remote_worker_envs,
            )
        else:
            env_ctx = env_config

        # No env provided -> Error.
        if not self.config.env:
            raise ValueError(
                "`config.env` is not provided! "
                "You should provide a valid environment to your config through "
                "`config.environment([env descriptor e.g. 'CartPole-v1'])`."
            )
        # Register env for the local context.
        # Note, `gym.register` has to be called on each worker.
        elif isinstance(self.config.env, str) and _global_registry.contains(
            ENV_CREATOR, self.config.env
        ):
            # Env was registered via RLlib's tune registry -> re-register it
            # under a local gymnasium name with single- and vector entry points.
            env_name = "rllib-single-agent-env-v0"
            entry_point = _global_registry.get(ENV_CREATOR, self.config.env)
            gym.register(
                env_name,
                entry_point=lambda: entry_point(env_ctx),
                vector_entry_point=lambda num_envs: entry_point(
                    env_ctx | {"num_envs": num_envs}
                ),
            )
            # Config already captured in the entry points -> pass nothing extra.
            env_config = {}
        elif callable(self.config.env):
            # Env given as a callable (env creator) -> register it locally.
            env_name = "rllib-single-agent-env-v0"
            gym.register(
                env_name,
                entry_point=lambda: self.config.env(env_ctx),
                vector_entry_point=lambda num_envs: self.config.env(
                    env_ctx | {"num_envs": num_envs}
                ),
            )
            env_config = {}
        else:
            # Plain gymnasium env-id string.
            env_name = self.config.env

        # Build the vectorized env with the configured vectorization mode.
        vectorize_mode = gym.VectorizeMode(self.config.gym_env_vectorize_mode)
        try:
            # DictInfoToList converts gymnasium's dict-of-arrays infos back
            # into a per-sub-env list of info dicts.
            self.env = DictInfoToList(
                gym.make_vec(
                    env_name,
                    num_envs=self.config.num_envs_per_env_runner,
                    vectorization_mode=vectorize_mode,
                    **env_config,
                )
            )
        except gym.error.Error as e:
            raise EnvError(
                ERR_MSG_INVALID_ENV_DESCRIPTOR.format(self.config.env)
            ) from e

        # Sanity check: the vector env must match the configured batch size.
        self.num_envs: int = self.env.num_envs
        assert self.num_envs == self.config.num_envs_per_env_runner

        # Set the flag to reset all envs upon the next `sample()` call.
        self._needs_initial_reset = True

        # Call the `on_environment_created` callback.
        make_callback(
            "on_environment_created",
            callbacks_objects=self._callbacks,
            callbacks_functions=self.config.callbacks_on_environment_created,
            kwargs=dict(
                env_runner=self,
                metrics_logger=self.metrics,
                env=self.env.unwrapped,
                env_context=env_ctx,
            ),
        )
    @override(EnvRunner)
    def make_module(self):
        """Builds the (inference-only) RLModule and stores it in `self.module`.

        Derives the module spec from the current config/env/spaces, builds it,
        and moves it to this EnvRunner's device. If the config does not
        implement `get_rl_module_spec()`, `self.module` is set to None.
        """
        env = self.env.unwrapped if self.env is not None else None
        try:
            module_spec: RLModuleSpec = self.config.get_rl_module_spec(
                env=env, spaces=self.get_spaces(), inference_only=True
            )
            # Build the module from its spec.
            self.module = module_spec.build()
            # Move the RLModule to our device.
            # TODO (sven): In order to make this framework-agnostic, we should maybe
            #  make the RLModule.build() method accept a device OR create an additional
            #  `RLModule.to()` override.
            self.module.to(self._device)
        # If `AlgorithmConfig.get_rl_module_spec()` is not implemented, this env runner
        # will not have an RLModule, but might still be usable with random actions.
        except NotImplementedError:
            self.module = None
@override(EnvRunner)
def stop(self):
# Close our env object via gymnasium's API.
if self.env is not None:
self.env.close()
    def _reset_envs(self, episodes, shared_data, explore):
        """Resets all sub-envs and (re)initializes the given episode list.

        Creates fresh episodes, resets the vector env, writes the reset
        observations/infos into the episodes, pre-runs the env-to-module
        connector on the reset data, and fires the `on_episode_start`
        callbacks.

        Args:
            episodes: The list of SingleAgentEpisode objects to (re)initialize
                in place (one per sub-env).
            shared_data: Dict shared between the connector passes.
            explore: Whether exploration is enabled (passed to the connector).
        """
        # Create n new episodes and make the `on_episode_created` callbacks.
        for env_index in range(self.num_envs):
            self._new_episode(env_index, episodes)

        # Erase all cached ongoing episodes (these will never be completed and
        # would thus never be returned/cleaned by `get_metrics` and cause a memory
        # leak).
        self._ongoing_episodes_for_metrics.clear()

        # Try resetting the environment.
        observations, infos = self._try_env_reset(
            # Only seed (if seed provided) upon initial reset.
            seed=self._seed if self._needs_initial_reset else None,
            # TODO (sven): Support options?
            options=None,
        )
        # Split the batched reset-obs into per-sub-env items.
        observations = unbatch(observations)

        # Set initial obs and infos in the episodes.
        for env_index in range(self.num_envs):
            episodes[env_index].add_env_reset(
                observation=observations[env_index],
                infos=infos[env_index],
            )

        # Run the env-to-module connector to make sure the reset-obs/infos have
        # properly been processed (if applicable).
        self._cached_to_module = None
        if self.module:
            self._cached_to_module = self._env_to_module(
                rl_module=self.module,
                episodes=episodes,
                explore=explore,
                shared_data=shared_data,
                metrics=self.metrics,
                metrics_prefix_key=(ENV_TO_MODULE_CONNECTOR,),
            )

        # Call `on_episode_start()` callbacks (always after reset).
        for env_index in range(self.num_envs):
            self._make_on_episode_callback("on_episode_start", env_index, episodes)
def _new_episode(self, env_index, episodes=None):
episodes = episodes if episodes is not None else self._episodes
episodes[env_index] = SingleAgentEpisode(
observation_space=self.env.single_observation_space,
action_space=self.env.single_action_space,
)
self._make_on_episode_callback("on_episode_created", env_index, episodes)
    def _make_on_episode_callback(
        self, which: str, idx: int, episodes: List[SingleAgentEpisode]
    ):
        """Fires the given `on_episode_*` callback for one episode.

        Args:
            which: Name of the callback hook, e.g. "on_episode_start",
                "on_episode_end", "on_episode_created".
            idx: Index (sub-env index) of the episode within `episodes`.
            episodes: The current list of episodes (one per sub-env).
        """
        kwargs = dict(
            episode=episodes[idx],
            env_runner=self,
            metrics_logger=self.metrics,
            env=self.env.unwrapped,
            rl_module=self.module,
            env_index=idx,
        )
        # `on_episode_end` additionally receives all previously returned
        # chunks of this (possibly cut) episode.
        if which == "on_episode_end":
            kwargs["prev_episode_chunks"] = self._ongoing_episodes_for_metrics[
                episodes[idx].id_
            ]
        make_callback(
            which,
            callbacks_objects=self._callbacks,
            # User-supplied callback functions are stored on the config under
            # e.g. `callbacks_on_episode_start`.
            callbacks_functions=getattr(self.config, f"callbacks_{which}"),
            kwargs=kwargs,
        )
def _increase_sampled_metrics(self, num_steps, num_episodes_completed):
# Per sample cycle stats.
self.metrics.log_value(
NUM_ENV_STEPS_SAMPLED, num_steps, reduce="sum", clear_on_reduce=True
)
self.metrics.log_value(
(NUM_AGENT_STEPS_SAMPLED, DEFAULT_AGENT_ID),
num_steps,
reduce="sum",
clear_on_reduce=True,
)
self.metrics.log_value(
(NUM_MODULE_STEPS_SAMPLED, DEFAULT_MODULE_ID),
num_steps,
reduce="sum",
clear_on_reduce=True,
)
self.metrics.log_value(
NUM_EPISODES,
num_episodes_completed,
reduce="sum",
clear_on_reduce=True,
)
# Lifetime stats.
self.metrics.log_value(
NUM_ENV_STEPS_SAMPLED_LIFETIME,
num_steps,
reduce="sum",
with_throughput=True,
)
self.metrics.log_value(
(NUM_AGENT_STEPS_SAMPLED_LIFETIME, DEFAULT_AGENT_ID),
num_steps,
reduce="sum",
)
self.metrics.log_value(
(NUM_MODULE_STEPS_SAMPLED_LIFETIME, DEFAULT_MODULE_ID),
num_steps,
reduce="sum",
)
self.metrics.log_value(
NUM_EPISODES_LIFETIME,
num_episodes_completed,
reduce="sum",
)
return num_steps
def _log_episode_metrics(self, length, ret, sec):
# Log general episode metrics.
# Use the configured window, but factor in the parallelism of the EnvRunners.
# As a result, we only log the last `window / num_env_runners` steps here,
# because everything gets parallel-merged in the Algorithm process.
win = max(
1,
int(
math.ceil(
self.config.metrics_num_episodes_for_smoothing
/ (self.config.num_env_runners or 1)
)
),
)
self.metrics.log_value(EPISODE_LEN_MEAN, length, window=win)
self.metrics.log_value(EPISODE_RETURN_MEAN, ret, window=win)
self.metrics.log_value(EPISODE_DURATION_SEC_MEAN, sec, window=win)
# Per-agent returns.
self.metrics.log_value(
("agent_episode_return_mean", DEFAULT_AGENT_ID), ret, window=win
)
# Per-RLModule returns.
self.metrics.log_value(
("module_episode_return_mean", DEFAULT_MODULE_ID), ret, window=win
)
# For some metrics, log min/max as well.
self.metrics.log_value(EPISODE_LEN_MIN, length, reduce="min", window=win)
self.metrics.log_value(EPISODE_RETURN_MIN, ret, reduce="min", window=win)
self.metrics.log_value(EPISODE_LEN_MAX, length, reduce="max", window=win)
self.metrics.log_value(EPISODE_RETURN_MAX, ret, reduce="max", window=win)
    @Deprecated(
        new="SingleAgentEnvRunner.get_state(components='rl_module')",
        error=True,
    )
    def get_weights(self, *args, **kwargs):
        # Stub only: with `error=True`, the decorator raises on call and points
        # users to `get_state()` instead.
        pass
    @Deprecated(new="SingleAgentEnvRunner.set_state()", error=True)
    def set_weights(self, *args, **kwargs):
        # Stub only: with `error=True`, the decorator raises on call and points
        # users to `set_state()` instead.
        pass
| SingleAgentEnvRunner |
python | Unity-Technologies__ml-agents | ml-agents-envs/setup.py | {
"start": 268,
"end": 2289
} | class ____(install):
"""
Custom command to verify that the git tag is the expected one for the release.
Originally based on https://circleci.com/blog/continuously-deploying-python-packages-to-pypi-with-circleci/
This differs slightly because our tags and versions are different.
"""
description = "verify that the git tag matches our version"
def run(self):
tag = os.getenv("GITHUB_REF", "NO GITHUB TAG!").replace("refs/tags/", "")
if tag != EXPECTED_TAG:
info = "Git tag: {} does not match the expected tag of this app: {}".format(
tag, EXPECTED_TAG
)
sys.exit(info)
# Get the long description from the README file
with open(os.path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="mlagents_envs",
version=VERSION,
description="Unity Machine Learning Agents Interface",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Unity-Technologies/ml-agents",
author="Unity Technologies",
author_email="ML-Agents@unity3d.com",
classifiers=[
"Intended Audience :: Developers",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.10",
],
packages=find_packages(
exclude=["*.tests", "*.tests.*", "tests.*", "tests", "colabs", "*.ipynb"]
),
zip_safe=False,
install_requires=[
"cloudpickle",
"grpcio>=1.11.0,<=1.53.2",
"Pillow>=4.2.1",
"protobuf>=3.6,<3.21",
"pyyaml>=3.1.0",
"gym>=0.21.0",
"pettingzoo==1.15.0",
"numpy>=1.23.5,<1.24.0",
"filelock>=3.4.0",
],
python_requires=">=3.10.1,<=3.10.12",
# TODO: Remove this once mypy stops having spurious setuptools issues.
cmdclass={"verify": VerifyVersionCommand}, # type: ignore
)
| VerifyVersionCommand |
python | ray-project__ray | python/ray/data/_internal/logical/interfaces/logical_operator.py | {
"start": 4443,
"end": 5053
} | class ____(Enum):
"""Defines how predicates can be passed through through an operator."""
# Predicate can be pushed through as-is (e.g., Sort, Repartition, RandomShuffle, Limit)
PASSTHROUGH = "passthrough"
# Predicate can be pushed through but needs column rebinding (e.g., Project)
PASSTHROUGH_WITH_SUBSTITUTION = "passthrough_with_substitution"
# Predicate can be pushed into each branch (e.g., Union)
PUSH_INTO_BRANCHES = "push_into_branches"
# Predicate can be conditionally pushed based on columns (e.g., Join)
CONDITIONAL = "conditional"
| PredicatePassThroughBehavior |
python | sphinx-doc__sphinx | sphinx/util/logging.py | {
"start": 16922,
"end": 17377
} | class ____(logging.Formatter):
def format(self, record: logging.LogRecord) -> str:
message = super().format(record)
colour_name = getattr(record, 'color', '')
if not colour_name:
colour_name = COLOR_MAP.get(record.levelno, '')
if not colour_name:
return message
try:
return colourise(colour_name, message)
except ValueError:
return message
| ColorizeFormatter |
python | jd__tenacity | tests/test_tenacity.py | {
"start": 32590,
"end": 35909
} | class ____(logging.Handler):
"""Captures log records for inspection."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.records = []
def emit(self, record):
self.records.append(record)
def current_time_ms():
return int(round(time.time() * 1000))
@retry(
wait=tenacity.wait_fixed(0.05),
retry=tenacity.retry_if_result(lambda result: result is None),
)
def _retryable_test_with_wait(thing):
return thing.go()
@retry(
stop=tenacity.stop_after_attempt(3),
retry=tenacity.retry_if_result(lambda result: result is None),
)
def _retryable_test_with_stop(thing):
return thing.go()
@retry(retry=tenacity.retry_if_exception_cause_type(NameError))
def _retryable_test_with_exception_cause_type(thing):
return thing.go()
@retry(retry=tenacity.retry_if_exception_type(IOError))
def _retryable_test_with_exception_type_io(thing):
return thing.go()
@retry(retry=tenacity.retry_if_not_exception_type(IOError))
def _retryable_test_if_not_exception_type_io(thing):
return thing.go()
@retry(
stop=tenacity.stop_after_attempt(3), retry=tenacity.retry_if_exception_type(IOError)
)
def _retryable_test_with_exception_type_io_attempt_limit(thing):
return thing.go()
@retry(retry=tenacity.retry_unless_exception_type(NameError))
def _retryable_test_with_unless_exception_type_name(thing):
return thing.go()
@retry(
stop=tenacity.stop_after_attempt(3),
retry=tenacity.retry_unless_exception_type(NameError),
)
def _retryable_test_with_unless_exception_type_name_attempt_limit(thing):
return thing.go()
@retry(retry=tenacity.retry_unless_exception_type())
def _retryable_test_with_unless_exception_type_no_input(thing):
return thing.go()
@retry(
stop=tenacity.stop_after_attempt(5),
retry=tenacity.retry_if_exception_message(
message=NoCustomErrorAfterCount.derived_message
),
)
def _retryable_test_if_exception_message_message(thing):
return thing.go()
@retry(
retry=tenacity.retry_if_not_exception_message(
message=NoCustomErrorAfterCount.derived_message
)
)
def _retryable_test_if_not_exception_message_message(thing):
return thing.go()
@retry(
retry=tenacity.retry_if_exception_message(
match=NoCustomErrorAfterCount.derived_message[:3] + ".*"
)
)
def _retryable_test_if_exception_message_match(thing):
return thing.go()
@retry(
retry=tenacity.retry_if_not_exception_message(
match=NoCustomErrorAfterCount.derived_message[:3] + ".*"
)
)
def _retryable_test_if_not_exception_message_match(thing):
return thing.go()
@retry(
retry=tenacity.retry_if_not_exception_message(
message=NameErrorUntilCount.derived_message
)
)
def _retryable_test_not_exception_message_delay(thing):
return thing.go()
@retry
def _retryable_default(thing):
return thing.go()
@retry()
def _retryable_default_f(thing):
return thing.go()
@retry(retry=tenacity.retry_if_exception_type(CustomError))
def _retryable_test_with_exception_type_custom(thing):
return thing.go()
@retry(
stop=tenacity.stop_after_attempt(3),
retry=tenacity.retry_if_exception_type(CustomError),
)
def _retryable_test_with_exception_type_custom_attempt_limit(thing):
return thing.go()
| CapturingHandler |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 681726,
"end": 682369
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("UserContentEditEdge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(
sgqlc.types.list_of("UserContentEdit"), graphql_name="nodes"
)
page_info = sgqlc.types.Field(
sgqlc.types.non_null(PageInfo), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| UserContentEditConnection |
python | ansible__ansible | lib/ansible/module_utils/_internal/_datatag/__init__.py | {
"start": 11235,
"end": 11810
} | class ____(AnsibleSerializable, enum.Enum):
"""Base class for serializable enumerations."""
def _as_dict(self) -> t.Dict[str, t.Any]:
return dict(value=self.value)
@classmethod
def _from_dict(cls, d: t.Dict[str, t.Any]) -> t.Self:
return cls(d['value'].lower())
def __str__(self) -> str:
return self.value
def __repr__(self) -> str:
return f'<{self.__class__.__name__}.{self.name}>'
@staticmethod
def _generate_next_value_(name, start, count, last_values):
return name.lower()
| AnsibleSerializableEnum |
python | getsentry__sentry | tests/sentry/utils/test_meta.py | {
"start": 423,
"end": 8880
} | class ____(TestCase):
def test_get_new(self) -> None:
assert Meta().raw() == {}
assert Meta().get() == {}
assert list(Meta().iter_errors()) == []
assert Meta().get_event_errors() == []
def test_create_new(self) -> None:
meta = Meta()
assert meta.create() == {}
assert meta.raw() == {"": {}}
def test_merge_new(self) -> None:
meta = Meta()
assert meta.merge(Meta(other_meta)) == other_meta[""]
assert meta.raw() == other_meta
def test_add_error_new(self) -> None:
meta = Meta()
meta.add_error("additional", "changed")
assert meta.raw() == {"": {"err": ["additional"], "val": "changed"}}
def test_get_missing(self) -> None:
assert Meta({}).raw() == {}
assert Meta({}).get() == {}
assert list(Meta({}).iter_errors()) == []
assert Meta({}).get_event_errors() == []
def test_create_missing(self) -> None:
data: dict[str, Any] = {}
meta = Meta(data)
assert meta.create() == {}
assert data == {"": {}}
def test_merge_missing(self) -> None:
data: dict[str, Any] = {}
meta = Meta(data)
assert meta.merge(Meta(other_meta)) == other_meta[""]
assert data == other_meta
def test_add_error_missing(self) -> None:
data: dict[str, Any] = {}
meta = Meta(data)
meta.add_error("additional", "changed")
assert data == {"": {"err": ["additional"], "val": "changed"}}
def test_get_none(self) -> None:
assert Meta({"": None}).raw() == {"": None}
assert Meta({"": None}).get() == {}
assert list(Meta({"": None}).iter_errors()) == []
assert Meta({"": None}).get_event_errors() == []
def test_create_none(self) -> None:
data = {"": None}
meta = Meta(data)
assert meta.create() == {}
assert data == {"": {}}
def test_merge_none(self) -> None:
data = {"": None}
meta = Meta(data)
assert meta.merge(Meta(other_meta)) == other_meta[""]
assert data == other_meta
def test_add_error_none(self) -> None:
data = {"": None}
meta = Meta(data)
meta.add_error("additional", "changed")
assert data == {"": {"err": ["additional"], "val": "changed"}}
def test_get_empty(self) -> None:
assert Meta({"": {}}).raw() == {"": {}}
assert Meta({"": {}}).get() == {}
assert list(Meta({"": {}}).iter_errors()) == []
assert Meta({"": {}}).get_event_errors() == []
def test_create_empty(self) -> None:
data: dict[str, Any] = {"": {}}
meta = Meta(data)
assert meta.create() == {}
assert data == {"": {}}
def test_merge_empty(self) -> None:
data: dict[str, Any] = {"": {}}
meta = Meta(data)
assert meta.merge(Meta(other_meta)) == other_meta[""]
assert data == other_meta
def test_add_error_empty(self) -> None:
data: dict[str, Any] = {"": {}}
meta = Meta(data)
meta.add_error("additional", "changed")
assert data == {"": {"err": ["additional"], "val": "changed"}}
def test_get_root(self) -> None:
assert Meta(input_meta).raw() == input_meta
assert Meta(input_meta).get() == input_meta[""]
assert list(Meta(input_meta).iter_errors()) == [["existing", {}]]
assert Meta(input_meta).get_event_errors() == [{"type": "existing", "value": "original"}]
def test_create_root(self) -> None:
changed = deepcopy(input_meta)
meta = Meta(changed)
# should be idempotent
assert meta.create() == input_meta[""]
assert changed == input_meta
def test_merge_root(self) -> None:
changed = deepcopy(input_meta)
meta = Meta(changed)
assert meta.merge(Meta(other_meta)) == merged_meta[""]
assert changed == merged_meta
def test_add_error_root(self) -> None:
changed = deepcopy(input_meta)
meta = Meta(changed)
meta.add_error("additional", "changed")
assert meta.get() == {
"err": ["existing", "additional"],
"val": "changed",
"rem": [{"type": "x"}],
}
def test_get_nested_missing(self) -> None:
data: dict[str, Any] = {}
assert Meta(data).enter("field").raw() == {}
assert Meta(data).enter("field").get() == {}
assert list(Meta(data).enter("field").iter_errors()) == []
assert Meta(data).enter("field").get_event_errors() == []
def test_create_nested_missing(self) -> None:
data: dict[str, Any] = {}
meta = Meta(data)
assert meta.enter("field").create() == {}
assert data == {"field": {"": {}}}
def test_merge_nested_missing(self) -> None:
data: dict[str, Any] = {}
meta = Meta(data)
assert meta.enter("field").merge(Meta(other_meta)) == other_meta[""]
assert data == {"field": other_meta}
def test_add_error_nested_missing(self) -> None:
data: dict[str, Any] = {}
meta = Meta(data)
meta.enter("field").add_error("additional", "changed")
assert meta.enter("field").get() == {"err": ["additional"], "val": "changed"}
def test_get_nested_existing(self) -> None:
data = {"field": input_meta}
assert Meta(data).enter("field").raw() == input_meta
assert Meta(data).enter("field").get() == input_meta[""]
assert list(Meta(data).enter("field").iter_errors()) == [["existing", {}]]
assert Meta(data).enter("field").get_event_errors() == [
{"type": "existing", "name": "field", "value": "original"}
]
def test_create_nested_existing(self) -> None:
data = {"field": input_meta}
changed = deepcopy(data)
meta = Meta(changed)
assert meta.enter("field").create() == input_meta[""]
assert changed == data
def test_merge_nested_existing(self) -> None:
data = {"field": input_meta}
changed = deepcopy(data)
meta = Meta(changed)
assert meta.enter("field").merge(Meta(other_meta)) == merged_meta[""]
assert changed == {"field": merged_meta}
def test_add_error_nested_existing(self) -> None:
data = {"field": input_meta}
changed = deepcopy(data)
meta = Meta(changed)
meta.enter("field").add_error("additional", "changed")
assert meta.enter("field").get() == {
"err": ["existing", "additional"],
"val": "changed",
"rem": [{"type": "x"}],
}
def test_get_nested_index(self) -> None:
data = {"0": input_meta}
assert Meta(data).enter(0).raw() == input_meta
assert Meta(data).enter(0).get() == input_meta[""]
assert list(Meta(data).enter(0).iter_errors()) == [["existing", {}]]
def test_create_nested_index(self) -> None:
data: dict[str, Any] = {}
meta = Meta(data)
assert meta.enter(0).create() == {}
assert data == {"0": {"": {}}}
def test_stringify_error(self) -> None:
meta = Meta()
meta.add_error(ValueError("invalid stuff"), "changed")
assert list(meta.iter_errors()) == [["invalid stuff", {}]]
def test_error_with_data(self) -> None:
meta = Meta()
meta.add_error("invalid url", data={"url": "invalid"})
assert list(meta.iter_errors()) == [["invalid url", {"url": "invalid"}]]
def test_get_multiple_event_errors(self) -> None:
# XXX: Value is only added to the first error, which is usually the
# normalization error.
assert Meta(merged_meta).get_event_errors() == [
{"type": "existing", "value": "changed"},
{"type": "additional"},
]
def test_add_remark(self) -> None:
meta = Meta()
meta.add_remark({"rule_id": "react", "type": "s"})
assert meta.get() == {
"rem": [["react", "s"]],
}
meta.add_remark({"rule_id": "removal-rule", "type": "x"})
assert meta.get() == {
"rem": [["react", "s"], ["removal-rule", "x"]],
}
def test_add_remark_with_value(self) -> None:
meta = Meta()
meta.add_remark({"rule_id": "react", "type": "s"}, "Minified React error #109")
assert meta.get() == {
"rem": [["react", "s"]],
"val": "Minified React error #109",
}
| MetaTests |
python | kamyu104__LeetCode-Solutions | Python/same-tree.py | {
"start": 181,
"end": 553
} | class ____(object):
# @param p, a tree node
# @param q, a tree node
# @return a boolean
def isSameTree(self, p, q):
if p is None and q is None:
return True
if p is not None and q is not None:
return p.val == q.val and self.isSameTree(p.left, q.left) and self.isSameTree(p.right, q.right)
return False
| Solution |
python | Lightning-AI__lightning | tests/tests_pytorch/checkpointing/test_model_checkpoint.py | {
"start": 40663,
"end": 40860
} | class ____(Callback):
def on_train_batch_start(self, trainer, pl_module, batch, batch_idx):
if batch_idx == 1:
raise RuntimeError("Trouble!")
| TroubledCallbackOnTrainBatchStart |
python | kamyu104__LeetCode-Solutions | Python/maximum-xor-of-two-non-overlapping-subtrees.py | {
"start": 3473,
"end": 4335
} | class ____(object):
def maxXor(self, n, edges, values):
"""
:type n: int
:type edges: List[List[int]]
:type values: List[int]
:rtype: int
"""
def dfs(u, p):
lookup[u] = values[u]+sum(dfs(v, u) for v in adj[u] if v != p)
return lookup[u]
def dfs2(u, p):
result = max(trie.query(lookup[u]), 0)
for v in adj[u]:
if v == p:
continue
result = max(result, dfs2(v, u))
trie.insert(lookup[u])
return result
adj = [[] for _ in xrange(len(values))]
for u, v in edges:
adj[u].append(v)
adj[v].append(u)
lookup = [0]*len(values)
dfs(0, -1)
trie = Trie(lookup[0].bit_length())
return dfs2(0, -1)
| Solution2 |
python | chroma-core__chroma | chromadb/db/impl/grpc/server.py | {
"start": 1599,
"end": 20729
} | class ____(SysDBServicer, Component):
"""A mock sysdb implementation that can be used for testing the grpc client. It stores
state in simple python data structures instead of a database."""
_server: grpc.Server
_server_port: int
_segments: Dict[str, Segment] = {}
_collection_to_segments: Dict[str, List[str]] = {}
_tenants_to_databases_to_collections: Dict[
str, Dict[str, Dict[str, Collection]]
] = {}
_tenants_to_database_to_id: Dict[str, Dict[str, UUID]] = {}
def __init__(self, system: System):
self._server_port = system.settings.require("chroma_server_grpc_port")
return super().__init__(system)
@overrides
def start(self) -> None:
self._server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
add_SysDBServicer_to_server(self, self._server) # type: ignore
self._server.add_insecure_port(f"[::]:{self._server_port}")
self._server.start()
return super().start()
@overrides
def stop(self) -> None:
self._server.stop(None)
return super().stop()
@overrides
def reset_state(self) -> None:
self._segments = {}
self._tenants_to_databases_to_collections = {}
# Create defaults
self._tenants_to_databases_to_collections[DEFAULT_TENANT] = {}
self._tenants_to_databases_to_collections[DEFAULT_TENANT][DEFAULT_DATABASE] = {}
self._tenants_to_database_to_id[DEFAULT_TENANT] = {}
self._tenants_to_database_to_id[DEFAULT_TENANT][DEFAULT_DATABASE] = UUID(int=0)
return super().reset_state()
@overrides(check_signature=False)
def CreateDatabase(
self, request: CreateDatabaseRequest, context: grpc.ServicerContext
) -> CreateDatabaseResponse:
tenant = request.tenant
database = request.name
if tenant not in self._tenants_to_databases_to_collections:
context.abort(grpc.StatusCode.NOT_FOUND, f"Tenant {tenant} not found")
if database in self._tenants_to_databases_to_collections[tenant]:
context.abort(
grpc.StatusCode.ALREADY_EXISTS, f"Database {database} already exists"
)
self._tenants_to_databases_to_collections[tenant][database] = {}
self._tenants_to_database_to_id[tenant][database] = UUID(hex=request.id)
return CreateDatabaseResponse()
@overrides(check_signature=False)
def GetDatabase(
self, request: GetDatabaseRequest, context: grpc.ServicerContext
) -> GetDatabaseResponse:
tenant = request.tenant
database = request.name
if tenant not in self._tenants_to_databases_to_collections:
context.abort(grpc.StatusCode.NOT_FOUND, f"Tenant {tenant} not found")
if database not in self._tenants_to_databases_to_collections[tenant]:
context.abort(grpc.StatusCode.NOT_FOUND, f"Database {database} not found")
id = self._tenants_to_database_to_id[tenant][database]
return GetDatabaseResponse(
database=proto.Database(id=id.hex, name=database, tenant=tenant),
)
@overrides(check_signature=False)
def CreateTenant(
self, request: CreateTenantRequest, context: grpc.ServicerContext
) -> CreateTenantResponse:
tenant = request.name
if tenant in self._tenants_to_databases_to_collections:
context.abort(
grpc.StatusCode.ALREADY_EXISTS, f"Tenant {tenant} already exists"
)
self._tenants_to_databases_to_collections[tenant] = {}
self._tenants_to_database_to_id[tenant] = {}
return CreateTenantResponse()
@overrides(check_signature=False)
def GetTenant(
self, request: GetTenantRequest, context: grpc.ServicerContext
) -> GetTenantResponse:
tenant = request.name
if tenant not in self._tenants_to_databases_to_collections:
context.abort(grpc.StatusCode.NOT_FOUND, f"Tenant {tenant} not found")
return GetTenantResponse(
tenant=proto.Tenant(name=tenant),
)
# We are forced to use check_signature=False because the generated proto code
# does not have type annotations for the request and response objects.
# TODO: investigate generating types for the request and response objects
@overrides(check_signature=False)
def CreateSegment(
self, request: CreateSegmentRequest, context: grpc.ServicerContext
) -> CreateSegmentResponse:
segment = from_proto_segment(request.segment)
return self.CreateSegmentHelper(segment, context)
def CreateSegmentHelper(
self, segment: Segment, context: grpc.ServicerContext
) -> CreateSegmentResponse:
if segment["id"].hex in self._segments:
context.abort(
grpc.StatusCode.ALREADY_EXISTS,
f"Segment {segment['id']} already exists",
)
self._segments[segment["id"].hex] = segment
return CreateSegmentResponse()
@overrides(check_signature=False)
def DeleteSegment(
self, request: DeleteSegmentRequest, context: grpc.ServicerContext
) -> DeleteSegmentResponse:
id_to_delete = request.id
if id_to_delete in self._segments:
del self._segments[id_to_delete]
return DeleteSegmentResponse()
else:
context.abort(
grpc.StatusCode.NOT_FOUND, f"Segment {id_to_delete} not found"
)
@overrides(check_signature=False)
def GetSegments(
self, request: GetSegmentsRequest, context: grpc.ServicerContext
) -> GetSegmentsResponse:
target_id = UUID(hex=request.id) if request.HasField("id") else None
target_type = request.type if request.HasField("type") else None
target_scope = (
from_proto_segment_scope(request.scope)
if request.HasField("scope")
else None
)
target_collection = UUID(hex=request.collection)
found_segments = []
for segment in self._segments.values():
if target_id and segment["id"] != target_id:
continue
if target_type and segment["type"] != target_type:
continue
if target_scope and segment["scope"] != target_scope:
continue
if target_collection and segment["collection"] != target_collection:
continue
found_segments.append(segment)
return GetSegmentsResponse(
segments=[to_proto_segment(segment) for segment in found_segments]
)
@overrides(check_signature=False)
def UpdateSegment(
self, request: UpdateSegmentRequest, context: grpc.ServicerContext
) -> UpdateSegmentResponse:
id_to_update = UUID(request.id)
if id_to_update.hex not in self._segments:
context.abort(
grpc.StatusCode.NOT_FOUND, f"Segment {id_to_update} not found"
)
else:
segment = self._segments[id_to_update.hex]
if request.HasField("metadata"):
target = cast(Dict[str, Any], segment["metadata"])
if segment["metadata"] is None:
segment["metadata"] = {}
self._merge_metadata(target, request.metadata)
if request.HasField("reset_metadata") and request.reset_metadata:
segment["metadata"] = {}
return UpdateSegmentResponse()
@overrides(check_signature=False)
def CreateCollection(
self, request: CreateCollectionRequest, context: grpc.ServicerContext
) -> CreateCollectionResponse:
collection_name = request.name
tenant = request.tenant
database = request.database
if tenant not in self._tenants_to_databases_to_collections:
context.abort(grpc.StatusCode.NOT_FOUND, f"Tenant {tenant} not found")
if database not in self._tenants_to_databases_to_collections[tenant]:
context.abort(grpc.StatusCode.NOT_FOUND, f"Database {database} not found")
# Check if the collection already exists globally by id
for (
search_tenant,
databases,
) in self._tenants_to_databases_to_collections.items():
for search_database, search_collections in databases.items():
if request.id in search_collections:
if (
search_tenant != request.tenant
or search_database != request.database
):
context.abort(
grpc.StatusCode.ALREADY_EXISTS,
f"Collection {request.id} already exists in tenant {search_tenant} database {search_database}",
)
elif not request.get_or_create:
# If the id exists for this tenant and database, and we are not doing a get_or_create, then
# we should return an already exists error
context.abort(
grpc.StatusCode.ALREADY_EXISTS,
f"Collection {request.id} already exists in tenant {search_tenant} database {search_database}",
)
# Check if the collection already exists in this database by name
collections = self._tenants_to_databases_to_collections[tenant][database]
matches = [c for c in collections.values() if c["name"] == collection_name]
assert len(matches) <= 1
if len(matches) > 0:
if request.get_or_create:
existing_collection = matches[0]
return CreateCollectionResponse(
collection=to_proto_collection(existing_collection),
created=False,
)
context.abort(
grpc.StatusCode.ALREADY_EXISTS,
f"Collection {collection_name} already exists",
)
configuration_json = json.loads(request.configuration_json_str)
id = UUID(hex=request.id)
new_collection = Collection(
id=id,
name=request.name,
configuration_json=configuration_json,
serialized_schema=None,
metadata=from_proto_metadata(request.metadata),
dimension=request.dimension,
database=database,
tenant=tenant,
version=0,
)
# Check that segments are unique and do not already exist
# Keep a track of the segments that are being added
segments_added = []
# Create segments for the collection
for segment_proto in request.segments:
segment = from_proto_segment(segment_proto)
if segment["id"].hex in self._segments:
# Remove the already added segment since we need to roll back
for s in segments_added:
self.DeleteSegment(DeleteSegmentRequest(id=s), context)
context.abort(
grpc.StatusCode.ALREADY_EXISTS,
f"Segment {segment['id']} already exists",
)
self.CreateSegmentHelper(segment, context)
segments_added.append(segment["id"].hex)
collections[request.id] = new_collection
collection_unique_key = f"{tenant}:{database}:{request.id}"
self._collection_to_segments[collection_unique_key] = segments_added
return CreateCollectionResponse(
collection=to_proto_collection(new_collection),
created=True,
)
@overrides(check_signature=False)
def DeleteCollection(
self, request: DeleteCollectionRequest, context: grpc.ServicerContext
) -> DeleteCollectionResponse:
collection_id = request.id
tenant = request.tenant
database = request.database
if tenant not in self._tenants_to_databases_to_collections:
context.abort(grpc.StatusCode.NOT_FOUND, f"Tenant {tenant} not found")
if database not in self._tenants_to_databases_to_collections[tenant]:
context.abort(grpc.StatusCode.NOT_FOUND, f"Database {database} not found")
collections = self._tenants_to_databases_to_collections[tenant][database]
if collection_id in collections:
del collections[collection_id]
collection_unique_key = f"{tenant}:{database}:{collection_id}"
segment_ids = self._collection_to_segments[collection_unique_key]
if segment_ids: # Delete segments if provided.
for segment_id in segment_ids:
del self._segments[segment_id]
return DeleteCollectionResponse()
else:
context.abort(
grpc.StatusCode.NOT_FOUND, f"Collection {collection_id} not found"
)
@overrides(check_signature=False)
def GetCollections(
self, request: GetCollectionsRequest, context: grpc.ServicerContext
) -> GetCollectionsResponse:
target_id = UUID(hex=request.id) if request.HasField("id") else None
target_name = request.name if request.HasField("name") else None
allCollections = {}
for tenant, databases in self._tenants_to_databases_to_collections.items():
for database, collections in databases.items():
if request.tenant != "" and tenant != request.tenant:
continue
if request.database != "" and database != request.database:
continue
allCollections.update(collections)
print(
f"Tenant: {tenant}, Database: {database}, Collections: {collections}"
)
found_collections = []
for collection in allCollections.values():
if target_id and collection["id"] != target_id:
continue
if target_name and collection["name"] != target_name:
continue
found_collections.append(collection)
return GetCollectionsResponse(
collections=[
to_proto_collection(collection) for collection in found_collections
]
)
@overrides(check_signature=False)
def CountCollections(
self, request: CountCollectionsRequest, context: grpc.ServicerContext
) -> CountCollectionsResponse:
request = GetCollectionsRequest(
tenant=request.tenant,
database=request.database,
)
collections = self.GetCollections(request, context)
return CountCollectionsResponse(count=len(collections.collections))
@overrides(check_signature=False)
def GetCollectionSize(
self, request: GetCollectionSizeRequest, context: grpc.ServicerContext
) -> GetCollectionSizeResponse:
return GetCollectionSizeResponse(
total_records_post_compaction=0,
)
@overrides(check_signature=False)
def GetCollectionWithSegments(
self, request: GetCollectionWithSegmentsRequest, context: grpc.ServicerContext
) -> GetCollectionWithSegmentsResponse:
allCollections = {}
for tenant, databases in self._tenants_to_databases_to_collections.items():
for database, collections in databases.items():
allCollections.update(collections)
print(
f"Tenant: {tenant}, Database: {database}, Collections: {collections}"
)
collection = allCollections.get(request.id, None)
if collection is None:
context.abort(
grpc.StatusCode.NOT_FOUND, f"Collection with id {request.id} not found"
)
collection_unique_key = (
f"{collection.tenant}:{collection.database}:{request.id}"
)
segments = [
self._segments[id]
for id in self._collection_to_segments[collection_unique_key]
]
if {segment["scope"] for segment in segments} != {
SegmentScope.METADATA,
SegmentScope.RECORD,
SegmentScope.VECTOR,
}:
context.abort(
grpc.StatusCode.INTERNAL,
f"Incomplete segments for collection {collection}: {segments}",
)
return GetCollectionWithSegmentsResponse(
collection=to_proto_collection(collection),
segments=[to_proto_segment(segment) for segment in segments],
)
@overrides(check_signature=False)
def UpdateCollection(
self, request: UpdateCollectionRequest, context: grpc.ServicerContext
) -> UpdateCollectionResponse:
id_to_update = UUID(request.id)
# Find the collection with this id
collections = {}
for tenant, databases in self._tenants_to_databases_to_collections.items():
for database, maybe_collections in databases.items():
if id_to_update.hex in maybe_collections:
collections = maybe_collections
if id_to_update.hex not in collections:
context.abort(
grpc.StatusCode.NOT_FOUND, f"Collection {id_to_update} not found"
)
else:
collection = collections[id_to_update.hex]
if request.HasField("name"):
collection["name"] = request.name
if request.HasField("dimension"):
collection["dimension"] = request.dimension
if request.HasField("metadata"):
# TODO: IN SysDB SQlite we have technical debt where we
# replace the entire metadata dict with the new one. We should
# fix that by merging it. For now we just do the same thing here
update_metadata = from_proto_update_metadata(request.metadata)
cleaned_metadata = None
if update_metadata is not None:
cleaned_metadata = {}
for key, value in update_metadata.items():
if value is not None:
cleaned_metadata[key] = value
collection["metadata"] = cleaned_metadata
elif request.HasField("reset_metadata"):
if request.reset_metadata:
collection["metadata"] = {}
return UpdateCollectionResponse()
@overrides(check_signature=False)
def ResetState(
self, request: Empty, context: grpc.ServicerContext
) -> ResetStateResponse:
self.reset_state()
return ResetStateResponse()
def _merge_metadata(self, target: Metadata, source: proto.UpdateMetadata) -> None:
target_metadata = cast(Dict[str, Any], target)
source_metadata = cast(Dict[str, Any], from_proto_update_metadata(source))
target_metadata.update(source_metadata)
# If a key has a None value, remove it from the metadata
for key, value in source_metadata.items():
if value is None and key in target:
del target_metadata[key]
| GrpcMockSysDB |
python | sympy__sympy | sympy/physics/quantum/spin.py | {
"start": 35103,
"end": 35416
} | class ____(SpinState, Bra):
"""Eigenbra of Jx.
See JzKet for the usage of spin eigenstates.
See Also
========
JzKet: Usage of spin states
"""
@classmethod
def dual_class(self):
return JxKet
@classmethod
def coupled_class(self):
return JxBraCoupled
| JxBra |
python | openai__openai-python | src/openai/resources/images.py | {
"start": 93923,
"end": 94356
} | class ____:
def __init__(self, images: Images) -> None:
self._images = images
self.create_variation = _legacy_response.to_raw_response_wrapper(
images.create_variation,
)
self.edit = _legacy_response.to_raw_response_wrapper(
images.edit,
)
self.generate = _legacy_response.to_raw_response_wrapper(
images.generate,
)
| ImagesWithRawResponse |
python | bokeh__bokeh | tests/unit/bokeh/test_transform.py | {
"start": 7928,
"end": 8993
} | class ____:
def test_basic(self) -> None:
t = bt.linear_cmap("foo", ["red", "green"], 0, 10, low_color="orange", high_color="blue", nan_color="pink")
assert isinstance(t, Field)
assert t.field == "foo"
assert isinstance(t.transform, LinearColorMapper)
assert t.transform.palette == ["red", "green"]
assert t.transform.low == 0
assert t.transform.high == 10
assert t.transform.low_color == "orange"
assert t.transform.high_color == "blue"
assert t.transform.nan_color == "pink"
def test_defaults(self) -> None:
t = bt.linear_cmap("foo", ["red", "green"], 0, 10)
assert isinstance(t, Field)
assert t.field == "foo"
assert isinstance(t.transform, LinearColorMapper)
assert t.transform.palette == ["red", "green"]
assert t.transform.low == 0
assert t.transform.high == 10
assert t.transform.low_color is None
assert t.transform.high_color is None
assert t.transform.nan_color == "gray"
| Test_linear_cmap |
python | django__django | tests/custom_lookups/tests.py | {
"start": 533,
"end": 1143
} | class ____(models.Lookup):
lookup_name = "div3"
def as_sql(self, compiler, connection):
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
params = (*lhs_params, *rhs_params)
return "(%s) %%%% 3 = %s" % (lhs, rhs), params
def as_oracle(self, compiler, connection):
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
params = (*lhs_params, *rhs_params)
return "mod(%s, 3) = %s" % (lhs, rhs), params
| Div3Lookup |
python | facebookresearch__faiss | tests/test_partition.py | {
"start": 2444,
"end": 3948
} | class ____(unittest.TestCase, PartitionTests):
def do_partition(self, n, q, maxval=None, seed=None):
if seed is None:
for i in range(50):
self.do_partition(n, q, maxval, i + 1234)
rs = np.random.RandomState(seed)
if maxval is None:
vals = rs.rand(n).astype('float32')
mirval = 1.0
else:
vals = rs.randint(maxval, size=n).astype('float32')
mirval = 65536
ids = (rs.permutation(n) + 12345).astype('int64')
dic = dict(zip(ids, vals))
vals_orig = vals.copy()
vals[:] = mirval - vals
sp = faiss.swig_ptr
if isinstance(q, int):
faiss.CMin_float_partition_fuzzy(
sp(vals), sp(ids), n,
q, q, None
)
else:
q_min, q_max = q
q = pointer_to_minus1()
faiss.CMin_float_partition_fuzzy(
sp(vals), sp(ids), n,
q_min, q_max, sp(q)
)
q = q[0]
assert q_min <= q <= q_max
vals[:] = mirval - vals
o = vals_orig.argsort()
thresh = vals_orig[o[q]]
n_eq = (vals_orig[o[:q]] == thresh).sum()
for i in range(q):
np.testing.assert_almost_equal(vals[i], dic[ids[i]], decimal=5)
self.assertLessEqual(vals[i], thresh)
if vals[i] == thresh:
n_eq -= 1
self.assertEqual(n_eq, 0)
| TestPartitioningFloatMin |
python | django__django | tests/foreign_object/test_agnostic_order_trimjoin.py | {
"start": 126,
"end": 861
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.address = Address.objects.create(company=1, customer_id=20)
cls.customer1 = Customer.objects.create(company=1, customer_id=20)
cls.contact1 = Contact.objects.create(company_code=1, customer_code=20)
def test_deep_mixed_forward(self):
self.assertQuerySetEqual(
Address.objects.filter(customer__contacts=self.contact1),
[self.address.id],
attrgetter("id"),
)
def test_deep_mixed_backward(self):
self.assertQuerySetEqual(
Contact.objects.filter(customer__address=self.address),
[self.contact1.id],
attrgetter("id"),
)
| TestLookupQuery |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_size05.py | {
"start": 315,
"end": 1368
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_size04.xlsx")
def test_create_file(self):
"""Test XlsxWriter chartarea properties."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [73773440, 73774976]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart, {"x_offset": 8, "y_offset": 9})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-zendesk/llama_index/readers/zendesk/base.py | {
"start": 160,
"end": 2398
} | class ____(BaseReader):
"""
Zendesk reader. Reads data from a Zendesk workspace.
Args:
zendesk_subdomain (str): Zendesk subdomain
locale (str): Locale of articles
"""
def __init__(self, zendesk_subdomain: str, locale: str = "en-us") -> None:
"""Initialize Zendesk reader."""
self.zendesk_subdomain = zendesk_subdomain
self.locale = locale
def load_data(self) -> List[Document]:
"""
Load data from the workspace.
Args:
workspace_id (str): Workspace ID.
Returns:
List[Document]: List of documents.
"""
from bs4 import BeautifulSoup
results = []
articles = self.get_all_articles()
for article in articles:
body = article["body"]
if body is None:
continue
soup = BeautifulSoup(body, "html.parser")
body = soup.get_text()
extra_info = {
"id": article["id"],
"title": article["title"],
"url": article["html_url"],
"updated_at": article["updated_at"],
}
results.append(
Document(
text=body,
extra_info=extra_info,
)
)
return results
def get_all_articles(self):
articles = []
next_page = None
while True:
response = self.get_articles_page(next_page)
articles.extend(response["articles"])
next_page = response["next_page"]
if next_page is None:
break
return articles
def get_articles_page(self, next_page: str = None):
import requests
if next_page is None:
url = f"https://{self.zendesk_subdomain}.zendesk.com/api/v2/help_center/{self.locale}/articles?per_page=100"
else:
url = next_page
response = requests.get(url)
response_json = json.loads(response.text)
next_page = response_json.get("next_page", None)
articles = response_json.get("articles", [])
return {"articles": articles, "next_page": next_page}
| ZendeskReader |
python | sqlalchemy__sqlalchemy | test/engine/test_reflection.py | {
"start": 1682,
"end": 48597
} | class ____(fixtures.TestBase, ComparesTables):
__sparse_driver_backend__ = True
def test_basic_reflection(self, connection, metadata):
meta = metadata
users = Table(
"engine_users",
meta,
Column("user_id", sa.INT, primary_key=True),
Column("user_name", sa.VARCHAR(20), nullable=False),
Column("test1", sa.CHAR(5), nullable=False),
Column("test2", sa.Float(), nullable=False),
Column("test3", sa.Text),
Column("test4", sa.Numeric(10, 2), nullable=False),
Column("test5", sa.Date),
Column(
"parent_user_id",
sa.Integer,
sa.ForeignKey("engine_users.user_id"),
),
Column("test6", sa.Date, nullable=False),
Column("test7", sa.Text),
Column("test8", sa.LargeBinary),
Column("test_passivedefault2", sa.Integer, server_default="5"),
Column("test9", sa.LargeBinary(100)),
Column("test10", sa.Numeric(10, 2)),
test_needs_fk=True,
)
addresses = Table(
"engine_email_addresses",
meta,
Column("address_id", sa.Integer, primary_key=True),
Column(
"remote_user_id", sa.Integer, sa.ForeignKey(users.c.user_id)
),
Column("email_address", sa.String(20)),
test_needs_fk=True,
)
meta.create_all(connection)
meta2 = MetaData()
reflected_users = Table(
"engine_users", meta2, autoload_with=connection
)
reflected_addresses = Table(
"engine_email_addresses",
meta2,
autoload_with=connection,
)
self.assert_tables_equal(users, reflected_users)
self.assert_tables_equal(addresses, reflected_addresses)
def test_autoload_with_imply_autoload(self, metadata, connection):
meta = metadata
t = Table(
"t",
meta,
Column("id", sa.Integer, primary_key=True),
Column("x", sa.String(20)),
Column("y", sa.Integer),
)
meta.create_all(connection)
meta2 = MetaData()
reflected_t = Table("t", meta2, autoload_with=connection)
self.assert_tables_equal(t, reflected_t)
def test_two_foreign_keys(self, metadata, connection):
meta = metadata
Table(
"t1",
meta,
Column("id", sa.Integer, primary_key=True),
Column("t2id", sa.Integer, sa.ForeignKey("t2.id")),
Column("t3id", sa.Integer, sa.ForeignKey("t3.id")),
test_needs_fk=True,
)
Table(
"t2",
meta,
Column("id", sa.Integer, primary_key=True),
test_needs_fk=True,
)
Table(
"t3",
meta,
Column("id", sa.Integer, primary_key=True),
test_needs_fk=True,
)
meta.create_all(connection)
meta2 = MetaData()
t1r, t2r, t3r = (
Table(x, meta2, autoload_with=connection)
for x in ("t1", "t2", "t3")
)
assert t1r.c.t2id.references(t2r.c.id)
assert t1r.c.t3id.references(t3r.c.id)
def test_resolve_fks_false_table(self, connection, metadata):
meta = metadata
Table(
"t1",
meta,
Column("id", sa.Integer, primary_key=True),
Column("t2id", sa.Integer, sa.ForeignKey("t2.id")),
test_needs_fk=True,
)
Table(
"t2",
meta,
Column("id", sa.Integer, primary_key=True),
test_needs_fk=True,
)
meta.create_all(connection)
meta2 = MetaData()
t1 = Table("t1", meta2, resolve_fks=False, autoload_with=connection)
in_("t1", meta2.tables)
not_in("t2", meta2.tables)
assert_raises(
sa.exc.NoReferencedTableError,
lambda: list(t1.c.t2id.foreign_keys)[0].column,
)
t2 = Table("t2", meta2, autoload_with=connection)
# now it resolves
is_true(t1.c.t2id.references(t2.c.id))
def test_resolve_fks_false_extend_existing(self, connection, metadata):
meta = metadata
Table(
"t1",
meta,
Column("id", sa.Integer, primary_key=True),
Column("t2id", sa.Integer, sa.ForeignKey("t2.id")),
test_needs_fk=True,
)
Table(
"t2",
meta,
Column("id", sa.Integer, primary_key=True),
test_needs_fk=True,
)
meta.create_all(connection)
meta2 = MetaData()
Table("t1", meta2)
in_("t1", meta2.tables)
t1 = Table(
"t1",
meta2,
resolve_fks=False,
autoload_with=connection,
extend_existing=True,
)
not_in("t2", meta2.tables)
assert_raises(
sa.exc.NoReferencedTableError,
lambda: list(t1.c.t2id.foreign_keys)[0].column,
)
t2 = Table("t2", meta2, autoload_with=connection)
# now it resolves
is_true(t1.c.t2id.references(t2.c.id))
def test_resolve_fks_false_metadata(self, connection, metadata):
meta = metadata
Table(
"t1",
meta,
Column("id", sa.Integer, primary_key=True),
Column("t2id", sa.Integer, sa.ForeignKey("t2.id")),
test_needs_fk=True,
)
Table(
"t2",
meta,
Column("id", sa.Integer, primary_key=True),
test_needs_fk=True,
)
meta.create_all(connection)
meta2 = MetaData()
meta2.reflect(connection, resolve_fks=False, only=["t1"])
in_("t1", meta2.tables)
not_in("t2", meta2.tables)
t1 = meta2.tables["t1"]
assert_raises(
sa.exc.NoReferencedTableError,
lambda: list(t1.c.t2id.foreign_keys)[0].column,
)
meta2.reflect(connection, resolve_fks=False)
t2 = meta2.tables["t2"]
is_true(t1.c.t2id.references(t2.c.id))
@testing.combinations(
"get_table_names",
"get_view_names",
"get_materialized_view_names",
argnames="method",
)
def test_reflect_forwards_multiple_kwargs(
self, connection, metadata, method
):
with mock.patch(
f"sqlalchemy.engine.reflection.Inspector.{method}",
return_value=set(),
) as mocked_method:
metadata.reflect(
bind=connection, flag1=True, flag2=123, flag3="abc", views=True
)
mocked_method.assert_called_once_with(
None, flag1=True, flag2=123, flag3="abc"
)
def test_nonexistent(self, connection):
meta = MetaData()
assert_raises(
sa.exc.NoSuchTableError,
Table,
"nonexistent",
meta,
autoload_with=connection,
)
assert "nonexistent" not in meta.tables
@testing.variation("use_metadata_reflect", [True, False])
def test_extend_existing(self, connection, metadata, use_metadata_reflect):
meta = metadata
Table(
"t",
meta,
Column("id", Integer, primary_key=True),
Column("x", Integer),
Column("y", Integer),
Column("z", Integer, server_default="5"),
)
meta.create_all(connection)
m2 = MetaData()
old_z = Column("z", String, primary_key=True)
old_y = Column("y", String)
old_q = Column("q", Integer)
t2 = Table("t", m2, old_z, old_q)
eq_(list(t2.primary_key.columns), [t2.c.z])
t2 = Table(
"t",
m2,
old_y,
extend_existing=True,
autoload_with=connection,
)
if use_metadata_reflect:
m2.reflect(connection, extend_existing=True)
eq_(set(t2.columns.keys()), {"x", "y", "z", "q", "id"})
# this has been the actual behavior, the cols are added together,
# however the test wasn't checking this correctly
eq_(list(t2.primary_key.columns), [t2.c.z, t2.c.id])
assert t2.c.z is not old_z
if not use_metadata_reflect:
assert t2.c.y is old_y
assert t2.c.z.type._type_affinity is Integer
assert t2.c.q is old_q
m3 = MetaData()
t3 = Table("t", m3, Column("z", Integer))
t3 = Table(
"t",
m3,
extend_existing=False,
autoload_with=connection,
)
if use_metadata_reflect:
m3.reflect(connection, extend_existing=False)
eq_(set(t3.columns.keys()), {"z"})
m4 = MetaData()
old_z = Column("z", String, primary_key=True)
old_y = Column("y", String)
old_q = Column("q", Integer)
t4 = Table("t", m4, old_z, old_q)
eq_(list(t4.primary_key.columns), [t4.c.z])
t4 = Table(
"t",
m4,
old_y,
extend_existing=True,
autoload_replace=False,
autoload_with=connection,
)
if use_metadata_reflect:
m4.reflect(
connection, extend_existing=True, autoload_replace=False
)
eq_(set(t4.columns.keys()), {"x", "y", "z", "q", "id"})
eq_(list(t4.primary_key.columns), [t4.c.z, t4.c.id])
assert t4.c.z is old_z
assert t4.c.y is old_y
assert t4.c.z.type._type_affinity is String
assert t4.c.q is old_q
@testing.variation(
"extend_type",
[
"autoload",
"metadata_reflect",
"metadata_reflect_no_replace",
"plain_table",
],
)
def test_extend_existing_never_dupe_column(
self, connection, metadata, extend_type
):
"""test #8925"""
meta = metadata
Table(
"t",
meta,
Column("id", Integer, primary_key=True),
Column("x", Integer),
Column("y", Integer),
)
meta.create_all(connection)
m2 = MetaData()
if extend_type.metadata_reflect:
t2 = Table(
"t",
m2,
Column("id", Integer, primary_key=True),
Column("x", Integer, key="x2"),
)
with expect_warnings(
'Column with user-specified key "x2" is being replaced '
'with plain named column "x", key "x2" is being removed.'
):
m2.reflect(connection, extend_existing=True)
eq_(set(t2.columns.keys()), {"x", "y", "id"})
elif extend_type.metadata_reflect_no_replace:
t2 = Table(
"t",
m2,
Column("id", Integer, primary_key=True),
Column("x", Integer, key="x2"),
)
m2.reflect(
connection, extend_existing=True, autoload_replace=False
)
eq_(set(t2.columns.keys()), {"x2", "y", "id"})
elif extend_type.autoload:
t2 = Table(
"t",
m2,
Column("id", Integer, primary_key=True),
Column("x", Integer, key="x2"),
autoload_with=connection,
extend_existing=True,
)
eq_(set(t2.columns.keys()), {"x2", "y", "id"})
elif extend_type.plain_table:
Table(
"t",
m2,
Column("id", Integer, primary_key=True),
Column("x", Integer, key="x2"),
)
with expect_warnings(
'Column with user-specified key "x2" is being replaced with '
'plain named column "x", key "x2" is being removed.'
):
t2 = Table(
"t",
m2,
Column("id", Integer, primary_key=True),
Column("x", Integer),
Column("y", Integer),
extend_existing=True,
)
eq_(set(t2.columns.keys()), {"x", "y", "id"})
def test_extend_existing_reflect_all_dont_dupe_index(
self, connection, metadata
):
m = metadata
d = Table(
"d",
m,
Column("id", Integer, primary_key=True),
Column("foo", String(50)),
Column("bar", String(50)),
UniqueConstraint("bar"),
)
Index("foo_idx", d.c.foo)
Table(
"b",
m,
Column("id", Integer, primary_key=True),
Column("aid", ForeignKey("d.id")),
)
m.create_all(connection)
m2 = MetaData()
m2.reflect(connection, extend_existing=True)
eq_(
len(
[
idx
for idx in m2.tables["d"].indexes
if idx.name == "foo_idx"
]
),
1,
)
if (
# fmt: off
testing.requires.
unique_constraint_reflection_no_index_overlap.enabled
# fmt: on
):
eq_(
len(
[
const
for const in m2.tables["d"].constraints
if isinstance(const, UniqueConstraint)
]
),
1,
)
def test_include_columns_indexes(self, connection, metadata):
m = metadata
t1 = Table("t1", m, Column("a", sa.Integer), Column("b", sa.Integer))
sa.Index("foobar", t1.c.a, t1.c.b)
sa.Index("bat", t1.c.a)
m.create_all(connection)
m2 = MetaData()
t2 = Table("t1", m2, autoload_with=connection)
assert len(t2.indexes) == 2
m2 = MetaData()
t2 = Table("t1", m2, autoload_with=connection, include_columns=["a"])
assert len(t2.indexes) == 1
m2 = MetaData()
t2 = Table(
"t1", m2, autoload_with=connection, include_columns=["a", "b"]
)
assert len(t2.indexes) == 2
def test_autoload_replace_foreign_key_nonpresent(
self, connection, metadata
):
"""test autoload_replace=False with col plus FK
establishes the FK not present in the DB.
"""
Table("a", metadata, Column("id", Integer, primary_key=True))
Table(
"b",
metadata,
Column("id", Integer, primary_key=True),
Column("a_id", Integer),
)
metadata.create_all(connection)
m2 = MetaData()
b2 = Table("b", m2, Column("a_id", Integer, sa.ForeignKey("a.id")))
a2 = Table("a", m2, autoload_with=connection)
b2 = Table(
"b",
m2,
extend_existing=True,
autoload_with=connection,
autoload_replace=False,
)
assert b2.c.id is not None
assert b2.c.a_id.references(a2.c.id)
eq_(len(b2.constraints), 2)
def test_autoload_replace_foreign_key_ispresent(
self, connection, metadata
):
"""test autoload_replace=False with col plus FK mirroring
DB-reflected FK skips the reflected FK and installs
the in-python one only.
"""
Table("a", metadata, Column("id", Integer, primary_key=True))
Table(
"b",
metadata,
Column("id", Integer, primary_key=True),
Column("a_id", Integer, sa.ForeignKey("a.id")),
)
metadata.create_all(connection)
m2 = MetaData()
b2 = Table("b", m2, Column("a_id", Integer, sa.ForeignKey("a.id")))
a2 = Table("a", m2, autoload_with=connection)
b2 = Table(
"b",
m2,
extend_existing=True,
autoload_with=connection,
autoload_replace=False,
)
assert b2.c.id is not None
assert b2.c.a_id.references(a2.c.id)
eq_(len(b2.constraints), 2)
def test_autoload_replace_foreign_key_removed(self, connection, metadata):
"""test autoload_replace=False with col minus FK that's in the
DB means the FK is skipped and doesn't get installed at all.
"""
Table("a", metadata, Column("id", Integer, primary_key=True))
Table(
"b",
metadata,
Column("id", Integer, primary_key=True),
Column("a_id", Integer, sa.ForeignKey("a.id")),
)
metadata.create_all(connection)
m2 = MetaData()
b2 = Table("b", m2, Column("a_id", Integer))
a2 = Table("a", m2, autoload_with=connection)
b2 = Table(
"b",
m2,
extend_existing=True,
autoload_with=connection,
autoload_replace=False,
)
assert b2.c.id is not None
assert not b2.c.a_id.references(a2.c.id)
eq_(len(b2.constraints), 1)
def test_autoload_replace_primary_key(self, connection, metadata):
Table("a", metadata, Column("id", Integer))
metadata.create_all(connection)
m2 = MetaData()
a2 = Table("a", m2, Column("id", Integer, primary_key=True))
Table(
"a",
m2,
autoload_with=connection,
autoload_replace=False,
extend_existing=True,
)
eq_(list(a2.primary_key), [a2.c.id])
def test_autoload_replace_arg(self):
Table("t", MetaData(), autoload_replace=False)
def test_autoincrement_col(self, connection, metadata):
"""test that 'autoincrement' is reflected according to sqla's policy.
Don't mark this test as unsupported for any backend !
"""
meta = metadata
Table(
"test",
meta,
Column("id", sa.Integer, primary_key=True),
Column("data", sa.String(50)),
mysql_engine="InnoDB",
)
Table(
"test2",
meta,
Column(
"id", sa.Integer, sa.ForeignKey("test.id"), primary_key=True
),
Column("id2", sa.Integer, primary_key=True),
Column("data", sa.String(50)),
mysql_engine="InnoDB",
)
meta.create_all(connection)
m2 = MetaData()
t1a = Table("test", m2, autoload_with=connection)
assert t1a._autoincrement_column is t1a.c.id
t2a = Table("test2", m2, autoload_with=connection)
assert t2a._autoincrement_column is None
@skip("sqlite")
def test_unknown_types(self, connection, metadata):
"""Test the handling of unknown types for the given dialect.
sqlite is skipped because it has special rules for unknown types using
'affinity types' - this feature is tested in that dialect's test spec.
"""
meta = metadata
t = Table("test", meta, Column("foo", sa.DateTime))
t.create(connection)
with mock.patch.object(connection.dialect, "ischema_names", {}):
m2 = MetaData()
with testing.expect_warnings("Did not recognize type"):
t3 = Table("test", m2, autoload_with=connection)
is_(t3.c.foo.type.__class__, sa.types.NullType)
def test_basic_override(self, connection, metadata):
meta = metadata
table = Table(
"override_test",
meta,
Column("col1", sa.Integer, primary_key=True),
Column("col2", sa.String(20)),
Column("col3", sa.Numeric),
)
table.create(connection)
meta2 = MetaData()
table = Table(
"override_test",
meta2,
Column("col2", sa.Unicode()),
Column("col4", sa.String(30)),
autoload_with=connection,
)
self.assert_(isinstance(table.c.col1.type, sa.Integer))
self.assert_(isinstance(table.c.col2.type, sa.Unicode))
self.assert_(isinstance(table.c.col4.type, sa.String))
def test_override_upgrade_pk_flag(self, connection, metadata):
meta = metadata
table = Table(
"override_test",
meta,
Column("col1", sa.Integer),
Column("col2", sa.String(20)),
Column("col3", sa.Numeric),
)
table.create(connection)
meta2 = MetaData()
table = Table(
"override_test",
meta2,
Column("col1", sa.Integer, primary_key=True),
autoload_with=connection,
)
eq_(list(table.primary_key), [table.c.col1])
eq_(table.c.col1.primary_key, True)
def test_override_pkfk(self, connection, metadata):
"""test that you can override columns which contain foreign keys
to other reflected tables, where the foreign key column is also
a primary key column"""
meta = metadata
Table(
"users",
meta,
Column("id", sa.Integer, primary_key=True),
Column("name", sa.String(30)),
)
Table(
"addresses",
meta,
Column("id", sa.Integer, primary_key=True),
Column("street", sa.String(30)),
)
meta.create_all(connection)
meta2 = MetaData()
a2 = Table(
"addresses",
meta2,
Column(
"id", sa.Integer, sa.ForeignKey("users.id"), primary_key=True
),
autoload_with=connection,
)
u2 = Table("users", meta2, autoload_with=connection)
assert list(a2.primary_key) == [a2.c.id]
assert list(u2.primary_key) == [u2.c.id]
assert u2.join(a2).onclause.compare(u2.c.id == a2.c.id)
meta3 = MetaData()
u3 = Table("users", meta3, autoload_with=connection)
a3 = Table(
"addresses",
meta3,
Column(
"id", sa.Integer, sa.ForeignKey("users.id"), primary_key=True
),
autoload_with=connection,
)
assert list(a3.primary_key) == [a3.c.id]
assert list(u3.primary_key) == [u3.c.id]
assert u3.join(a3).onclause.compare(u3.c.id == a3.c.id)
def test_override_nonexistent_fk(self, connection, metadata):
"""test that you can override columns and create new foreign
keys to other reflected tables which have no foreign keys. this
is common with MySQL MyISAM tables."""
meta = metadata
Table(
"users",
meta,
Column("id", sa.Integer, primary_key=True),
Column("name", sa.String(30)),
)
Table(
"addresses",
meta,
Column("id", sa.Integer, primary_key=True),
Column("street", sa.String(30)),
Column("user_id", sa.Integer),
)
meta.create_all(connection)
meta2 = MetaData()
a2 = Table(
"addresses",
meta2,
Column("user_id", sa.Integer, sa.ForeignKey("users.id")),
autoload_with=connection,
)
u2 = Table("users", meta2, autoload_with=connection)
assert len(a2.c.user_id.foreign_keys) == 1
assert len(a2.foreign_keys) == 1
assert [c.parent for c in a2.foreign_keys] == [a2.c.user_id]
assert [c.parent for c in a2.c.user_id.foreign_keys] == [a2.c.user_id]
assert list(a2.c.user_id.foreign_keys)[0].parent is a2.c.user_id
assert u2.join(a2).onclause.compare(u2.c.id == a2.c.user_id)
meta3 = MetaData()
u3 = Table("users", meta3, autoload_with=connection)
a3 = Table(
"addresses",
meta3,
Column("user_id", sa.Integer, sa.ForeignKey("users.id")),
autoload_with=connection,
)
assert u3.join(a3).onclause.compare(u3.c.id == a3.c.user_id)
meta4 = MetaData()
u4 = Table(
"users",
meta4,
Column("id", sa.Integer, key="u_id", primary_key=True),
autoload_with=connection,
)
a4 = Table(
"addresses",
meta4,
Column("id", sa.Integer, key="street", primary_key=True),
Column("street", sa.String(30), key="user_id"),
Column(
"user_id", sa.Integer, sa.ForeignKey("users.u_id"), key="id"
),
autoload_with=connection,
)
# for the thing happening here with the column collection,
# see test/base/test_utils.py-> test_replace_switch_key_name.
assert u4.join(a4).onclause.compare(u4.c.u_id == a4.c.id)
assert list(u4.primary_key) == [u4.c.u_id]
assert len(u4.columns) == 2
assert len(u4.constraints) == 1
assert len(a4.columns) == 3
assert len(a4.constraints) == 2
def test_override_composite_fk(self, connection, metadata):
"""Test double-remove of composite foreign key, when replaced."""
Table(
"a",
metadata,
Column("x", sa.Integer, primary_key=True),
Column("y", sa.Integer, primary_key=True),
)
Table(
"b",
metadata,
Column("x", sa.Integer, primary_key=True),
Column("y", sa.Integer, primary_key=True),
sa.ForeignKeyConstraint(["x", "y"], ["a.x", "a.y"]),
)
metadata.create_all(connection)
meta2 = MetaData()
c1 = Column("x", sa.Integer, primary_key=True)
c2 = Column("y", sa.Integer, primary_key=True)
f1 = sa.ForeignKeyConstraint(["x", "y"], ["a.x", "a.y"])
b1 = Table("b", meta2, c1, c2, f1, autoload_with=connection)
assert b1.c.x is c1
assert b1.c.y is c2
assert f1 in b1.constraints
assert len(b1.constraints) == 2
def test_override_keys(self, metadata, connection):
"""test that columns can be overridden with a 'key',
and that ForeignKey targeting during reflection still works."""
meta = metadata
Table(
"a",
meta,
Column("x", sa.Integer, primary_key=True),
Column("z", sa.Integer),
test_needs_fk=True,
)
Table(
"b",
meta,
Column("y", sa.Integer, sa.ForeignKey("a.x")),
test_needs_fk=True,
)
meta.create_all(connection)
m2 = MetaData()
a2 = Table(
"a",
m2,
Column("x", sa.Integer, primary_key=True, key="x1"),
autoload_with=connection,
)
b2 = Table("b", m2, autoload_with=connection)
assert a2.join(b2).onclause.compare(a2.c.x1 == b2.c.y)
assert b2.c.y.references(a2.c.x1)
def test_nonreflected_fk_raises(self, connection, metadata):
"""test that a NoReferencedColumnError is raised when reflecting
a table with an FK to another table which has not included the target
column in its reflection.
"""
meta = metadata
Table(
"a",
meta,
Column("x", sa.Integer, primary_key=True),
Column("z", sa.Integer),
test_needs_fk=True,
)
Table(
"b",
meta,
Column("y", sa.Integer, sa.ForeignKey("a.x")),
test_needs_fk=True,
)
meta.create_all(connection)
m2 = MetaData()
a2 = Table("a", m2, include_columns=["z"], autoload_with=connection)
b2 = Table("b", m2, autoload_with=connection)
assert_raises(sa.exc.NoReferencedColumnError, a2.join, b2)
def test_override_existing_fk(self, connection, metadata):
"""test that you can override columns and specify new foreign
keys to other reflected tables, on columns which *do* already
have that foreign key, and that the FK is not duped."""
meta = metadata
Table(
"users",
meta,
Column("id", sa.Integer, primary_key=True),
Column("name", sa.String(30)),
test_needs_fk=True,
)
Table(
"addresses",
meta,
Column("id", sa.Integer, primary_key=True),
Column("user_id", sa.Integer, sa.ForeignKey("users.id")),
test_needs_fk=True,
)
meta.create_all(connection)
meta2 = MetaData()
a2 = Table(
"addresses",
meta2,
Column("user_id", sa.Integer, sa.ForeignKey("users.id")),
autoload_with=connection,
)
u2 = Table("users", meta2, autoload_with=connection)
s = sa.select(a2).subquery()
assert s.c.user_id is not None
assert len(a2.foreign_keys) == 1
assert len(a2.c.user_id.foreign_keys) == 1
assert len(a2.constraints) == 2
assert [c.parent for c in a2.foreign_keys] == [a2.c.user_id]
assert [c.parent for c in a2.c.user_id.foreign_keys] == [a2.c.user_id]
assert list(a2.c.user_id.foreign_keys)[0].parent is a2.c.user_id
assert u2.join(a2).onclause.compare(u2.c.id == a2.c.user_id)
meta2 = MetaData()
u2 = Table(
"users",
meta2,
Column("id", sa.Integer, primary_key=True),
autoload_with=connection,
)
a2 = Table(
"addresses",
meta2,
Column("id", sa.Integer, primary_key=True),
Column("user_id", sa.Integer, sa.ForeignKey("users.id")),
autoload_with=connection,
)
s = sa.select(a2).subquery()
assert s.c.user_id is not None
assert len(a2.foreign_keys) == 1
assert len(a2.c.user_id.foreign_keys) == 1
assert len(a2.constraints) == 2
assert [c.parent for c in a2.foreign_keys] == [a2.c.user_id]
assert [c.parent for c in a2.c.user_id.foreign_keys] == [a2.c.user_id]
assert list(a2.c.user_id.foreign_keys)[0].parent is a2.c.user_id
assert u2.join(a2).onclause.compare(u2.c.id == a2.c.user_id)
@testing.only_on(["postgresql", "mysql"])
def test_fk_options(self, connection, metadata):
"""test that foreign key reflection includes options (on
backends with {dialect}.get_foreign_keys() support)"""
if testing.against("postgresql"):
test_attrs = (
"match",
"onupdate",
"ondelete",
"deferrable",
"initially",
)
addresses_user_id_fkey = sa.ForeignKey(
# Each option is specifically not a Postgres default, or
# it won't be returned by PG's inspection
"users.id",
name="addresses_user_id_fkey",
match="FULL",
onupdate="RESTRICT",
ondelete="RESTRICT",
deferrable=True,
initially="DEFERRED",
)
elif testing.against("mysql"):
# MATCH, DEFERRABLE, and INITIALLY cannot be defined for MySQL
# ON UPDATE and ON DELETE have defaults of RESTRICT, which are
# elided by MySQL's inspection
addresses_user_id_fkey = sa.ForeignKey(
"users.id",
name="addresses_user_id_fkey",
onupdate="CASCADE",
ondelete="CASCADE",
)
test_attrs = ("onupdate", "ondelete")
meta = metadata
Table(
"users",
meta,
Column("id", sa.Integer, primary_key=True),
Column("name", sa.String(30)),
test_needs_fk=True,
)
Table(
"addresses",
meta,
Column("id", sa.Integer, primary_key=True),
Column("user_id", sa.Integer, addresses_user_id_fkey),
test_needs_fk=True,
)
meta.create_all(connection)
meta2 = MetaData()
meta2.reflect(connection)
for fk in meta2.tables["addresses"].foreign_keys:
ref = addresses_user_id_fkey
for attr in test_attrs:
eq_(getattr(fk, attr), getattr(ref, attr))
def test_pks_not_uniques(self, connection, metadata):
"""test that primary key reflection not tripped up by unique
indexes"""
conn = connection
conn.exec_driver_sql(
"""
CREATE TABLE book (
id INTEGER NOT NULL,
title VARCHAR(100) NOT NULL,
series INTEGER,
series_id INTEGER,
UNIQUE(series, series_id),
PRIMARY KEY(id)
)"""
)
book = Table("book", metadata, autoload_with=connection)
assert book.primary_key.contains_column(book.c.id)
assert not book.primary_key.contains_column(book.c.series)
eq_(len(book.primary_key), 1)
def test_fk_error(self, connection, metadata):
Table(
"slots",
metadata,
Column("slot_id", sa.Integer, primary_key=True),
Column("pkg_id", sa.Integer, sa.ForeignKey("pkgs.pkg_id")),
Column("slot", sa.String(128)),
)
assert_raises_message(
sa.exc.InvalidRequestError,
"Foreign key associated with column 'slots.pkg_id' "
"could not find table 'pkgs' with which to generate "
"a foreign key to target column 'pkg_id'",
metadata.create_all,
connection,
)
def test_composite_pks(self, connection, metadata):
"""test reflection of a composite primary key"""
conn = connection
conn.exec_driver_sql(
"""
CREATE TABLE book (
id INTEGER NOT NULL,
isbn VARCHAR(50) NOT NULL,
title VARCHAR(100) NOT NULL,
series INTEGER NOT NULL,
series_id INTEGER NOT NULL,
UNIQUE(series, series_id),
PRIMARY KEY(id, isbn)
)"""
)
book = Table("book", metadata, autoload_with=connection)
assert book.primary_key.contains_column(book.c.id)
assert book.primary_key.contains_column(book.c.isbn)
assert not book.primary_key.contains_column(book.c.series)
eq_(len(book.primary_key), 2)
def test_composite_fk(self, connection, metadata):
"""test reflection of composite foreign keys"""
meta = metadata
multi = Table(
"multi",
meta,
Column("multi_id", sa.Integer, primary_key=True),
Column("multi_rev", sa.Integer, primary_key=True),
Column("multi_hoho", sa.Integer, primary_key=True),
Column("name", sa.String(50), nullable=False),
Column("val", sa.String(100)),
test_needs_fk=True,
)
multi2 = Table(
"multi2",
meta,
Column("id", sa.Integer, primary_key=True),
Column("foo", sa.Integer),
Column("bar", sa.Integer),
Column("lala", sa.Integer),
Column("data", sa.String(50)),
sa.ForeignKeyConstraint(
["foo", "bar", "lala"],
["multi.multi_id", "multi.multi_rev", "multi.multi_hoho"],
),
test_needs_fk=True,
)
meta.create_all(connection)
meta2 = MetaData()
table = Table("multi", meta2, autoload_with=connection)
table2 = Table("multi2", meta2, autoload_with=connection)
self.assert_tables_equal(multi, table)
self.assert_tables_equal(multi2, table2)
j = sa.join(table, table2)
self.assert_(
sa.and_(
table.c.multi_id == table2.c.foo,
table.c.multi_rev == table2.c.bar,
table.c.multi_hoho == table2.c.lala,
).compare(j.onclause)
)
@testing.crashes("oracle", "FIXME: unknown, confirm not fails_on")
@testing.requires.check_constraints
def test_reserved(self, connection, metadata):
# check a table that uses a SQL reserved name doesn't cause an
# error
meta = metadata
table_a = Table(
"select",
meta,
Column("not", sa.Integer, primary_key=True),
Column("from", sa.String(12), nullable=False),
sa.UniqueConstraint("from", name="when"),
)
sa.Index("where", table_a.c["from"])
if connection.dialect.requires_name_normalize:
check_col = "TRUE"
else:
check_col = "true"
quoter = connection.dialect.identifier_preparer.quote_identifier
Table(
"false",
meta,
Column("create", sa.Integer, primary_key=True),
Column("true", sa.Integer, sa.ForeignKey("select.not")),
sa.CheckConstraint("%s <> 1" % quoter(check_col), name="limit"),
)
table_c = Table(
"is",
meta,
Column("or", sa.Integer, nullable=False, primary_key=True),
Column("join", sa.Integer, nullable=False, primary_key=True),
sa.PrimaryKeyConstraint("or", "join", name="to"),
)
index_c = sa.Index("else", table_c.c.join)
meta.create_all(connection)
index_c.drop(connection)
meta2 = MetaData()
Table("select", meta2, autoload_with=connection)
Table("false", meta2, autoload_with=connection)
Table("is", meta2, autoload_with=connection)
def test_reflect_all(self, connection, metadata):
names = ["rt_%s" % name for name in ("a", "b", "c", "d", "e")]
nameset = set(names)
baseline = metadata
for name in names:
Table(name, baseline, Column("id", sa.Integer, primary_key=True))
baseline.create_all(connection)
m1 = MetaData()
is_false(m1.tables)
m1.reflect(connection)
is_true(nameset.issubset(set(m1.tables.keys())))
m2 = MetaData()
m2.reflect(connection, only=["rt_a", "rt_b"])
eq_(set(m2.tables.keys()), {"rt_a", "rt_b"})
m3 = MetaData()
m3.reflect(connection, only=lambda name, meta: name == "rt_c")
eq_(set(m3.tables.keys()), {"rt_c"})
m4 = MetaData()
assert_raises_message(
sa.exc.InvalidRequestError,
r"Could not reflect: requested table\(s\) not available in "
r"Engine\(.*?\): \(rt_f\)",
m4.reflect,
connection,
only=["rt_a", "rt_f"],
)
m5 = MetaData()
m5.reflect(connection, only=[])
is_false(m5.tables)
m6 = MetaData()
m6.reflect(connection, only=lambda n, m: False)
is_false(m6.tables)
m7 = MetaData()
m7.reflect(connection)
is_true(nameset.issubset(set(m7.tables.keys())))
m8_e1 = MetaData()
rt_c = Table("rt_c", m8_e1)
m8_e1.reflect(connection, extend_existing=True)
eq_(set(m8_e1.tables.keys()), set(names))
eq_(rt_c.c.keys(), ["id"])
m8_e2 = MetaData()
rt_c = Table("rt_c", m8_e2)
m8_e2.reflect(connection, extend_existing=True, only=["rt_a", "rt_c"])
eq_(set(m8_e2.tables.keys()), {"rt_a", "rt_c"})
eq_(rt_c.c.keys(), ["id"])
baseline.drop_all(connection)
m9 = MetaData()
m9.reflect(connection)
is_false(m9.tables)
def test_reflect_all_unreflectable_table(self, connection, metadata):
names = ["rt_%s" % name for name in ("a", "b", "c", "d", "e")]
for name in names:
Table(name, metadata, Column("id", sa.Integer, primary_key=True))
metadata.create_all(connection)
m = MetaData()
inspector = sa.engine.reflection.Inspector
reflect_table = inspector.reflect_table
def patched(self, table, *arg, **kw):
if table.name == "rt_c":
raise sa.exc.UnreflectableTableError("Can't reflect rt_c")
else:
return reflect_table(self, table, *arg, **kw)
with mock.patch.object(inspector, "reflect_table", patched):
with expect_warnings("Skipping table rt_c: Can't reflect rt_c"):
m.reflect(connection)
assert_raises_message(
sa.exc.UnreflectableTableError,
"Can't reflect rt_c",
Table,
"rt_c",
m,
autoload_with=connection,
)
def test_index_reflection(self, connection, metadata):
m1 = metadata
t1 = Table(
"party",
m1,
Column("id", sa.Integer, nullable=False),
Column("name", sa.String(20), index=True),
)
sa.Index("idx1", t1.c.id, unique=True)
sa.Index("idx2", t1.c.name, t1.c.id, unique=False)
m1.create_all(connection)
m2 = MetaData()
t2 = Table("party", m2, autoload_with=connection)
assert len(t2.indexes) == 3
# Make sure indexes are in the order we expect them in
tmp = [(idx.name, idx) for idx in t2.indexes]
tmp.sort()
r1, r2, r3 = (idx[1] for idx in tmp)
assert r1.name == "idx1"
assert r2.name == "idx2"
assert r1.unique == True # noqa
assert r2.unique == False # noqa
assert r3.unique == False # noqa
assert {t2.c.id} == set(r1.columns)
assert {t2.c.name, t2.c.id} == set(r2.columns)
assert {t2.c.name} == set(r3.columns)
@testing.requires.comment_reflection
def test_comment_reflection(self, connection, metadata):
m1 = metadata
Table(
"sometable",
m1,
Column("id", sa.Integer, comment="c1 comment"),
comment="t1 comment",
)
m1.create_all(connection)
m2 = MetaData()
t2 = Table("sometable", m2, autoload_with=connection)
eq_(t2.comment, "t1 comment")
eq_(t2.c.id.comment, "c1 comment")
t3 = Table("sometable", m2, extend_existing=True)
eq_(t3.comment, "t1 comment")
eq_(t3.c.id.comment, "c1 comment")
@testing.requires.check_constraint_reflection
def test_check_constraint_reflection(self, connection, metadata):
m1 = metadata
Table(
"x",
m1,
Column("q", Integer),
sa.CheckConstraint("q > 10", name="ck1"),
)
m1.create_all(connection)
m2 = MetaData()
t2 = Table("x", m2, autoload_with=connection)
cks = [
const
for const in t2.constraints
if isinstance(const, sa.CheckConstraint)
]
eq_(len(cks), 1)
ck = cks[0]
eq_regex(ck.sqltext.text, r"[\(`]*q[\)`]* > 10")
eq_(ck.name, "ck1")
def test_index_reflection_cols_busted(self, connection, metadata):
t = Table("x", metadata, Column("a", Integer), Column("b", Integer))
sa.Index("x_ix", t.c.a, t.c.b)
metadata.create_all(connection)
gri = Inspector._get_reflection_info
def mock_gri(self, *a, **kw):
res = gri(self, *a, **kw)
res.columns[(None, "x")] = [
col for col in res.columns[(None, "x")] if col["name"] == "b"
]
return res
with testing.mock.patch.object(
Inspector, "_get_reflection_info", mock_gri
):
m = MetaData()
with testing.expect_warnings(
"index key 'a' was not located in columns"
):
t = Table("x", m, autoload_with=connection)
eq_(list(t.indexes)[0].columns, [t.c.b])
def test_index_reflection_expression_not_found(self, connection, metadata):
t = Table("x", metadata, Column("a", Integer), Column("b", Integer))
sa.Index("x_ix", t.c.a)
sa.Index("x_iy", t.c.a, t.c.b)
metadata.create_all(connection)
gri = Inspector._get_reflection_info
def mock_gri(self, *a, **kw):
res = gri(self, *a, **kw)
for idx in res.indexes[(None, "x")]:
if idx["name"] == "x_iy":
idx["column_names"][1] = None
idx.pop("expressions", None)
return res
with testing.mock.patch.object(
Inspector, "_get_reflection_info", mock_gri
):
m = MetaData()
with testing.expect_warnings(
"Skipping index 'x_iy' because key 2 reflected as None"
):
t = Table("x", m, autoload_with=connection)
eq_(len(t.indexes), 1)
eq_(list(t.indexes)[0].name, "x_ix")
@testing.requires.views
def test_views(self, connection, metadata):
users, addresses, dingalings = createTables(metadata)
try:
metadata.create_all(connection)
_create_views(connection, None)
m2 = MetaData()
users_v = Table("users_v", m2, autoload_with=connection)
addresses_v = Table(
"email_addresses_v", m2, autoload_with=connection
)
for c1, c2 in zip(users_v.c, users.c):
eq_(c1.name, c2.name)
self.assert_types_base(c1, c2)
for c1, c2 in zip(addresses_v.c, addresses.c):
eq_(c1.name, c2.name)
self.assert_types_base(c1, c2)
finally:
_drop_views(connection)
@testing.requires.views
def test_reflect_all_with_views(self, connection, metadata):
users, addresses, dingalings = createTables(metadata, None)
try:
metadata.create_all(connection)
_create_views(connection, None)
m2 = MetaData()
m2.reflect(connection, views=False)
eq_(set(m2.tables), {"users", "email_addresses", "dingalings"})
m2 = MetaData()
m2.reflect(connection, views=True)
eq_(
set(m2.tables),
{
"email_addresses_v",
"users_v",
"users",
"dingalings",
"email_addresses",
},
)
finally:
_drop_views(connection)
| ReflectionTest |
python | huggingface__transformers | tests/models/encoder_decoder/test_modeling_encoder_decoder.py | {
"start": 46336,
"end": 48405
} | class ____(EncoderDecoderMixin, unittest.TestCase):
def get_encoder_decoder_model(self, config, decoder_config):
encoder_model = RobertaModel(config)
decoder_model = RobertaForCausalLM(decoder_config)
return encoder_model, decoder_model
def prepare_config_and_inputs(self):
model_tester = RobertaModelTester(self)
encoder_config_and_inputs = model_tester.prepare_config_and_inputs()
decoder_config_and_inputs = model_tester.prepare_config_and_inputs_for_decoder()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = encoder_config_and_inputs
(
decoder_config,
decoder_input_ids,
decoder_token_type_ids,
decoder_input_mask,
decoder_sequence_labels,
decoder_token_labels,
decoder_choice_labels,
encoder_hidden_states,
encoder_attention_mask,
) = decoder_config_and_inputs
# make sure that cross attention layers are added
decoder_config.add_cross_attention = True
return {
"config": config,
"input_ids": input_ids,
"attention_mask": input_mask,
"decoder_config": decoder_config,
"decoder_input_ids": decoder_input_ids,
"decoder_token_type_ids": decoder_token_type_ids,
"decoder_attention_mask": decoder_input_mask,
"decoder_sequence_labels": decoder_sequence_labels,
"decoder_token_labels": decoder_token_labels,
"decoder_choice_labels": decoder_choice_labels,
"encoder_hidden_states": encoder_hidden_states,
"labels": decoder_token_labels,
}
def get_pretrained_model(self):
return EncoderDecoderModel.from_encoder_decoder_pretrained(
"FacebookAI/roberta-base", "FacebookAI/roberta-base"
)
@require_torch
| RoBertaEncoderDecoderModelTest |
python | chroma-core__chroma | chromadb/api/models/CollectionCommon.py | {
"start": 2981,
"end": 35762
} | class ____(Generic[ClientT]):
_model: CollectionModel
_client: ClientT
_embedding_function: Optional[EmbeddingFunction[Embeddable]]
_data_loader: Optional[DataLoader[Loadable]]
def __init__(
self,
client: ClientT,
model: CollectionModel,
embedding_function: Optional[
EmbeddingFunction[Embeddable]
] = DefaultEmbeddingFunction(), # type: ignore
data_loader: Optional[DataLoader[Loadable]] = None,
):
"""Initializes a new instance of the Collection class."""
self._client = client
self._model = model
# Check to make sure the embedding function has the right signature, as defined by the EmbeddingFunction protocol
if embedding_function is not None:
validate_embedding_function(embedding_function)
self._embedding_function = embedding_function
self._data_loader = data_loader
# Expose the model properties as read-only properties on the Collection class
@property
def id(self) -> UUID:
return self._model.id
@property
def name(self) -> str:
return self._model.name
@property
def configuration(self) -> CollectionConfiguration:
return load_collection_configuration_from_json(self._model.configuration_json)
@property
def configuration_json(self) -> Dict[str, Any]:
return self._model.configuration_json
@property
def schema(self) -> Optional[Schema]:
return Schema.deserialize_from_json(
self._model.serialized_schema if self._model.serialized_schema else {}
)
@property
def metadata(self) -> CollectionMetadata:
return cast(CollectionMetadata, self._model.metadata)
@property
def tenant(self) -> str:
return self._model.tenant
@property
def database(self) -> str:
return self._model.database
def __eq__(self, other: object) -> bool:
if not isinstance(other, CollectionCommon):
return False
id_match = self.id == other.id
name_match = self.name == other.name
configuration_match = self.configuration_json == other.configuration_json
schema_match = self.schema == other.schema
metadata_match = self.metadata == other.metadata
tenant_match = self.tenant == other.tenant
database_match = self.database == other.database
embedding_function_match = self._embedding_function == other._embedding_function
data_loader_match = self._data_loader == other._data_loader
return (
id_match
and name_match
and configuration_match
and schema_match
and metadata_match
and tenant_match
and database_match
and embedding_function_match
and data_loader_match
)
def __repr__(self) -> str:
return f"Collection(name={self.name})"
def get_model(self) -> CollectionModel:
return self._model
@validation_context("add")
def _validate_and_prepare_add_request(
self,
ids: OneOrMany[ID],
embeddings: Optional[
Union[
OneOrMany[Embedding],
OneOrMany[PyEmbedding],
]
],
metadatas: Optional[OneOrMany[Metadata]],
documents: Optional[OneOrMany[Document]],
images: Optional[OneOrMany[Image]],
uris: Optional[OneOrMany[URI]],
) -> AddRequest:
# Unpack
add_records = normalize_insert_record_set(
ids=ids,
embeddings=embeddings,
metadatas=metadatas,
documents=documents,
images=images,
uris=uris,
)
# Validate
validate_insert_record_set(record_set=add_records)
validate_record_set_contains_any(record_set=add_records, contains_any={"ids"})
# Prepare
if add_records["embeddings"] is None:
validate_record_set_for_embedding(record_set=add_records)
add_embeddings = self._embed_record_set(record_set=add_records)
else:
add_embeddings = add_records["embeddings"]
add_metadatas = self._apply_sparse_embeddings_to_metadatas(
add_records["metadatas"], add_records["documents"]
)
return AddRequest(
ids=add_records["ids"],
embeddings=add_embeddings,
metadatas=add_metadatas,
documents=add_records["documents"],
uris=add_records["uris"],
)
@validation_context("get")
def _validate_and_prepare_get_request(
self,
ids: Optional[OneOrMany[ID]],
where: Optional[Where],
where_document: Optional[WhereDocument],
include: Include,
) -> GetRequest:
# Unpack
unpacked_ids: Optional[IDs] = maybe_cast_one_to_many(target=ids)
filters = FilterSet(where=where, where_document=where_document)
# Validate
if unpacked_ids is not None:
validate_ids(ids=unpacked_ids)
validate_filter_set(filter_set=filters)
validate_include(include=include, dissalowed=["distances"])
if "data" in include and self._data_loader is None:
raise ValueError(
"You must set a data loader on the collection if loading from URIs."
)
# Prepare
request_include = include
# We need to include uris in the result from the API to load datas
if "data" in include and "uris" not in include:
request_include.append("uris")
return GetRequest(
ids=unpacked_ids,
where=filters["where"],
where_document=filters["where_document"],
include=request_include,
)
@validation_context("query")
def _validate_and_prepare_query_request(
self,
query_embeddings: Optional[
Union[
OneOrMany[Embedding],
OneOrMany[PyEmbedding],
]
],
query_texts: Optional[OneOrMany[Document]],
query_images: Optional[OneOrMany[Image]],
query_uris: Optional[OneOrMany[URI]],
ids: Optional[OneOrMany[ID]],
n_results: int,
where: Optional[Where],
where_document: Optional[WhereDocument],
include: Include,
) -> QueryRequest:
# Unpack
query_records = normalize_base_record_set(
embeddings=query_embeddings,
documents=query_texts,
images=query_images,
uris=query_uris,
)
filter_ids = maybe_cast_one_to_many(ids)
filters = FilterSet(
where=where,
where_document=where_document,
)
# Validate
validate_base_record_set(record_set=query_records)
validate_filter_set(filter_set=filters)
validate_include(include=include)
validate_n_results(n_results=n_results)
# Prepare
if query_records["embeddings"] is None:
validate_record_set_for_embedding(record_set=query_records)
request_embeddings = self._embed_record_set(
record_set=query_records, is_query=True
)
else:
request_embeddings = query_records["embeddings"]
request_where = filters["where"]
request_where_document = filters["where_document"]
# We need to manually include uris in the result from the API to load datas
request_include = include
if "data" in request_include and "uris" not in request_include:
request_include.append("uris")
return QueryRequest(
embeddings=request_embeddings,
ids=filter_ids,
where=request_where,
where_document=request_where_document,
include=request_include,
n_results=n_results,
)
@validation_context("update")
def _validate_and_prepare_update_request(
self,
ids: OneOrMany[ID],
embeddings: Optional[
Union[
OneOrMany[Embedding],
OneOrMany[PyEmbedding],
]
],
metadatas: Optional[OneOrMany[Metadata]],
documents: Optional[OneOrMany[Document]],
images: Optional[OneOrMany[Image]],
uris: Optional[OneOrMany[URI]],
) -> UpdateRequest:
# Unpack
update_records = normalize_insert_record_set(
ids=ids,
embeddings=embeddings,
metadatas=metadatas,
documents=documents,
images=images,
uris=uris,
)
# Validate
validate_insert_record_set(record_set=update_records)
# Prepare
if update_records["embeddings"] is None:
# TODO: Handle URI updates.
if (
update_records["documents"] is not None
or update_records["images"] is not None
):
validate_record_set_for_embedding(
update_records, embeddable_fields={"documents", "images"}
)
update_embeddings = self._embed_record_set(record_set=update_records)
else:
update_embeddings = None
else:
update_embeddings = update_records["embeddings"]
update_metadatas = self._apply_sparse_embeddings_to_metadatas(
update_records["metadatas"], update_records["documents"]
)
return UpdateRequest(
ids=update_records["ids"],
embeddings=update_embeddings,
metadatas=update_metadatas,
documents=update_records["documents"],
uris=update_records["uris"],
)
@validation_context("upsert")
def _validate_and_prepare_upsert_request(
self,
ids: OneOrMany[ID],
embeddings: Optional[
Union[
OneOrMany[Embedding],
OneOrMany[PyEmbedding],
]
] = None,
metadatas: Optional[OneOrMany[Metadata]] = None,
documents: Optional[OneOrMany[Document]] = None,
images: Optional[OneOrMany[Image]] = None,
uris: Optional[OneOrMany[URI]] = None,
) -> UpsertRequest:
# Unpack
upsert_records = normalize_insert_record_set(
ids=ids,
embeddings=embeddings,
metadatas=metadatas,
documents=documents,
images=images,
uris=uris,
)
# Validate
validate_insert_record_set(record_set=upsert_records)
# Prepare
if upsert_records["embeddings"] is None:
validate_record_set_for_embedding(
record_set=upsert_records, embeddable_fields={"documents", "images"}
)
upsert_embeddings = self._embed_record_set(record_set=upsert_records)
else:
upsert_embeddings = upsert_records["embeddings"]
upsert_metadatas = self._apply_sparse_embeddings_to_metadatas(
upsert_records["metadatas"], upsert_records["documents"]
)
return UpsertRequest(
ids=upsert_records["ids"],
metadatas=upsert_metadatas,
embeddings=upsert_embeddings,
documents=upsert_records["documents"],
uris=upsert_records["uris"],
)
@validation_context("delete")
def _validate_and_prepare_delete_request(
self,
ids: Optional[IDs],
where: Optional[Where],
where_document: Optional[WhereDocument],
) -> DeleteRequest:
if ids is None and where is None and where_document is None:
raise ValueError(
"At least one of ids, where, or where_document must be provided"
)
# Unpack
if ids is not None:
request_ids = cast(IDs, maybe_cast_one_to_many(ids))
else:
request_ids = None
filters = FilterSet(where=where, where_document=where_document)
# Validate
if request_ids is not None:
validate_ids(ids=request_ids)
validate_filter_set(filter_set=filters)
return DeleteRequest(
ids=request_ids, where=where, where_document=where_document
)
def _transform_peek_response(self, response: GetResult) -> GetResult:
if response["embeddings"] is not None:
response["embeddings"] = np.array(response["embeddings"])
return response
def _transform_get_response(
self, response: GetResult, include: Include
) -> GetResult:
if (
"data" in include
and self._data_loader is not None
and response["uris"] is not None
):
response["data"] = self._data_loader(response["uris"])
if "embeddings" in include:
response["embeddings"] = np.array(response["embeddings"])
# Remove URIs from the result if they weren't requested
if "uris" not in include:
response["uris"] = None
return response
def _transform_query_response(
self, response: QueryResult, include: Include
) -> QueryResult:
if (
"data" in include
and self._data_loader is not None
and response["uris"] is not None
):
response["data"] = [self._data_loader(uris) for uris in response["uris"]]
if "embeddings" in include and response["embeddings"] is not None:
response["embeddings"] = [
np.array(embedding) for embedding in response["embeddings"]
]
# Remove URIs from the result if they weren't requested
if "uris" not in include:
response["uris"] = None
return response
def _validate_modify_request(self, metadata: Optional[CollectionMetadata]) -> None:
if metadata is not None:
validate_metadata(metadata)
if "hnsw:space" in metadata:
raise ValueError(
"Changing the distance function of a collection once it is created is not supported currently."
)
def _update_model_after_modify_success(
self,
name: Optional[str],
metadata: Optional[CollectionMetadata],
configuration: Optional[UpdateCollectionConfiguration],
) -> None:
if name:
self._model["name"] = name
if metadata:
self._model["metadata"] = metadata
if configuration:
self._model.set_configuration(
overwrite_collection_configuration(
self._model.get_configuration(), configuration
)
)
# If schema exists, also update it with the configuration changes
if self.schema:
from chromadb.api.collection_configuration import (
update_schema_from_collection_configuration,
)
updated_schema = update_schema_from_collection_configuration(
self.schema, configuration
)
self._model["serialized_schema"] = updated_schema.serialize_to_json()
def _get_sparse_embedding_targets(self) -> Dict[str, "SparseVectorIndexConfig"]:
schema = self.schema
if schema is None:
return {}
targets: Dict[str, "SparseVectorIndexConfig"] = {}
for key, value_types in schema.keys.items():
if value_types.sparse_vector is None:
continue
sparse_index = value_types.sparse_vector.sparse_vector_index
if sparse_index is None or not sparse_index.enabled:
continue
config = sparse_index.config
if config.embedding_function is None or config.source_key is None:
continue
targets[key] = config
return targets
def _apply_sparse_embeddings_to_metadatas(
self,
metadatas: Optional[List[Metadata]],
documents: Optional[List[Document]] = None,
) -> Optional[List[Metadata]]:
sparse_targets = self._get_sparse_embedding_targets()
if not sparse_targets:
return metadatas
# If no metadatas provided, create empty dicts based on documents length
if metadatas is None:
if documents is None:
return None
metadatas = [{} for _ in range(len(documents))]
# Create copies, converting None to empty dict
updated_metadatas: List[Dict[str, Any]] = [
dict(metadata) if metadata is not None else {} for metadata in metadatas
]
documents_list = list(documents) if documents is not None else None
for target_key, config in sparse_targets.items():
source_key = config.source_key
embedding_func = config.embedding_function
if source_key is None or embedding_func is None:
continue
if not isinstance(embedding_func, SparseEmbeddingFunction):
embedding_func = cast(SparseEmbeddingFunction[Any], embedding_func)
validate_sparse_embedding_function(embedding_func)
# Initialize collection lists for batch processing
inputs: List[str] = []
positions: List[int] = []
# Handle special case: source_key is "#document"
if source_key == DOCUMENT_KEY:
if documents_list is None:
continue
# Collect documents that need embedding
for idx, metadata in enumerate(updated_metadatas):
# Skip if target already exists in metadata
if target_key in metadata:
continue
# Get document at this position
if idx < len(documents_list):
doc = documents_list[idx]
if isinstance(doc, str):
inputs.append(doc)
positions.append(idx)
# Generate embeddings for all collected documents
if len(inputs) == 0:
continue
sparse_embeddings = self._sparse_embed(
input=inputs,
sparse_embedding_function=embedding_func,
)
if len(sparse_embeddings) != len(positions):
raise ValueError(
"Sparse embedding function returned unexpected number of embeddings."
)
for position, embedding in zip(positions, sparse_embeddings):
updated_metadatas[position][target_key] = embedding
continue # Skip the metadata-based logic below
# Handle normal case: source_key is a metadata field
for idx, metadata in enumerate(updated_metadatas):
if target_key in metadata:
continue
source_value = metadata.get(source_key)
if not isinstance(source_value, str):
continue
inputs.append(source_value)
positions.append(idx)
if len(inputs) == 0:
continue
sparse_embeddings = self._sparse_embed(
input=inputs,
sparse_embedding_function=embedding_func,
)
if len(sparse_embeddings) != len(positions):
raise ValueError(
"Sparse embedding function returned unexpected number of embeddings."
)
for position, embedding in zip(positions, sparse_embeddings):
updated_metadatas[position][target_key] = embedding
# Convert empty dicts back to None, validation requires non-empty dicts or None
result_metadatas: List[Optional[Metadata]] = [
metadata if metadata else None for metadata in updated_metadatas
]
validate_metadatas(cast(List[Metadata], result_metadatas))
return cast(List[Metadata], result_metadatas)
def _embed_record_set(
self,
record_set: BaseRecordSet,
embeddable_fields: Optional[Set[str]] = None,
is_query: bool = False,
) -> Embeddings:
if embeddable_fields is None:
embeddable_fields = get_default_embeddable_record_set_fields()
for field in embeddable_fields:
if record_set[field] is not None: # type: ignore[literal-required]
# uris require special handling
if field == "uris":
if self._data_loader is None:
raise ValueError(
"You must set a data loader on the collection if loading from URIs."
)
return self._embed(
input=self._data_loader(uris=cast(URIs, record_set[field])), # type: ignore[literal-required]
is_query=is_query,
)
else:
return self._embed(
input=record_set[field], # type: ignore[literal-required]
is_query=is_query,
)
raise ValueError(
"Record does not contain any non-None fields that can be embedded."
f"Embeddable Fields: {embeddable_fields}"
f"Record Fields: {record_set}"
)
def _embed(self, input: Any, is_query: bool = False) -> Embeddings:
if self._embedding_function is not None and not isinstance(
self._embedding_function, DefaultEmbeddingFunction
):
if is_query:
return self._embedding_function.embed_query(input=input)
else:
return self._embedding_function(input=input)
config_ef = self.configuration.get("embedding_function")
if config_ef is not None:
if is_query:
return config_ef.embed_query(input=input)
else:
return config_ef(input=input)
schema = self.schema
schema_embedding_function: Optional[EmbeddingFunction[Embeddable]] = None
if schema is not None:
override = schema.keys.get(EMBEDDING_KEY)
if (
override is not None
and override.float_list is not None
and override.float_list.vector_index is not None
and override.float_list.vector_index.config.embedding_function
is not None
):
schema_embedding_function = cast(
EmbeddingFunction[Embeddable],
override.float_list.vector_index.config.embedding_function,
)
elif (
schema.defaults.float_list is not None
and schema.defaults.float_list.vector_index is not None
and schema.defaults.float_list.vector_index.config.embedding_function
is not None
):
schema_embedding_function = cast(
EmbeddingFunction[Embeddable],
schema.defaults.float_list.vector_index.config.embedding_function,
)
if schema_embedding_function is not None:
if is_query and hasattr(schema_embedding_function, "embed_query"):
return schema_embedding_function.embed_query(input=input)
return schema_embedding_function(input=input)
if self._embedding_function is None:
raise ValueError(
"You must provide an embedding function to compute embeddings."
"https://docs.trychroma.com/guides/embeddings"
)
if is_query:
return self._embedding_function.embed_query(input=input)
else:
return self._embedding_function(input=input)
def _sparse_embed(
self,
input: Any,
sparse_embedding_function: SparseEmbeddingFunction[Any],
is_query: bool = False,
) -> Any:
if is_query:
return sparse_embedding_function.embed_query(input=input)
return sparse_embedding_function(input=input)
def _embed_knn_string_queries(self, knn: Any) -> Any:
"""Embed string queries in Knn objects using the appropriate embedding function.
Args:
knn: A Knn object that may have a string query
Returns:
A Knn object with the string query replaced by an embedding
Raises:
ValueError: If the query is a string but no embedding function is available
"""
from chromadb.execution.expression.operator import Knn
if not isinstance(knn, Knn):
return knn
# If query is not a string, nothing to do
if not isinstance(knn.query, str):
return knn
query_text = knn.query
key = knn.key
# Handle main embedding field
if key == EMBEDDING_KEY:
# Use the collection's main embedding function
embedding = self._embed(input=[query_text], is_query=True)
if not embedding or len(embedding) != 1:
raise ValueError(
"Embedding function returned unexpected number of embeddings"
)
# Return a new Knn with the embedded query
return Knn(
query=embedding[0],
key=knn.key,
limit=knn.limit,
default=knn.default,
return_rank=knn.return_rank,
)
# Handle metadata field with potential sparse embedding
schema = self.schema
if schema is None or key not in schema.keys:
raise ValueError(
f"Cannot embed string query for key '{key}': "
f"key not found in schema. Please provide an embedded vector or "
f"configure an embedding function for this key in the schema."
)
value_type = schema.keys[key]
# Check for sparse vector with embedding function
if value_type.sparse_vector is not None:
sparse_index = value_type.sparse_vector.sparse_vector_index
if sparse_index is not None and sparse_index.enabled:
sparse_config = sparse_index.config
if sparse_config.embedding_function is not None:
embedding_func = sparse_config.embedding_function
if not isinstance(embedding_func, SparseEmbeddingFunction):
embedding_func = cast(
SparseEmbeddingFunction[Any], embedding_func
)
validate_sparse_embedding_function(embedding_func)
# Embed the query
sparse_embedding = self._sparse_embed(
input=[query_text],
sparse_embedding_function=embedding_func,
is_query=True,
)
if not sparse_embedding or len(sparse_embedding) != 1:
raise ValueError(
"Sparse embedding function returned unexpected number of embeddings"
)
# Return a new Knn with the sparse embedding
return Knn(
query=sparse_embedding[0],
key=knn.key,
limit=knn.limit,
default=knn.default,
return_rank=knn.return_rank,
)
# Check for dense vector with embedding function (float_list)
if value_type.float_list is not None:
vector_index = value_type.float_list.vector_index
if vector_index is not None and vector_index.enabled:
dense_config = vector_index.config
if dense_config.embedding_function is not None:
embedding_func = dense_config.embedding_function
validate_embedding_function(embedding_func)
# Embed the query using the schema's embedding function
try:
embeddings = embedding_func.embed_query(input=[query_text])
except AttributeError:
# Fallback if embed_query doesn't exist
embeddings = embedding_func([query_text])
if not embeddings or len(embeddings) != 1:
raise ValueError(
"Embedding function returned unexpected number of embeddings"
)
# Return a new Knn with the dense embedding
return Knn(
query=embeddings[0],
key=knn.key,
limit=knn.limit,
default=knn.default,
return_rank=knn.return_rank,
)
raise ValueError(
f"Cannot embed string query for key '{key}': "
f"no embedding function configured for this key in the schema. "
f"Please provide an embedded vector or configure an embedding function."
)
def _embed_rank_string_queries(self, rank: Any) -> Any:
"""Recursively embed string queries in Rank expressions.
Args:
rank: A Rank expression that may contain Knn objects with string queries
Returns:
A Rank expression with all string queries embedded
"""
# Import here to avoid circular dependency
from chromadb.execution.expression.operator import (
Knn,
Abs,
Div,
Exp,
Log,
Max,
Min,
Mul,
Sub,
Sum,
Val,
Rrf,
)
if rank is None:
return None
# Base case: Knn - embed if it has a string query
if isinstance(rank, Knn):
return self._embed_knn_string_queries(rank)
# Base case: Val - no embedding needed
if isinstance(rank, Val):
return rank
# Recursive cases: walk through child ranks
if isinstance(rank, Abs):
return Abs(self._embed_rank_string_queries(rank.rank))
if isinstance(rank, Div):
return Div(
self._embed_rank_string_queries(rank.left),
self._embed_rank_string_queries(rank.right),
)
if isinstance(rank, Exp):
return Exp(self._embed_rank_string_queries(rank.rank))
if isinstance(rank, Log):
return Log(self._embed_rank_string_queries(rank.rank))
if isinstance(rank, Max):
return Max([self._embed_rank_string_queries(r) for r in rank.ranks])
if isinstance(rank, Min):
return Min([self._embed_rank_string_queries(r) for r in rank.ranks])
if isinstance(rank, Mul):
return Mul([self._embed_rank_string_queries(r) for r in rank.ranks])
if isinstance(rank, Sub):
return Sub(
self._embed_rank_string_queries(rank.left),
self._embed_rank_string_queries(rank.right),
)
if isinstance(rank, Sum):
return Sum([self._embed_rank_string_queries(r) for r in rank.ranks])
if isinstance(rank, Rrf):
return Rrf(
ranks=[self._embed_rank_string_queries(r) for r in rank.ranks],
k=rank.k,
weights=rank.weights,
normalize=rank.normalize,
)
# Unknown rank type - return as is
return rank
def _embed_search_string_queries(self, search: Any) -> Any:
"""Embed string queries in a Search object.
Args:
search: A Search object that may contain Knn objects with string queries
Returns:
A Search object with all string queries embedded
"""
# Import here to avoid circular dependency
from chromadb.execution.expression.plan import Search
if not isinstance(search, Search):
return search
# Embed the rank expression if it exists
embedded_rank = self._embed_rank_string_queries(search._rank)
# Create a new Search with the embedded rank
return Search(
where=search._where,
rank=embedded_rank,
limit=search._limit,
select=search._select,
)
| CollectionCommon |
python | ray-project__ray | release/microbenchmark/experimental/gpu_object_microbenchmark.py | {
"start": 1340,
"end": 6730
} | class ____:
def __init__(
self,
shape: Tuple[int],
dtype: torch.dtype,
device: torch.device,
) -> None:
self.device = device
self.dtype = dtype
self.shape = shape
def send(self) -> torch.Tensor:
seed = int(np.random.randint(100))
return torch.ones(self.shape, dtype=self.dtype, device=self.device) * seed
def recv(self, tensor: torch.Tensor):
assert tensor.device.type == self.device.type
# Return the first element of the tensor to make sure the actor has received the tensor.
return tensor[0].item()
def _exec_p2p_transfer(
label: str,
shape: Tuple[int],
backend: str,
sender_hint: ray.util.scheduling_strategies.NodeAffinitySchedulingStrategy,
receiver_hint: ray.util.scheduling_strategies.NodeAffinitySchedulingStrategy,
):
if backend not in BACKEND_CONFIG:
raise ValueError(f"Unsupported backend: {backend}")
backend_config = BACKEND_CONFIG[backend]
device = backend_config.device
init_actor_kwargs = backend_config.init_actor_kwargs
send_method_kwargs = backend_config.send_method_kwargs
collective_group_backend = backend_config.collective_group_backend
sender = Actor.options(scheduling_strategy=sender_hint, **init_actor_kwargs).remote(
shape, DTYPE, device
)
receiver = Actor.options(
scheduling_strategy=receiver_hint, **init_actor_kwargs
).remote(shape, DTYPE, device)
if collective_group_backend is not None:
create_collective_group([sender, receiver], backend=collective_group_backend)
def _run():
ref = sender.send.options(**send_method_kwargs).remote()
ref2 = receiver.recv.remote(ref)
ray.get(ref2)
results = timeit(label, _run)
kill_actor_and_wait_for_failure(sender)
kill_actor_and_wait_for_failure(receiver)
return results
def _exec_p2p_transfer_multiple_shapes(
label: str,
backend: str,
sender_hint: ray.util.scheduling_strategies.NodeAffinitySchedulingStrategy,
receiver_hint: ray.util.scheduling_strategies.NodeAffinitySchedulingStrategy,
):
temp_results = []
for shape in SHAPE:
temp_results += _exec_p2p_transfer(
f"{label}_shape_{shape}", shape, backend, sender_hint, receiver_hint
)
return temp_results
def _exec_p2p_transfer_object(
sender_hint: ray.util.scheduling_strategies.NodeAffinitySchedulingStrategy,
receiver_hint: ray.util.scheduling_strategies.NodeAffinitySchedulingStrategy,
):
return _exec_p2p_transfer_multiple_shapes(
"exec_p2p_transfer_object", "object", sender_hint, receiver_hint
)
def _exec_p2p_transfer_gloo(
sender_hint: ray.util.scheduling_strategies.NodeAffinitySchedulingStrategy,
receiver_hint: ray.util.scheduling_strategies.NodeAffinitySchedulingStrategy,
):
return _exec_p2p_transfer_multiple_shapes(
"exec_p2p_transfer_gloo", "gloo", sender_hint, receiver_hint
)
def _exec_p2p_transfer_nccl(
sender_hint: ray.util.scheduling_strategies.NodeAffinitySchedulingStrategy,
receiver_hint: ray.util.scheduling_strategies.NodeAffinitySchedulingStrategy,
):
return _exec_p2p_transfer_multiple_shapes(
"exec_p2p_transfer_nccl", "nccl", sender_hint, receiver_hint
)
def to_dict_key(key: str):
for r in [" ", ":", "-"]:
key = key.replace(r, "_")
for r in ["(", ")"]:
key = key.replace(r, "")
return key
def main() -> None:
p = argparse.ArgumentParser(description="GPU tensor transfer benchmark")
p.add_argument(
"--distributed",
action="store_true",
help="Whether this is running on more than one node",
)
args = p.parse_args()
ray.init(logging_level="ERROR")
distributed = args.distributed
sender_hint, receiver_hint = None, None
if distributed:
local_node_id = ray.get_runtime_context().get_node_id()
node_ids = [node["NodeID"] for node in ray.nodes()]
remote_node_ids = [node_id for node_id in node_ids if node_id != local_node_id]
assert remote_node_ids
remote_node_id = remote_node_ids[0]
# Pin sender on local node and receiver on the other node for consistent
# results.
sender_hint = ray.util.scheduling_strategies.NodeAffinitySchedulingStrategy(
local_node_id, soft=False
)
receiver_hint = ray.util.scheduling_strategies.NodeAffinitySchedulingStrategy(
remote_node_id, soft=False
)
results = []
results.extend(_exec_p2p_transfer_object(sender_hint, receiver_hint))
results.extend(_exec_p2p_transfer_gloo(sender_hint, receiver_hint))
results.extend(_exec_p2p_transfer_nccl(sender_hint, receiver_hint))
result_dict = {
f"{to_dict_key(v[0])}": (v[1], v[2]) for v in results if v is not None
}
perf_metrics = [
{
"perf_metric_name": to_dict_key(v[0]),
"perf_metric_value": v[1],
"perf_metric_type": "THROUGHPUT",
}
for v in results
if v is not None
]
result_dict["perf_metrics"] = perf_metrics
test_output_json = os.environ.get(
"TEST_OUTPUT_JSON", "/tmp/microbenchmark_gpu_object.json"
)
with open(test_output_json, "wt") as f:
json.dump(result_dict, f)
if __name__ == "__main__":
main()
| Actor |
python | pytorch__pytorch | torch/testing/_internal/quantization_torch_package_models.py | {
"start": 73,
"end": 477
} | class ____(nn.Module):
def __init__(self, N):
super().__init__()
self.w1 = nn.Parameter(torch.empty(N, N))
self.b1 = nn.Parameter(torch.zeros(N))
torch.nn.init.kaiming_uniform_(self.w1, a=math.sqrt(5))
def forward(self, x):
x = torch.nn.functional.linear(x, self.w1, self.b1)
x = torch.nn.functional.relu(x)
return x
| LinearReluFunctionalChild |
python | scipy__scipy | scipy/optimize/_differentiable_functions.py | {
"start": 15278,
"end": 15472
} | class ____:
def __init__(self, fun):
self.fun = fun
self.nfev = 0
def __call__(self, x):
self.nfev += 1
return np.atleast_1d(self.fun(x))
| _VectorFunWrapper |
python | anthropics__anthropic-sdk-python | src/anthropic/types/tool_choice_tool_param.py | {
"start": 219,
"end": 552
} | class ____(TypedDict, total=False):
name: Required[str]
"""The name of the tool to use."""
type: Required[Literal["tool"]]
disable_parallel_tool_use: bool
"""Whether to disable parallel tool use.
Defaults to `false`. If set to `true`, the model will output exactly one tool
use.
"""
| ToolChoiceToolParam |
python | dagster-io__dagster | python_modules/libraries/dagster-fivetran/dagster_fivetran/resources.py | {
"start": 40184,
"end": 60434
} | class ____(ConfigurableResource):
"""This class represents a Fivetran workspace and provides utilities
to interact with Fivetran APIs.
"""
account_id: str = Field(description="The Fivetran account ID.")
api_key: str = Field(description="The Fivetran API key to use for this resource.")
api_secret: str = Field(description="The Fivetran API secret to use for this resource.")
snapshot_path: Optional[str] = Field(
default=None,
description=(
"Path to a snapshot file to load Fivetran data from,"
"rather than fetching it from the Fivetran API."
),
)
request_max_retries: int = Field(
default=3,
description=(
"The maximum number of times requests to the Fivetran API should be retried "
"before failing."
),
)
request_retry_delay: float = Field(
default=0.25,
description="Time (in seconds) to wait between each request retry.",
)
disable_schedule_on_trigger: bool = Field(
default=True,
description=(
"Whether to disable the schedule of a connector when it is synchronized using this resource."
"Defaults to True."
),
)
@cached_property
def _log(self) -> logging.Logger:
return get_dagster_logger()
@cached_property
def snapshot(self) -> Optional[RepositoryLoadData]:
snapshot = None
if self.snapshot_path and not os.getenv(FIVETRAN_SNAPSHOT_ENV_VAR_NAME):
snapshot = deserialize_value(Path(self.snapshot_path).read_text(), RepositoryLoadData)
return snapshot
@cached_method
def get_client(self) -> FivetranClient:
return FivetranClient(
api_key=self.api_key,
api_secret=self.api_secret,
request_max_retries=self.request_max_retries,
request_retry_delay=self.request_retry_delay,
disable_schedule_on_trigger=self.disable_schedule_on_trigger,
)
@cached_method
def fetch_fivetran_workspace_data(
self,
) -> FivetranWorkspaceData:
"""Retrieves all Fivetran content from the workspace and returns it as a FivetranWorkspaceData object.
Returns:
FivetranWorkspaceData: A snapshot of the Fivetran workspace's content.
"""
connectors_by_id = {}
destinations_by_id = {}
schema_configs_by_connector_id = {}
client = self.get_client()
groups = client.get_groups()["items"]
for group in groups:
group_id = group["id"]
destination_details = client.get_destination_details(destination_id=group_id)
destination = FivetranDestination.from_destination_details(
destination_details=destination_details
)
destinations_by_id[destination.id] = destination
connectors_details = client.list_connectors_for_group(group_id=group_id)
for connector_details in connectors_details:
connector = FivetranConnector.from_connector_details(
connector_details=connector_details,
)
if not connector.is_connected:
self._log.warning(
f"Ignoring incomplete or broken connector `{connector.name}`. "
f"Dagster requires a connector to be connected before fetching its data."
)
continue
schema_config_details = client.get_schema_config_for_connector(
connector_id=connector.id, raise_on_not_found_error=False
)
schema_config = (
FivetranSchemaConfig.from_schema_config_details(
schema_config_details=schema_config_details
)
if schema_config_details
else None
)
# A connector that has not been synced yet has no `schemas` field in its schema config.
# Schemas are required for creating the asset definitions,
# so connectors for which the schemas are missing are discarded.
if not schema_config or not schema_config.has_schemas:
self._log.warning(
f"Ignoring connector `{connector.name}`. "
f"Dagster requires connector schema information to represent this connector, "
f"which is not available until this connector has been run for the first time."
)
continue
connectors_by_id[connector.id] = connector
schema_configs_by_connector_id[connector.id] = schema_config
return FivetranWorkspaceData(
connectors_by_id=connectors_by_id,
destinations_by_id=destinations_by_id,
schema_configs_by_connector_id=schema_configs_by_connector_id,
)
def get_or_fetch_workspace_data(
self,
) -> FivetranWorkspaceData:
"""Retrieves all Fivetran content from the workspace using the FivetranWorkspaceDefsLoader
and returns it as a FivetranWorkspaceData object. If the workspace data has already been fetched,
the cached FivetranWorkspaceData object is returned.
Returns:
FivetranWorkspaceData: A snapshot of the Fivetran workspace's content.
"""
return FivetranWorkspaceDefsLoader(
workspace=self,
translator=DagsterFivetranTranslator(),
snapshot=self.snapshot,
).get_or_fetch_state()
@cached_method
def load_asset_specs(
self,
dagster_fivetran_translator: Optional[DagsterFivetranTranslator] = None,
connector_selector_fn: Optional[ConnectorSelectorFn] = None,
) -> Sequence[AssetSpec]:
"""Returns a list of AssetSpecs representing the Fivetran content in the workspace.
Args:
dagster_fivetran_translator (Optional[DagsterFivetranTranslator], optional): The translator to use
to convert Fivetran content into :py:class:`dagster.AssetSpec`.
Defaults to :py:class:`DagsterFivetranTranslator`.
connector_selector_fn (Optional[ConnectorSelectorFn]):
A function that allows for filtering which Fivetran connector assets are created for.
Returns:
List[AssetSpec]: The set of assets representing the Fivetran content in the workspace.
Examples:
Loading the asset specs for a given Fivetran workspace:
.. code-block:: python
from dagster_fivetran import FivetranWorkspace, load_fivetran_asset_specs
import dagster as dg
fivetran_workspace = FivetranWorkspace(
account_id=dg.EnvVar("FIVETRAN_ACCOUNT_ID"),
api_key=dg.EnvVar("FIVETRAN_API_KEY"),
api_secret=dg.EnvVar("FIVETRAN_API_SECRET"),
)
fivetran_specs = fivetran_workspace.load_asset_specs()
defs = dg.Definitions(assets=[*fivetran_specs], resources={"fivetran": fivetran_workspace}
"""
dagster_fivetran_translator = dagster_fivetran_translator or DagsterFivetranTranslator()
with self.process_config_and_initialize_cm() as initialized_workspace:
return [
spec.merge_attributes(
metadata={DAGSTER_FIVETRAN_TRANSLATOR_METADATA_KEY: dagster_fivetran_translator}
)
for spec in check.is_list(
FivetranWorkspaceDefsLoader(
workspace=initialized_workspace,
translator=dagster_fivetran_translator,
connector_selector_fn=connector_selector_fn,
snapshot=self.snapshot,
)
.build_defs()
.assets,
AssetSpec,
)
]
def _generate_materialization(
self,
fivetran_output: FivetranOutput,
dagster_fivetran_translator: DagsterFivetranTranslator,
):
connector = FivetranConnector.from_connector_details(
connector_details=fivetran_output.connector_details
)
schema_config = FivetranSchemaConfig.from_schema_config_details(
schema_config_details=fivetran_output.schema_config
)
for schema in schema_config.schemas.values():
if not schema.enabled:
continue
for table in schema.tables.values():
if not table.enabled:
continue
asset_key = dagster_fivetran_translator.get_asset_spec(
props=FivetranConnectorTableProps(
table=get_fivetran_connector_table_name(
schema_name=schema.name_in_destination,
table_name=table.name_in_destination,
),
connector_id=connector.id,
connector_name=connector.name,
connector_url=connector.url,
destination_id=connector.destination_id,
schema_config=schema_config,
database=None,
service=None,
)
).key
yield AssetMaterialization(
asset_key=asset_key,
description=(
f"Table generated via Fivetran sync: {schema.name_in_destination}.{table.name_in_destination}"
),
metadata={
**metadata_for_table(
as_dict(table),
get_fivetran_connector_url(fivetran_output.connector_details),
include_column_info=True,
database=None,
schema=schema.name_in_destination,
table=table.name_in_destination,
),
**FivetranMetadataSet(
connector_id=connector.id,
connector_name=connector.name,
destination_id=connector.destination_id,
destination_schema_name=schema.name_in_destination,
destination_table_name=table.name_in_destination,
),
},
)
@public
def sync_and_poll(
self,
context: AssetExecutionContext,
config: Optional[FivetranSyncConfig] = None,
) -> FivetranEventIterator[Union[AssetMaterialization, MaterializeResult]]:
"""Executes a sync and poll process to materialize Fivetran assets.
This method can only be used in the context of an asset execution.
Args:
context (AssetExecutionContext): The execution context
from within `@fivetran_assets`.
config (Optional[FivetranSyncConfig]): Optional configuration to control sync behavior.
If config.resync is True, performs a historical resync instead of a normal sync.
If config.resync_parameters is provided, only the specified tables will be resynced.
Returns:
Iterator[Union[AssetMaterialization, MaterializeResult]]: An iterator of MaterializeResult
or AssetMaterialization.
Examples:
Normal sync (without config):
.. code-block:: python
from dagster import AssetExecutionContext
from dagster_fivetran import FivetranWorkspace, fivetran_assets
@fivetran_assets(connector_id="my_connector", workspace=fivetran_workspace)
def my_fivetran_assets(context: AssetExecutionContext, fivetran: FivetranWorkspace):
yield from fivetran.sync_and_poll(context=context)
Historical resync of specific tables (config passed at runtime):
.. code-block:: python
from dagster import AssetExecutionContext
from dagster_fivetran import FivetranWorkspace, FivetranSyncConfig, fivetran_assets
@fivetran_assets(connector_id="my_connector", workspace=fivetran_workspace)
def my_fivetran_assets(
context: AssetExecutionContext,
fivetran: FivetranWorkspace,
config: FivetranSyncConfig,
):
# When materializing, pass config with:
# resync=True
# resync_parameters={"schema_name": ["table1", "table2"]}
yield from fivetran.sync_and_poll(context=context, config=config)
Full historical resync (config passed at runtime):
.. code-block:: python
from dagster import AssetExecutionContext
from dagster_fivetran import FivetranWorkspace, FivetranSyncConfig, fivetran_assets
@fivetran_assets(connector_id="my_connector", workspace=fivetran_workspace)
def my_fivetran_assets(
context: AssetExecutionContext,
fivetran: FivetranWorkspace,
config: FivetranSyncConfig,
):
# When materializing, pass config with resync=True to resync all tables
yield from fivetran.sync_and_poll(context=context, config=config)
"""
if config and config.resync:
return FivetranEventIterator(
events=self._resync_and_poll(
context=context, resync_parameters=config.resync_parameters
),
fivetran_workspace=self,
context=context,
)
else:
return FivetranEventIterator(
events=self._sync_and_poll(context=context),
fivetran_workspace=self,
context=context,
)
def _sync_and_poll(self, context: AssetExecutionContext):
assets_def = context.assets_def
dagster_fivetran_translator = get_translator_from_fivetran_assets(assets_def)
connector_id = next(
check.not_none(FivetranMetadataSet.extract(spec.metadata).connector_id)
for spec in assets_def.specs
)
client = self.get_client()
fivetran_output = client.sync_and_poll(
connector_id=connector_id,
)
# The FivetranOutput is None if the connector hasn't been synced
if not fivetran_output:
context.log.warning(
f"The connector with ID {connector_id} is currently paused and so it has not been synced. "
f"Make sure that your connector is enabled before syncing it with Dagster."
)
return
materialized_asset_keys = set()
for materialization in self._generate_materialization(
fivetran_output=fivetran_output, dagster_fivetran_translator=dagster_fivetran_translator
):
# Scan through all tables actually created, if it was expected then emit a MaterializeResult.
# Otherwise, emit a runtime AssetMaterialization.
if materialization.asset_key in context.selected_asset_keys:
yield MaterializeResult(
asset_key=materialization.asset_key, metadata=materialization.metadata
)
materialized_asset_keys.add(materialization.asset_key)
else:
context.log.warning(
f"An unexpected asset was materialized: {materialization.asset_key}. "
f"Yielding a materialization event."
)
yield materialization
unmaterialized_asset_keys = context.selected_asset_keys - materialized_asset_keys
if unmaterialized_asset_keys:
context.log.warning(f"Assets were not materialized: {unmaterialized_asset_keys}")
def _resync_and_poll(
self,
context: AssetExecutionContext,
resync_parameters: Optional[Mapping[str, Sequence[str]]] = None,
):
assets_def = context.assets_def
dagster_fivetran_translator = get_translator_from_fivetran_assets(assets_def)
connector_id = next(
check.not_none(FivetranMetadataSet.extract(spec.metadata).connector_id)
for spec in assets_def.specs
)
client = self.get_client()
fivetran_output = client.resync_and_poll(
connector_id=connector_id,
resync_parameters=resync_parameters,
)
# The FivetranOutput is None if the connector hasn't been synced
if not fivetran_output:
context.log.warning(
f"The connector with ID {connector_id} is currently paused and so it has not been resynced. "
f"Make sure that your connector is enabled before resyncing it with Dagster."
)
return
materialized_asset_keys = set()
for materialization in self._generate_materialization(
fivetran_output=fivetran_output, dagster_fivetran_translator=dagster_fivetran_translator
):
# Scan through all tables actually created, if it was expected then emit a MaterializeResult.
# Otherwise, emit a runtime AssetMaterialization.
if materialization.asset_key in context.selected_asset_keys:
yield MaterializeResult(
asset_key=materialization.asset_key, metadata=materialization.metadata
)
materialized_asset_keys.add(materialization.asset_key)
else:
context.log.warning(
f"An unexpected asset was materialized: {materialization.asset_key}. "
f"Yielding a materialization event."
)
yield materialization
unmaterialized_asset_keys = context.selected_asset_keys - materialized_asset_keys
if unmaterialized_asset_keys:
context.log.warning(f"Assets were not materialized: {unmaterialized_asset_keys}")
def load_fivetran_asset_specs(
workspace: FivetranWorkspace,
dagster_fivetran_translator: Optional[DagsterFivetranTranslator] = None,
connector_selector_fn: Optional[ConnectorSelectorFn] = None,
) -> Sequence[AssetSpec]:
"""Returns a list of AssetSpecs representing the Fivetran content in the workspace.
Args:
workspace (FivetranWorkspace): The Fivetran workspace to fetch assets from.
dagster_fivetran_translator (Optional[DagsterFivetranTranslator], optional): The translator to use
to convert Fivetran content into :py:class:`dagster.AssetSpec`.
Defaults to :py:class:`DagsterFivetranTranslator`.
connector_selector_fn (Optional[ConnectorSelectorFn]):
A function that allows for filtering which Fivetran connector assets are created for.
Returns:
List[AssetSpec]: The set of assets representing the Fivetran content in the workspace.
Examples:
Loading the asset specs for a given Fivetran workspace:
.. code-block:: python
from dagster_fivetran import FivetranWorkspace, load_fivetran_asset_specs
import dagster as dg
fivetran_workspace = FivetranWorkspace(
account_id=dg.EnvVar("FIVETRAN_ACCOUNT_ID"),
api_key=dg.EnvVar("FIVETRAN_API_KEY"),
api_secret=dg.EnvVar("FIVETRAN_API_SECRET"),
)
fivetran_specs = load_fivetran_asset_specs(fivetran_workspace)
defs = dg.Definitions(assets=[*fivetran_specs], resources={"fivetran": fivetran_workspace}
"""
return workspace.load_asset_specs(
dagster_fivetran_translator=dagster_fivetran_translator,
connector_selector_fn=connector_selector_fn,
)
@record
| FivetranWorkspace |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/coercions.py | {
"start": 26732,
"end": 26953
} | class ____(_CoerceLiterals, _ColumnCoercions, RoleImpl):
__slots__ = ()
_coerce_consts = True
def _text_coercion(self, element, argname=None):
return _no_text_coercion(element, argname)
| WhereHavingImpl |
python | gevent__gevent | src/gevent/tests/test__pool.py | {
"start": 8817,
"end": 14808
} | class ____(greentest.TestCase): # pylint:disable=too-many-public-methods
__timeout__ = greentest.LARGE_TIMEOUT
size = 1
def setUp(self):
greentest.TestCase.setUp(self)
self.pool = gevent.pool.Pool(self.size)
def cleanup(self):
self.pool.join()
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), 25)
self.assertEqual(papply(sqr, (), {'x': 3}), 9)
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, range(SMALL_RANGE)), list(map(squared, range(SMALL_RANGE))))
self.assertEqual(pmap(sqr, range(100)), list(map(squared, range(100))))
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimeoutAlmostEqual(get.elapsed, TIMEOUT1, 1)
def test_async_callback(self):
result = []
res = self.pool.apply_async(sqr, (7, TIMEOUT1,), callback=result.append)
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimeoutAlmostEqual(get.elapsed, TIMEOUT1, 1)
gevent.sleep(0) # lets the callback run
self.assertEqual(result, [49])
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 0.2))
get = TimingWrapper(res.get)
self.assertRaises(gevent.Timeout, get, timeout=TIMEOUT2)
self.assertTimeoutAlmostEqual(get.elapsed, TIMEOUT2, 1)
self.pool.join()
def test_imap_list_small(self):
it = self.pool.imap(sqr, range(SMALL_RANGE))
self.assertEqual(list(it), list(map(sqr, range(SMALL_RANGE))))
def test_imap_it_small(self):
it = self.pool.imap(sqr, range(SMALL_RANGE))
for i in range(SMALL_RANGE):
self.assertEqual(next(it), i * i)
self.assertRaises(StopIteration, next, it)
def test_imap_it_large(self):
it = self.pool.imap(sqr, range(LARGE_RANGE))
for i in range(LARGE_RANGE):
self.assertEqual(next(it), i * i)
self.assertRaises(StopIteration, next, it)
def test_imap_random(self):
it = self.pool.imap(sqr_random_sleep, range(SMALL_RANGE))
self.assertEqual(list(it), list(map(squared, range(SMALL_RANGE))))
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, range(LARGE_RANGE))
self.assertEqual(sorted(it), list(map(squared, range(LARGE_RANGE))))
it = self.pool.imap_unordered(sqr, range(LARGE_RANGE))
self.assertEqual(sorted(it), list(map(squared, range(LARGE_RANGE))))
def test_imap_unordered_random(self):
it = self.pool.imap_unordered(sqr_random_sleep, range(SMALL_RANGE))
self.assertEqual(sorted(it), list(map(squared, range(SMALL_RANGE))))
def test_empty_imap_unordered(self):
it = self.pool.imap_unordered(sqr, [])
self.assertEqual(list(it), [])
def test_empty_imap(self):
it = self.pool.imap(sqr, [])
self.assertEqual(list(it), [])
def test_empty_map(self):
self.assertEqual(self.pool.map(sqr, []), [])
def test_terminate(self):
result = self.pool.map_async(gevent.sleep, [0.1] * ((self.size or 10) * 2))
gevent.sleep(0.1)
kill = TimingWrapper(self.pool.kill)
kill()
self.assertTimeWithinRange(kill.elapsed, 0.0, 0.5)
result.join()
def sleep(self, x):
gevent.sleep(float(x) / 10.)
return str(x)
def test_imap_unordered_sleep(self):
# testing that imap_unordered returns items in competion order
result = list(self.pool.imap_unordered(self.sleep, [10, 1, 2]))
if self.pool.size == 1:
expected = ['10', '1', '2']
else:
expected = ['1', '2', '10']
self.assertEqual(result, expected)
# https://github.com/gevent/gevent/issues/423
def test_imap_no_stop(self):
q = Queue()
q.put(123)
gevent.spawn_later(0.1, q.put, StopIteration)
result = list(self.pool.imap(lambda _: _, q))
self.assertEqual(result, [123])
def test_imap_unordered_no_stop(self):
q = Queue()
q.put(1234)
gevent.spawn_later(0.1, q.put, StopIteration)
result = list(self.pool.imap_unordered(lambda _: _, q))
self.assertEqual(result, [1234])
# same issue, but different test: https://github.com/gevent/gevent/issues/311
def test_imap_final_sleep(self):
result = list(self.pool.imap(sqr, final_sleep()))
self.assertEqual(result, [0, 1, 4])
def test_imap_unordered_final_sleep(self):
result = list(self.pool.imap_unordered(sqr, final_sleep()))
self.assertEqual(result, [0, 1, 4])
# Issue 638
def test_imap_unordered_bounded_queue(self):
iterable = list(range(100))
running = [0]
def short_running_func(i, _j):
running[0] += 1
return i
def make_reader(mapping):
# Simulate a long running reader. No matter how many workers
# we have, we will never have a queue more than size 1
def reader():
result = []
for i, x in enumerate(mapping):
self.assertTrue(running[0] <= i + 2, running[0])
result.append(x)
gevent.sleep(0.01)
self.assertTrue(len(mapping.queue) <= 2, len(mapping.queue))
return result
return reader
# Send two iterables to make sure varargs and kwargs are handled
# correctly
for meth in self.pool.imap_unordered, self.pool.imap:
running[0] = 0
mapping = meth(short_running_func, iterable, iterable,
maxsize=1)
reader = make_reader(mapping)
l = reader()
self.assertEqual(sorted(l), iterable)
@greentest.ignores_leakcheck
| TestPool |
python | dask__distributed | distributed/worker_state_machine.py | {
"start": 4747,
"end": 5055
} | class ____(Exception):
"""Two or more recommendations for the same task suggested different finish states"""
@lru_cache
def _default_data_size() -> int:
return parse_bytes(dask.config.get("distributed.scheduler.default-data-size"))
@dataclass(repr=False, eq=False, slots=True)
| RecommendationsConflict |
python | dagster-io__dagster | python_modules/libraries/dagster-deltalake/dagster_deltalake/config.py | {
"start": 279,
"end": 533
} | class ____(Config):
"""Storage configuration for local object store."""
provider: Literal["local"] = "local"
def str_dict(self) -> dict[str, str]:
"""Storage options as str dict."""
return _to_str_dict(self.dict())
| LocalConfig |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-s3/integration_tests/test_acceptance.py | {
"start": 1007,
"end": 6994
} | class ____(BaseModel):
"""Acceptance test instance, as a Pydantic model.
This class represents an acceptance test instance, which is a single test case
that can be run against a connector. It is used to deserialize and validate the
acceptance test configuration file.
"""
class AcceptanceTestExpectRecords(BaseModel):
path: Path
exact_order: bool = False
class AcceptanceTestFileTypes(BaseModel):
skip_test: bool
bypass_reason: str
config_path: Path
configured_catalog_path: Path | None = None
timeout_seconds: int | None = None
expect_records: AcceptanceTestExpectRecords | None = None
file_types: AcceptanceTestFileTypes | None = None
status: Literal["succeed", "failed"] | None = None
@property
def expect_exception(self) -> bool:
return self.status and self.status == "failed"
@property
def instance_name(self) -> str:
return self.config_path.stem
def get_acceptance_tests(category: str) -> list[AcceptanceTestInstance]:
all_tests_config = yaml.safe_load(ACCEPTANCE_TEST_CONFIG_PATH.read_text())
return [
AcceptanceTestInstance.model_validate(test)
for test in all_tests_config["acceptance_tests"][category]["tests"]
if "iam_role" not in test["config_path"]
]
# TODO: Convert to a CDK class for better reuse and portability.
# class TestSourceAcceptanceTestSuiteBase:
# """Test suite for acceptance tests."""
SOURCE_CLASS: type[Source] = SourceS3
def run_test_job(
verb: Literal["read", "check", "discover"],
test_instance: AcceptanceTestInstance,
catalog: dict | None = None,
) -> entrypoint_wrapper.EntrypointOutput:
"""Run a test job from provided CLI args and return the result."""
args = [verb]
if test_instance.config_path:
args += ["--config", str(test_instance.config_path)]
catalog_path: Path | None = None
if verb not in ["discover", "check"]:
if catalog:
# Write the catalog to a temp json file and pass the path to the file as an argument.
catalog_path = Path(tempfile.gettempdir()) / "airbyte-test" / f"temp_catalog_{uuid.uuid4().hex}.json"
catalog_path.parent.mkdir(parents=True, exist_ok=True)
catalog_path.write_text(orjson.dumps(catalog).decode())
elif test_instance.configured_catalog_path:
catalog_path = Path(test_instance.configured_catalog_path)
if catalog_path:
args += ["--catalog", str(catalog_path)]
# This is a bit of a hack because the source needs the catalog early.
# Because it *also* can fail, we have ot redundantly wrap it in a try/except block.
try:
source: Source = SOURCE_CLASS.create(
configured_catalog_path=catalog_path,
)
except Exception as ex:
if not test_instance.expect_exception:
raise
return entrypoint_wrapper.EntrypointOutput(
messages=[],
uncaught_exception=ex,
)
result: entrypoint_wrapper.EntrypointOutput = entrypoint_wrapper._run_command( # noqa: SLF001 # Non-public API
source=source,
args=args,
expecting_exception=test_instance.expect_exception,
)
if result.errors and not test_instance.expect_exception:
raise AssertionError(
"\n\n".join(
[str(err.trace.error).replace("\\n", "\n") for err in result.errors],
)
)
if test_instance.expect_exception and not result.errors:
raise AssertionError("Expected exception but got none.") # noqa: TRY003
return result
@pytest.mark.parametrize(
"instance",
get_acceptance_tests("full_refresh"),
ids=lambda instance: instance.instance_name,
)
def test_full_refresh(instance: AcceptanceTestInstance) -> None:
"""Run acceptance tests."""
result = run_test_job(
"read",
test_instance=instance,
)
if not result.records:
raise AssertionError("Expected records but got none.") # noqa: TRY003
@pytest.mark.parametrize(
"instance",
get_acceptance_tests("basic_read"),
ids=lambda instance: instance.instance_name,
)
def test_basic_read(instance: AcceptanceTestInstance) -> None:
"""Run acceptance tests."""
discover_result = run_test_job(
"discover",
test_instance=instance,
)
assert discover_result.catalog, "Expected a non-empty catalog."
configured_catalog = ConfiguredAirbyteCatalog(
streams=[
ConfiguredAirbyteStream(
stream=stream,
sync_mode=SyncMode.full_refresh,
destination_sync_mode=DestinationSyncMode.append_dedup,
)
for stream in discover_result.catalog.catalog.streams
]
)
result = run_test_job(
"read",
test_instance=instance,
catalog=configured_catalog,
)
if not result.records:
raise AssertionError("Expected records but got none.") # noqa: TRY003
@pytest.mark.parametrize(
"instance",
get_acceptance_tests("connection"),
ids=lambda instance: instance.instance_name,
)
def test_check(instance: AcceptanceTestInstance) -> None:
"""Run acceptance tests."""
result: entrypoint_wrapper.EntrypointOutput = run_test_job(
"check",
test_instance=instance,
)
conn_status_messages: list[AirbyteMessage] = [msg for msg in result._messages if msg.type == Type.CONNECTION_STATUS] # noqa: SLF001 # Non-public API
assert len(conn_status_messages) == 1, "Expected exactly one CONNECTION_STATUS message. Got: \n" + "\n".join(result._messages)
@pytest.mark.parametrize(
"instance",
get_acceptance_tests("full_refresh"),
ids=lambda instance: instance.instance_name,
)
def test_discover(instance: AcceptanceTestInstance) -> None:
"""Run acceptance tests."""
run_test_job(
"check",
test_instance=instance,
)
| AcceptanceTestInstance |
python | tensorflow__tensorflow | tensorflow/python/ops/ragged/ragged_bitcast_op_test.py | {
"start": 1083,
"end": 4606
} | class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
@parameterized.parameters([
#=========================================================================
# Cast to same-size dtype.
#=========================================================================
dict(
descr='int32 to int32 cast',
inputs=ragged_factory_ops.constant_value(
[[1, 2], [3]],
dtype=dtypes.int32,
),
outputs=ragged_factory_ops.constant_value(
[[1, 2], [3]],
dtype=dtypes.int32,
)),
dict(
descr='int32 to uint32 cast',
inputs=ragged_factory_ops.constant_value(
[[1, 2], [-1]],
dtype=dtypes.int32,
),
outputs=ragged_factory_ops.constant_value(
[[1, 2], [4294967295]],
dtype=dtypes.uint32,
)),
dict(
descr='uint32 to int32 cast',
inputs=ragged_factory_ops.constant_value(
[[1, 2], [4294967295]],
dtype=dtypes.uint32,
),
outputs=ragged_factory_ops.constant_value(
[[1, 2], [-1]],
dtype=dtypes.int32,
)),
#=========================================================================
# Cast to larger dtype.
#=========================================================================
dict(
descr='int32 to int64 cast',
inputs=ragged_factory_ops.constant_value(
[[[1, 0], [2, 0]], [[3, 0]]],
dtype=dtypes.int32,
ragged_rank=1,
),
outputs=ragged_factory_ops.constant_value(
[[1, 2], [3]],
dtype=dtypes.int64,
)),
#=========================================================================
# Cast to smaller dtype.
#=========================================================================
dict(
descr='int64 to int32 cast',
inputs=ragged_factory_ops.constant_value(
[[1, 2], [3]],
dtype=dtypes.int64,
),
outputs=ragged_factory_ops.constant_value(
[[[1, 0], [2, 0]], [[3, 0]]],
dtype=dtypes.int32,
ragged_rank=1,
)),
]) # pyformat: disable
def testBitcast(self, descr, inputs, outputs, name=None):
result = ragged_array_ops.bitcast(inputs, outputs.dtype, name)
self.assertEqual(result.dtype, outputs.dtype)
self.assertEqual(result.ragged_rank, outputs.ragged_rank)
self.assertAllEqual(result, outputs)
@parameterized.parameters([
dict(
descr='Upcast requires uniform inner dimension',
inputs=ragged_factory_ops.constant_value(
[[[1, 0], [2, 0]], [[3, 0]]],
dtype=dtypes.int32,
ragged_rank=2,
),
cast_to_dtype=dtypes.int64,
exception=ValueError,
message='`input.flat_values` is required to have rank >= 2'),
]) # pyformat: disable
def testBitcastError(self,
descr,
inputs,
cast_to_dtype,
exception,
message,
name=None):
with self.assertRaisesRegex(exception, message):
result = ragged_array_ops.bitcast(inputs, cast_to_dtype, name)
self.evaluate(result)
if __name__ == '__main__':
googletest.main()
| RaggedSplitOpTest |
python | django__django | django/contrib/admin/helpers.py | {
"start": 1074,
"end": 2597
} | class ____:
def __init__(
self,
form,
fieldsets,
prepopulated_fields,
readonly_fields=None,
model_admin=None,
):
self.form, self.fieldsets = form, fieldsets
self.prepopulated_fields = [
{"field": form[field_name], "dependencies": [form[f] for f in dependencies]}
for field_name, dependencies in prepopulated_fields.items()
]
self.model_admin = model_admin
if readonly_fields is None:
readonly_fields = ()
self.readonly_fields = readonly_fields
def __repr__(self):
return (
f"<{self.__class__.__qualname__}: "
f"form={self.form.__class__.__qualname__} "
f"fieldsets={self.fieldsets!r}>"
)
def __iter__(self):
for name, options in self.fieldsets:
yield Fieldset(
self.form,
name,
readonly_fields=self.readonly_fields,
model_admin=self.model_admin,
**options,
)
@property
def errors(self):
return self.form.errors
@property
def non_field_errors(self):
return self.form.non_field_errors
@property
def fields(self):
return self.form.fields
@property
def is_bound(self):
return self.form.is_bound
@property
def media(self):
media = self.form.media
for fs in self:
media += fs.media
return media
| AdminForm |
python | scrapy__scrapy | tests/test_spidermiddleware_referer.py | {
"start": 24557,
"end": 24692
} | class ____(MixinStrictOrigin, TestRefererMiddleware):
req_meta = {"referrer_policy": POLICY_STRICT_ORIGIN}
| TestRequestMetaSrictOrigin |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/data_structures/stack_ops_test.py | {
"start": 1184,
"end": 6335
} | class ____(test.TestCase):
def _testStackPushPop(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
h = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="foo")
c = gen_data_flow_ops.stack_push_v2(h, [[4.0, 5.0]])
with ops.control_dependencies([c]):
c1 = gen_data_flow_ops.stack_pop_v2(h, dtypes.float32)
self.assertAllClose([[4.0, 5.0]], self.evaluate(c1))
@test_util.run_deprecated_v1
def testStackPushPop(self):
self._testStackPushPop(use_gpu=False)
self._testStackPushPop(use_gpu=True)
def _testStackPushPopSwap(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
a = np.arange(2000)
x = constant_op.constant(a, dtype=dtypes.float32)
h = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="foo")
c = gen_data_flow_ops.stack_push_v2(h, x, swap_memory=True)
with ops.control_dependencies([c]):
c1 = gen_data_flow_ops.stack_pop_v2(h, dtypes.float32)
self.assertAllClose(a, self.evaluate(c1))
@test_util.run_deprecated_v1
def testStackPushPopSwap(self):
self._testStackPushPopSwap(use_gpu=False)
self._testStackPushPopSwap(use_gpu=True)
def _testStackWhileSwap(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
n = constant_op.constant(0)
h = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="foo")
def c(x):
return math_ops.less(x, 10)
def b(x):
with ops.control_dependencies([x]):
a = constant_op.constant(np.ones(2000), dtype=dtypes.float32)
v = gen_data_flow_ops.stack_push_v2(h, a, swap_memory=True)
with ops.control_dependencies([v]):
return math_ops.add(x, 1)
r = while_loop.while_loop(c, b, [n])
v = constant_op.constant(np.zeros(2000), dtype=dtypes.float32)
def c1(x, y):
del y
return math_ops.greater(x, 0)
def b1(x, y):
nx = math_ops.subtract(x, 1)
ny = y + gen_data_flow_ops.stack_pop_v2(h, dtypes.float32)
return [nx, ny]
_, ry = while_loop.while_loop(
c1, b1, [r, v],
[r.get_shape(), tensor_shape.unknown_shape()])
self.assertAllClose(np.ones(2000) * 10.0, self.evaluate(ry))
@test_util.run_v1_only("b/120545219")
def testStackWhileSwap(self):
self._testStackWhileSwap(use_gpu=False)
self._testStackWhileSwap(use_gpu=True)
def _testMultiStack(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
h1 = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="foo")
c1 = gen_data_flow_ops.stack_push_v2(h1, 4.0)
with ops.control_dependencies([c1]):
c1 = gen_data_flow_ops.stack_pop_v2(h1, dtypes.float32)
h2 = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="bar")
c2 = gen_data_flow_ops.stack_push_v2(h2, 5.0)
with ops.control_dependencies([c2]):
c2 = gen_data_flow_ops.stack_pop_v2(h2, dtypes.float32)
r = c1 + c2
self.assertAllClose(9.0, self.evaluate(r))
@test_util.run_deprecated_v1
def testMultiStack(self):
self._testMultiStack(use_gpu=False)
self._testMultiStack(use_gpu=True)
def _testSameNameStacks(self, use_gpu):
"""Different stacks with the same name do not interfere."""
with self.cached_session(use_gpu=use_gpu) as sess:
h1 = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="foo")
h2 = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="foo")
c1 = gen_data_flow_ops.stack_push_v2(h1, 4.0)
with ops.control_dependencies([c1]):
c2 = gen_data_flow_ops.stack_push_v2(h2, 5.0)
with ops.control_dependencies([c2]):
pop1 = gen_data_flow_ops.stack_pop_v2(h1, dtypes.float32)
pop2 = gen_data_flow_ops.stack_pop_v2(h2, dtypes.float32)
out1, out2 = self.evaluate([pop1, pop2])
self.assertAllClose(out1, 4.0)
self.assertAllClose(out2, 5.0)
@test_util.run_deprecated_v1
def testSameNameStacks(self):
self._testSameNameStacks(use_gpu=False)
self._testSameNameStacks(use_gpu=True)
def _testCloseStack(self, use_gpu):
with self.cached_session(use_gpu=use_gpu) as sess:
h = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="foo")
c1 = gen_data_flow_ops.stack_close_v2(h)
self.evaluate(c1)
@test_util.run_deprecated_v1
def testCloseStack(self):
self._testCloseStack(use_gpu=False)
self._testCloseStack(use_gpu=True)
def _testPushCloseStack(self, use_gpu):
with self.cached_session(use_gpu=use_gpu) as sess:
h = gen_data_flow_ops.stack_v2(
-1, elem_type=dtypes.float32, stack_name="foo")
c = gen_data_flow_ops.stack_push_v2(h, [[4.0, 5.0]])
with ops.control_dependencies([c]):
c1 = gen_data_flow_ops.stack_close_v2(h)
self.evaluate(c1)
@test_util.run_deprecated_v1
def testPushCloseStack(self):
self._testPushCloseStack(use_gpu=False)
self._testPushCloseStack(use_gpu=True)
| StackOpTest |
python | ray-project__ray | rllib/core/learner/torch/tests/test_torch_learner_compile.py | {
"start": 412,
"end": 4371
} | class ____(unittest.TestCase):
@classmethod
def setUp(cls) -> None:
ray.init()
@classmethod
def tearDown(cls) -> None:
ray.shutdown()
# Todo (rllib-team): Fix for torch 2.0+
@unittest.skip("Failing with torch >= 2.0")
@unittest.skipIf(not _dynamo_is_available(), "torch._dynamo not available")
def test_torch_compile(self):
"""Test if torch.compile() can be applied and used on the learner.
Also tests if we can update with the compiled update method without errors.
"""
env = gym.make("CartPole-v1")
is_multi_agents = [False, True]
what_to_compiles = [
TorchCompileWhatToCompile.FORWARD_TRAIN,
TorchCompileWhatToCompile.COMPLETE_UPDATE,
]
for is_multi_agent, what_to_compile in itertools.product(
is_multi_agents, what_to_compiles
):
print(
f"Testing is_multi_agent={is_multi_agent},"
f"what_to_compile={what_to_compile}"
)
config = BaseTestingAlgorithmConfig().framework(
torch_compile_learner=True,
torch_compile_learner_what_to_compile=what_to_compile,
)
learner = config.build_learner(env=env)
learner.build()
reader = get_cartpole_dataset_reader(batch_size=512)
for iter_i in range(10):
batch = reader.next()
learner.update(batch.as_multi_agent())
rl_module_spec = config.get_default_rl_module_spec()
rl_module_spec.observation_space = env.observation_space
rl_module_spec.action_space = env.action_space
learner.add_module(module_id="another_module", module_spec=rl_module_spec)
for iter_i in range(10):
batch = MultiAgentBatch(
{"another_module": reader.next(), "default_policy": reader.next()},
0,
)
learner.update(batch)
learner.remove_module(module_id="another_module")
# Todo (rllib-team): Fix for torch 2.0+
@unittest.skip("Failing with torch >= 2.0")
@unittest.skipIf(not _dynamo_is_available(), "torch._dynamo not available")
def test_torch_compile_no_breaks(self):
"""Tests if torch.compile() does encounter too many breaks.
torch.compile() should ideally not encounter any breaks when compiling the
update method of the learner. This method tests if we encounter only a given
number of breaks.
"""
env = gym.make("CartPole-v1")
config = BaseTestingAlgorithmConfig().framework(torch_compile_learner=True)
learner = config.build_learner(env=env)
import torch._dynamo as dynamo
reader = get_cartpole_dataset_reader(batch_size=512)
batch = reader.next().as_multi_agent()
batch = learner._convert_batch_type(batch)
# The followingcall to dynamo.explain() breaks depending on the torch version.
# It works for torch==2.0.0.
# TODO(Artur): Fit this to to the correct torch version once it is enabled on
# CI.
# This is a helper method of dynamo to analyze where breaks occur.
(
explanation,
out_guards,
graphs,
ops_per_graph,
break_reasons,
explanation_verbose,
) = dynamo.explain(learner._update, batch)
print(explanation_verbose)
# There should be only one break reason - `return_value` - since inputs and
# outputs are not checked
# TODO(Artur): Attempt bringing breaks down to 1. (This may not be possible)
# Note: This test is skipped on CI if torch dynamo is available.
self.assertEqual(len(break_reasons), 3)
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
| TestLearner |
python | pandas-dev__pandas | pandas/core/computation/ops.py | {
"start": 13726,
"end": 14183
} | class ____(Op):
def __init__(self, func, args) -> None:
super().__init__(func.name, args)
self.func = func
def __call__(self, env):
# error: "Op" not callable
operands = [op(env) for op in self.operands] # type: ignore[operator]
return self.func.func(*operands)
def __repr__(self) -> str:
operands = map(str, self.operands)
return pprint_thing(f"{self.op}({','.join(operands)})")
| MathCall |
python | django__django | tests/test_runner_apps/databases/tests.py | {
"start": 294,
"end": 365
} | class ____(NoDatabaseTests):
databases = {"other"}
| OtherDatabaseTests |
python | marshmallow-code__marshmallow | tests/test_schema.py | {
"start": 71406,
"end": 73958
} | class ____:
class MySchema(Schema):
int_no_default = fields.Int(allow_none=True)
str_no_default = fields.Str(allow_none=True)
list_no_default = fields.List(fields.Str, allow_none=True)
nested_no_default = fields.Nested(UserSchema, many=True, allow_none=True)
int_with_default = fields.Int(allow_none=True, dump_default=42)
str_with_default = fields.Str(allow_none=True, dump_default="foo")
@pytest.fixture
def schema(self):
return self.MySchema()
@pytest.fixture
def data(self):
return dict(
int_no_default=None,
str_no_default=None,
list_no_default=None,
nested_no_default=None,
int_with_default=None,
str_with_default=None,
)
def test_missing_inputs_are_excluded_from_dump_output(self, schema, data):
for key in [
"int_no_default",
"str_no_default",
"list_no_default",
"nested_no_default",
]:
d = data.copy()
del d[key]
result = schema.dump(d)
# the missing key is not in the serialized result
assert key not in result
# the rest of the keys are in the result
assert all(k in result for k in d)
def test_none_is_serialized_to_none(self, schema, data):
errors = schema.validate(data)
assert errors == {}
result = schema.dump(data)
for key in data:
msg = f"result[{key!r}] should be None"
assert result[key] is None, msg
def test_default_and_value_missing(self, schema, data):
del data["int_with_default"]
del data["str_with_default"]
result = schema.dump(data)
assert result["int_with_default"] == 42
assert result["str_with_default"] == "foo"
def test_loading_none(self, schema, data):
result = schema.load(data)
for key in data:
assert result[key] is None
def test_missing_inputs_are_excluded_from_load_output(self, schema, data):
for key in [
"int_no_default",
"str_no_default",
"list_no_default",
"nested_no_default",
]:
d = data.copy()
del d[key]
result = schema.load(d)
# the missing key is not in the deserialized result
assert key not in result
# the rest of the keys are in the result
assert all(k in result for k in d)
| TestDefaults |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/policy/torch_policy.py | {
"start": 631,
"end": 6604
} | class ____(Policy):
def __init__(
self,
seed: int,
behavior_spec: BehaviorSpec,
network_settings: NetworkSettings,
actor_cls: type,
actor_kwargs: Dict[str, Any],
):
"""
Policy that uses a multilayer perceptron to map the observations to actions. Could
also use a CNN to encode visual input prior to the MLP. Supports discrete and
continuous actions, as well as recurrent networks.
:param seed: Random seed.
:param behavior_spec: Assigned BehaviorSpec object.
:param network_settings: Defined network parameters.
:param actor_cls: The type of Actor
:param actor_kwargs: Keyword args for the Actor class
"""
super().__init__(seed, behavior_spec, network_settings)
self.global_step = (
GlobalSteps()
) # could be much simpler if TorchPolicy is nn.Module
self.stats_name_to_update_name = {
"Losses/Value Loss": "value_loss",
"Losses/Policy Loss": "policy_loss",
}
self.actor = actor_cls(
observation_specs=self.behavior_spec.observation_specs,
network_settings=network_settings,
action_spec=behavior_spec.action_spec,
**actor_kwargs,
)
# Save the m_size needed for export
self._export_m_size = self.m_size
# m_size needed for training is determined by network, not trainer settings
self.m_size = self.actor.memory_size
self.actor.to(default_device())
@property
def export_memory_size(self) -> int:
"""
Returns the memory size of the exported ONNX policy. This only includes the memory
of the Actor and not any auxillary networks.
"""
return self._export_m_size
def _extract_masks(self, decision_requests: DecisionSteps) -> np.ndarray:
device = default_device()
mask = None
if self.behavior_spec.action_spec.discrete_size > 0:
num_discrete_flat = np.sum(self.behavior_spec.action_spec.discrete_branches)
mask = torch.ones(
[len(decision_requests), num_discrete_flat], device=device
)
if decision_requests.action_mask is not None:
mask = torch.as_tensor(
1 - np.concatenate(decision_requests.action_mask, axis=1),
device=device,
)
return mask
@timed
def evaluate(
self, decision_requests: DecisionSteps, global_agent_ids: List[str]
) -> Dict[str, Any]:
"""
Evaluates policy for the agent experiences provided.
:param global_agent_ids:
:param decision_requests: DecisionStep object containing inputs.
:return: Outputs from network as defined by self.inference_dict.
"""
obs = decision_requests.obs
masks = self._extract_masks(decision_requests)
device = default_device()
tensor_obs = [torch.as_tensor(np_ob, device=device) for np_ob in obs]
memories = torch.as_tensor(
self.retrieve_memories(global_agent_ids), device=device
).unsqueeze(0)
with torch.no_grad():
action, run_out, memories = self.actor.get_action_and_stats(
tensor_obs, masks=masks, memories=memories
)
run_out["action"] = action.to_action_tuple()
if "log_probs" in run_out:
run_out["log_probs"] = run_out["log_probs"].to_log_probs_tuple()
if "entropy" in run_out:
run_out["entropy"] = ModelUtils.to_numpy(run_out["entropy"])
if self.use_recurrent:
run_out["memory_out"] = ModelUtils.to_numpy(memories).squeeze(0)
return run_out
def get_action(
self, decision_requests: DecisionSteps, worker_id: int = 0
) -> ActionInfo:
"""
Decides actions given observations information, and takes them in environment.
:param worker_id:
:param decision_requests: A dictionary of behavior names and DecisionSteps from environment.
:return: an ActionInfo containing action, memories, values and an object
to be passed to add experiences
"""
if len(decision_requests) == 0:
return ActionInfo.empty()
global_agent_ids = [
get_global_agent_id(worker_id, int(agent_id))
for agent_id in decision_requests.agent_id
] # For 1-D array, the iterator order is correct.
run_out = self.evaluate(decision_requests, global_agent_ids)
self.save_memories(global_agent_ids, run_out.get("memory_out"))
self.check_nan_action(run_out.get("action"))
return ActionInfo(
action=run_out.get("action"),
env_action=run_out.get("env_action"),
outputs=run_out,
agent_ids=list(decision_requests.agent_id),
)
def get_current_step(self):
"""
Gets current model step.
:return: current model step.
"""
return self.global_step.current_step
def set_step(self, step: int) -> int:
"""
Sets current model step to step without creating additional ops.
:param step: Step to set the current model step to.
:return: The step the model was set to.
"""
self.global_step.current_step = step
return step
def increment_step(self, n_steps):
"""
Increments model step.
"""
self.global_step.increment(n_steps)
return self.get_current_step()
    def load_weights(self, values: List[np.ndarray]) -> None:
        """Load the given weights into the actor network.

        :param values: Weights to load; handed directly to
            ``load_state_dict`` on the actor.
        """
        # NOTE(review): the annotation says List[np.ndarray] but
        # load_state_dict normally expects a state-dict mapping (what
        # get_weights() returns) -- confirm what callers actually pass.
        self.actor.load_state_dict(values)
    def init_load_weights(self) -> None:
        """Intentional no-op; presumably kept to satisfy a shared policy
        interface -- verify against the base class."""
        pass
def get_weights(self) -> List[np.ndarray]:
return copy.deepcopy(self.actor.state_dict())
def get_modules(self):
return {"Policy": self.actor, "global_step": self.global_step}
| TorchPolicy |
python | ansible__ansible | lib/ansible/modules/hostname.py | {
"start": 24603,
"end": 24722
} | class ____(Hostname):
platform = 'Linux'
distribution = 'Ubuntu'
strategy_class = FileStrategy
| UbuntuHostname |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-cli/dagster_dg_cli/api_layer/schemas/asset.py | {
"start": 1264,
"end": 1754
} | class ____(BaseModel):
"""Asset status information for status view."""
asset_health: Optional[str] # Overall health status
materialization_status: Optional[str]
freshness_status: Optional[str]
asset_checks_status: Optional[str]
health_metadata: Optional[DgApiAssetHealthMetadata]
latest_materialization: Optional[DgApiAssetMaterialization]
freshness_info: Optional[DgApiAssetFreshnessInfo]
checks_status: Optional[DgApiAssetChecksStatus]
| DgApiAssetStatus |
python | jazzband__django-formtools | tests/wizard/namedwizardtests/tests.py | {
"start": 391,
"end": 13029
} | class ____:
    def setUp(self):
        """Create the shared test user and a private copy of the step data."""
        self.testuser, created = User.objects.get_or_create(username='testuser1')
        # Get new step data, since we modify it during the tests.
        self.wizard_step_data = copy.deepcopy(self.wizard_step_data)
        self.wizard_step_data[0]['form1-user'] = self.testuser.pk
    def tearDown(self):
        """Delete any files the wizard stored so later tests start clean."""
        # Ensure that there are no files in the storage which could lead to false
        # results in the next tests. Deleting the whole storage dir is not really
        # an option since the storage is defined on the module level and can't be
        # easily reinitialized. (FIXME: The tests here should use the view classes
        # directly instead of the test client, then the storage issues would go
        # away too.)
        for file in temp_storage.listdir('')[1]:
            temp_storage.delete(file)
    def test_initial_call(self):
        """The start URL redirects to step one with correct step metadata."""
        response = self.client.get(reverse('%s_start' % self.wizard_urlname))
        self.assertEqual(response.status_code, 302)
        response = self.client.get(response.url)
        self.assertEqual(response.status_code, 200)
        wizard = response.context['wizard']
        self.assertEqual(wizard['steps'].current, 'form1')
        self.assertEqual(wizard['steps'].step0, 0)
        self.assertEqual(wizard['steps'].step1, 1)
        self.assertEqual(wizard['steps'].last, 'form4')
        self.assertEqual(wizard['steps'].prev, None)
        self.assertEqual(wizard['steps'].next, 'form2')
        self.assertEqual(wizard['steps'].count, 4)
        self.assertEqual(wizard['url_name'], self.wizard_urlname)
    def test_initial_call_with_params(self):
        """GET parameters on the start URL survive the redirect to step one."""
        get_params = {'getvar1': 'getval1', 'getvar2': 'getval2'}
        response = self.client.get(reverse('%s_start' % self.wizard_urlname),
                                   get_params)
        self.assertEqual(response.status_code, 302)
        # Test for proper redirect GET parameters
        location = response.url
        self.assertNotEqual(location.find('?'), -1)
        querydict = QueryDict(location[location.find('?') + 1:])
        self.assertEqual(dict(querydict.items()), get_params)
    def test_form_post_error(self):
        """Posting incomplete data re-renders the same step with field errors."""
        response = self.client.post(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}),
            self.wizard_step_1_data)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form1')
        self.assertEqual(response.context['wizard']['form'].errors,
                         {'name': ['This field is required.'],
                          'user': ['This field is required.']})
    def test_form_post_success(self):
        """Posting valid data advances the wizard to the next step."""
        response = self.client.post(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}),
            self.wizard_step_data[0])
        response = self.client.get(response.url)
        self.assertEqual(response.status_code, 200)
        wizard = response.context['wizard']
        self.assertEqual(wizard['steps'].current, 'form2')
        self.assertEqual(wizard['steps'].step0, 1)
        self.assertEqual(wizard['steps'].prev, 'form1')
        self.assertEqual(wizard['steps'].next, 'form3')
    def test_form_stepback(self):
        """Posting ``wizard_goto_step`` navigates back to the previous step."""
        response = self.client.get(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form1')
        response = self.client.post(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}),
            self.wizard_step_data[0])
        response = self.client.get(response.url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form2')
        response = self.client.post(
            reverse(self.wizard_urlname, kwargs={
                'step': response.context['wizard']['steps'].current
            }), {'wizard_goto_step': response.context['wizard']['steps'].prev})
        response = self.client.get(response.url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form1')
    def test_form_jump(self):
        """A step can be opened directly via its named URL."""
        response = self.client.get(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form1')
        response = self.client.get(
            reverse(self.wizard_urlname, kwargs={'step': 'form3'}))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form3')
    def test_form_finish(self):
        """Walking through all four steps collects the data and cleans up the
        uploaded file afterwards."""
        response = self.client.get(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form1')
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[0])
        response = self.client.get(response.url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form2')
        post_data = self.wizard_step_data[1]
        with open(__file__, 'rb') as post_file:
            post_data['form2-file1'] = post_file
            response = self.client.post(
                reverse(self.wizard_urlname,
                        kwargs={'step': response.context['wizard']['steps'].current}),
                post_data)
        response = self.client.get(response.url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form3')
        # Check that the file got uploaded properly.
        with open(__file__, 'rb') as f, temp_storage.open(UPLOADED_FILE_NAME) as f2:
            self.assertEqual(f.read(), f2.read())
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[2])
        response = self.client.get(response.url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form4')
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[3])
        response = self.client.get(response.url)
        self.assertEqual(response.status_code, 200)
        # After the wizard is done no files should exist anymore.
        self.assertFalse(temp_storage.exists(UPLOADED_FILE_NAME))
        all_data = response.context['form_list']
        del all_data[1]['file1']
        self.assertEqual(all_data, [
            {'name': 'Pony', 'thirsty': True, 'user': self.testuser},
            {'address1': '123 Main St', 'address2': 'Djangoland'},
            {'random_crap': 'blah blah'},
            [{'random_crap': 'blah blah'}, {'random_crap': 'blah blah'}]])
    def test_cleaned_data(self):
        """``all_cleaned_data`` and ``form_dict`` expose the merged cleaned
        data of every completed step."""
        response = self.client.get(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}))
        self.assertEqual(response.status_code, 200)
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[0])
        response = self.client.get(response.url)
        self.assertEqual(response.status_code, 200)
        post_data = self.wizard_step_data[1]
        with open(__file__, 'rb') as post_file:
            post_data['form2-file1'] = post_file
            response = self.client.post(
                reverse(self.wizard_urlname,
                        kwargs={'step': response.context['wizard']['steps'].current}),
                post_data)
        response = self.client.get(response.url)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(temp_storage.exists(UPLOADED_FILE_NAME))
        step2_url = reverse(self.wizard_urlname, kwargs={'step': 'form2'})
        response = self.client.get(step2_url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form2')
        with open(__file__, 'rb') as f, temp_storage.open(UPLOADED_FILE_NAME) as f2:
            self.assertEqual(f.read(), f2.read())
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[2])
        response = self.client.get(response.url)
        self.assertEqual(response.status_code, 200)
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[3])
        response = self.client.get(response.url)
        self.assertEqual(response.status_code, 200)
        all_data = response.context['all_cleaned_data']
        self.assertEqual(all_data['file1'].name, UPLOADED_FILE_NAME)
        self.assertTrue(all_data['file1'].closed)
        self.assertFalse(temp_storage.exists(UPLOADED_FILE_NAME))
        del all_data['file1']
        self.assertEqual(
            all_data,
            {'name': 'Pony', 'thirsty': True, 'user': self.testuser,
             'address1': '123 Main St', 'address2': 'Djangoland',
             'random_crap': 'blah blah', 'formset-form4': [
                 {'random_crap': 'blah blah'},
                 {'random_crap': 'blah blah'}
             ]})
        form_dict = response.context['form_dict']
        self.assertIn('form1', form_dict.keys())
        self.assertIn('form2', form_dict.keys())
        self.assertEqual(form_dict['form1'].cleaned_data, response.context['form_list'][0])
    def test_manipulated_data(self):
        """Dropping the session and wizard cookies mid-run restarts the wizard
        at step one instead of finishing."""
        response = self.client.get(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}))
        self.assertEqual(response.status_code, 200)
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[0])
        response = self.client.get(response.url)
        self.assertEqual(response.status_code, 200)
        post_data = self.wizard_step_data[1]
        with open(__file__, 'rb') as post_file:
            post_data['form2-file1'] = post_file
            response = self.client.post(
                reverse(self.wizard_urlname,
                        kwargs={'step': response.context['wizard']['steps'].current}),
                post_data)
        response = self.client.get(response.url)
        self.assertEqual(response.status_code, 200)
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[2])
        loc = response.url
        response = self.client.get(loc)
        self.assertEqual(response.status_code, 200, loc)
        # Simulate a lost/tampered state by discarding both storage cookies.
        self.client.cookies.pop('sessionid', None)
        self.client.cookies.pop('wizard_cookie_contact_wizard', None)
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[3])
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form1')
    def test_form_reset(self):
        """The ``reset`` GET flag restarts the wizard from the first step."""
        response = self.client.post(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}),
            self.wizard_step_data[0])
        response = self.client.get(response.url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form2')
        response = self.client.get(
            '%s?reset=1' % reverse('%s_start' % self.wizard_urlname))
        self.assertEqual(response.status_code, 302)
        response = self.client.get(response.url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form1')
@override_settings(ROOT_URLCONF='tests.wizard.namedwizardtests.urls')
| NamedWizardTests |
python | wandb__wandb | wandb/sdk/artifacts/_generated/project_artifact_collection.py | {
"start": 532,
"end": 861
} | class ____(GQLResult):
artifact_collection: Optional[ArtifactCollectionFragment] = Field(
alias="artifactCollection"
)
# Resolve forward references now that all models are defined (pydantic v2).
ProjectArtifactCollection.model_rebuild()
ProjectArtifactCollectionProject.model_rebuild()
ProjectArtifactCollectionProjectArtifactType.model_rebuild()
| ProjectArtifactCollectionProjectArtifactType |
python | pytorch__pytorch | test/distributed/pipelining/test_schedule_multiproc.py | {
"start": 7113,
"end": 37888
} | class ____(MultiProcContinuousTest):
world_size = 4
    @classmethod
    def backend_str(cls) -> str:
        """Return the distributed backend name used for this test class."""
        # Testing with NCCL backend
        return backend
    @property
    def device(self) -> torch.device:
        """Accelerator device owned by this rank."""
        return torch.device(device_type, self.rank)
    @property
    def config(self) -> PipelineTestConfig:
        """Lazily create and return the pipeline test configuration."""
        # Constructed on every access so it always reflects the current
        # rank and device.
        return PipelineTestConfig(
            world_size=self.world_size, device=self.device, rank=self.rank
        )
    @requires_accelerator_dist_backend(["nccl", "xccl"])
    @skip_but_pass_in_sandcastle_if(
        not TEST_MULTIACCELERATOR, f"{backend} test requires 2+ GPUs"
    )
    @parametrize("ScheduleClass", [_ScheduleForwardOnly])
    @skip_if_lt_x_gpu(4)
    def test_forward_only(self, ScheduleClass):
        """A forward-only schedule matches the reference model over many
        iterations, feeding each output back in as the next input."""
        mod, mod_ref, x, _, _ = setup_models_and_data(self.config)
        x_clone = x.clone()
        num_microbatches = 2 * self.world_size
        stage, _, _ = create_single_stage_pipeline(
            self.config, mod, x, num_microbatches
        )
        schedule = ScheduleClass(stage, num_microbatches, scale_grads=False)
        # Run forward-only schedule
        out = None
        num_iters = 20
        for _ in range(num_iters):
            if self.rank == 0:
                schedule.step(x)
                dist.recv(x, src=self.world_size - 1)
            elif self.rank == self.world_size - 1:
                out = schedule.step()
                dist.send(out, dst=0)
            else:
                schedule.step()
        # Validate pipelined output matches reference model
        if self.rank == self.world_size - 1:
            for _ in range(num_iters):
                x_clone = mod_ref(x_clone)
            torch.testing.assert_close(x_clone, out)
    @requires_accelerator_dist_backend(["nccl", "xccl"])
    @skip_but_pass_in_sandcastle_if(
        not TEST_MULTIACCELERATOR, f"{backend} test requires 2+ GPUs"
    )
    @parametrize(
        "ScheduleClass",
        [
            ScheduleGPipe,
            Schedule1F1B,
            ScheduleInterleaved1F1B,
            ScheduleLoopedBFS,
            ScheduleInterleavedZeroBubble,
        ],
    )
    @skip_if_lt_x_gpu(4)
    def test_eval_inference_mode(self, ScheduleClass):
        """``schedule.eval()`` must compute losses without producing any
        parameter gradients."""
        num_microbatches = 4
        if ScheduleClass in [
            ScheduleInterleaved1F1B,
            ScheduleLoopedBFS,
            ScheduleInterleavedZeroBubble,
        ]:
            # Multi-stage schedules
            stages_per_rank = 2
            n_stages = stages_per_rank * self.world_size
            mod, _, x, target, loss_fn = setup_models_and_data(
                self.config, n_layers=n_stages
            )
            # Create multi-stage pipeline
            stages, stage_modules, _ = create_multi_stage_pipeline(
                self.config, mod, stages_per_rank, n_stages
            )
            schedule = ScheduleClass(
                stages, num_microbatches, loss_fn=loss_fn, scale_grads=False
            )
        else:
            # Single-stage schedules
            mod, _, x, target, loss_fn = setup_models_and_data(self.config)
            # Create single-stage pipeline
            stage, stage_module, _ = create_single_stage_pipeline(
                self.config, mod, x, num_microbatches
            )
            stage_modules = [stage_module]
            schedule = ScheduleClass(
                stage, num_microbatches, loss_fn=loss_fn, scale_grads=False
            )
        # Clear gradients and run eval
        zero_gradients(stage_modules)
        losses = []
        if self.rank == 0:
            # Support with and without no_grad()
            with torch.no_grad():
                schedule.eval(x)
        elif self.rank == self.world_size - 1:
            schedule.eval(target=target, losses=losses)
        else:
            schedule.eval()
        # Check that gradients were NOT computed during eval
        grad_computed_eval = any(
            param.grad is not None
            for stage_module in stage_modules
            for param in stage_module.parameters()
        )
        # Verify that gradients were not computed during eval
        self.assertFalse(
            grad_computed_eval, "Gradients should not be computed during eval()"
        )
        # Verify that losses are still computed during eval
        if self.rank == self.world_size - 1:
            self.assertTrue(len(losses) > 0, "Losses should be computed during eval()")
    @requires_accelerator_dist_backend(["nccl", "xccl"])
    @skip_but_pass_in_sandcastle_if(
        not TEST_MULTIACCELERATOR, f"{backend} test requires 2+ GPUs"
    )
    @parametrize(
        "ScheduleClass",
        [
            ScheduleGPipe,
            Schedule1F1B,
            ScheduleInterleaved1F1B,
            ScheduleLoopedBFS,
            ScheduleInterleavedZeroBubble,
        ],
    )
    @skip_if_lt_x_gpu(4)
    def test_return_output(self, ScheduleClass):
        """``step(return_outputs=False)`` must return None on the last stage."""
        num_microbatches = 4
        if ScheduleClass in [
            ScheduleInterleaved1F1B,
            ScheduleLoopedBFS,
            ScheduleInterleavedZeroBubble,
        ]:
            # Multi-stage schedules
            stages_per_rank = 2
            n_stages = stages_per_rank * self.world_size
            mod, _, x, target, loss_fn = setup_models_and_data(
                self.config, n_layers=n_stages
            )
            # Create multi-stage pipeline
            stages, stage_modules, _ = create_multi_stage_pipeline(
                self.config, mod, stages_per_rank, n_stages
            )
            schedule = ScheduleClass(
                stages,
                num_microbatches,
                loss_fn=loss_fn,
                scale_grads=False,
            )
        else:
            # Single-stage schedules
            mod, _, x, target, loss_fn = setup_models_and_data(self.config)
            # Create single-stage pipeline
            stage, stage_module, _ = create_single_stage_pipeline(
                self.config, mod, x, num_microbatches
            )
            schedule = ScheduleClass(
                stage,
                num_microbatches,
                loss_fn=loss_fn,
                scale_grads=False,
            )
        losses = []
        if self.rank == self.world_size - 1:
            output = schedule.step(target=target, losses=losses, return_outputs=False)
        else:
            schedule.step(x)
        # Verify that output is None
        if self.rank == self.world_size - 1:
            self.assertTrue(output is None, "Output should be None")
    @requires_accelerator_dist_backend(["nccl", "xccl"])
    @skip_but_pass_in_sandcastle_if(
        not TEST_MULTIACCELERATOR, f"{backend} test requires 2+ GPUs"
    )
    @parametrize("ScheduleClass", [ScheduleGPipe, Schedule1F1B])
    @skip_if_lt_x_gpu(4)
    def test_multi_iter(self, ScheduleClass):
        """A schedule can be stepped repeatedly (20x) without error."""
        mod, _, x, target, loss_fn = setup_models_and_data(self.config)
        chunks = 4
        stage, _, _ = create_single_stage_pipeline(self.config, mod, x, chunks)
        schedule = ScheduleClass(stage, chunks, loss_fn=loss_fn, scale_grads=False)
        # Run
        for _ in range(20):
            if self.rank == 0:
                schedule.step(x)
            elif self.rank == self.world_size - 1:
                losses = []
                schedule.step(target=target, losses=losses)
            else:
                schedule.step()
        dist.barrier(device_ids=[self.rank])
    @requires_accelerator_dist_backend(["nccl", "xccl"])
    @skip_but_pass_in_sandcastle_if(
        not TEST_MULTIACCELERATOR, f"{backend} test requires 2+ GPUs"
    )
    @parametrize("ScheduleClass", [ScheduleGPipe, Schedule1F1B])
    @skip_if_lt_x_gpu(4)
    def test_kwargs_with_tracer(self, ScheduleClass):
        """A traced pipeline forwards keyword arguments and matches the
        reference output and loss."""
        mod = ModelWithKwargs(d_hid, splits=self.world_size)
        mod.to(self.device)
        x = torch.randn(batch_size, d_hid, device=self.device)
        y = torch.randn(batch_size, d_hid, device=self.device)
        target = torch.randn(batch_size, d_hid, device=self.device)
        loss_fn = torch.nn.MSELoss(reduction="sum")
        chunks = 4
        x_mb = x.chunk(chunks)[0]
        y_mb = y.chunk(chunks)[0]
        pipe = pipeline(
            mod,
            mb_args=(x_mb,),
            mb_kwargs={"y": y_mb},
        )
        stage = pipe.build_stage(
            self.rank,
            self.device,
        )
        # Attach to a schedule
        schedule = ScheduleClass(stage, chunks, loss_fn=loss_fn, scale_grads=False)
        # Run
        out = None
        losses = []
        if self.rank == 0:
            schedule.step(x, y=y)
        elif self.rank == self.world_size - 1:
            out = schedule.step(target=target, losses=losses)
        else:
            schedule.step()
        dist.barrier(device_ids=[self.rank])
        # Last rank checks result
        if self.rank == self.world_size - 1:
            ref_out = mod(x, y=y)
            ref_loss = loss_fn(ref_out, target)
            pipe_loss = sum(losses)
            torch.testing.assert_close(out, ref_out, rtol=1e-2, atol=5e-3)
            torch.testing.assert_close(pipe_loss, ref_loss)
    @requires_accelerator_dist_backend(["nccl", "xccl"])
    @skip_but_pass_in_sandcastle_if(
        not TEST_MULTIACCELERATOR, f"{backend} test requires 2+ GPUs"
    )
    @parametrize("ScheduleClass", [ScheduleGPipe, Schedule1F1B])
    @skip_if_lt_x_gpu(4)
    def test_grad_with_tracer(self, ScheduleClass):
        """Gradients produced by a traced pipeline match the reference model."""
        mod, ref_mod, x, target, loss_fn = setup_models_and_data(self.config)
        # Run reference
        ref_out, ref_loss = run_reference_model(ref_mod, x, target, loss_fn)
        # Create pipeline and schedule
        chunks = 2 * self.world_size
        stage, stage_module, stage_modules = create_single_stage_pipeline(
            self.config, mod, x, chunks
        )
        schedule = ScheduleClass(stage, chunks, loss_fn=loss_fn, scale_grads=False)
        # Run pipeline
        out = None
        losses = []
        for _ in range(2):
            zero_gradients(stage_module)
            if self.rank == 0:
                schedule.step(x)
            elif self.rank == self.world_size - 1:
                out = schedule.step(target=target, losses=losses)
            else:
                schedule.step()
        dist.barrier(device_ids=[self.rank])
        # Last rank checks result
        if self.rank == self.world_size - 1:
            torch.testing.assert_close(out, ref_out)
            pipe_loss = sum(losses)
            torch.testing.assert_close(pipe_loss, ref_loss)
        # Check gradients using helper method
        check_gradients(self.config, stage_module, ref_mod)
    @requires_accelerator_dist_backend(["nccl", "xccl"])
    @skip_but_pass_in_sandcastle_if(
        not TEST_MULTIACCELERATOR, f"{backend} test requires 2+ GPUs"
    )
    @parametrize("ScheduleClass", [ScheduleGPipe, Schedule1F1B])
    @parametrize("shape_inference", [True, False])
    @skip_if_lt_x_gpu(4)
    def test_grad_with_manual(self, ScheduleClass, shape_inference):
        """Manually-built stages produce correct gradients both with and
        without automatic shape inference."""
        mod, ref_mod, x, target, loss_fn = setup_models_and_data(self.config)
        # Run reference
        ref_out, ref_loss = run_reference_model(ref_mod, x, target, loss_fn)
        # Create manual pipeline stage
        chunks = 2 * self.world_size
        stage, stage_module, _ = create_single_stage_pipeline(
            self.config, mod, x, chunks, use_tracer=False
        )
        # Handle shape inference
        if not shape_inference:
            input_args = (x.chunk(chunks)[0],)
            with torch.no_grad():
                output_args = stage_module(*input_args)
            stage = PipelineStage(
                stage_module,
                self.rank,
                self.world_size,
                self.device,
                input_args=input_args,
                output_args=output_args,
            )
        schedule = ScheduleClass(stage, chunks, loss_fn=loss_fn, scale_grads=False)
        # Run pipeline
        out = None
        losses = []
        for _ in range(2):
            zero_gradients(stage_module)
            if self.rank == 0:
                schedule.step(x)
            elif self.rank == self.world_size - 1:
                out = schedule.step(target=target, losses=losses)
            else:
                schedule.step()
        dist.barrier(device_ids=[self.rank])
        # Last rank checks result
        if self.rank == self.world_size - 1:
            torch.testing.assert_close(out, ref_out)
            pipe_loss = sum(losses)
            torch.testing.assert_close(pipe_loss, ref_loss)
        # Check gradients using helper method
        check_gradients(self.config, stage_module, ref_mod)
    @requires_accelerator_dist_backend(["nccl", "xccl"])
    @skip_but_pass_in_sandcastle_if(
        not TEST_MULTIACCELERATOR, f"{backend} test requires 2+ GPUs"
    )
    @parametrize(
        "ScheduleClass",
        [
            ScheduleInterleaved1F1B,
            ScheduleLoopedBFS,
            ScheduleInterleavedZeroBubble,
        ],
    )
    @skip_if_lt_x_gpu(4)
    def test_grad_with_manual_interleaved(self, ScheduleClass):
        """Interleaved multi-stage schedules produce correct gradients and do
        not leak tensors across steps."""
        stages_per_rank = 2
        n_stages = stages_per_rank * self.world_size
        mod, ref_mod, x, target, loss_fn = setup_models_and_data(
            self.config, n_layers=n_stages
        )
        # Run reference
        ref_out, ref_loss = run_reference_model(ref_mod, x, target, loss_fn)
        # Create multi-stage pipeline
        stages, stage_modules, submod_names = create_multi_stage_pipeline(
            self.config, mod, stages_per_rank, n_stages
        )
        print(f"Rank {self.rank} stages: {[stage.stage_index for stage in stages]}")
        num_microbatches = (
            ScheduleClass.num_microbatches
            if hasattr(ScheduleClass, "num_microbatches")
            else 2 * self.world_size
        )
        # Create schedule
        schedule = ScheduleClass(
            stages, num_microbatches, loss_fn=loss_fn, scale_grads=False
        )
        # Run pipeline with tensor leak checking
        out = None
        losses = []
        with check_leaked_tensors() as garbage_tensors:
            for _ in range(2):
                zero_gradients(stage_modules)
                if self.rank == 0:
                    schedule.step(x)
                elif self.rank == self.world_size - 1:
                    out = schedule.step(target=target, losses=losses)
                else:
                    schedule.step()
        self.assertEqual(
            len(garbage_tensors),
            0,
            "Found leaked tensors, check logs above for debug info",
        )
        dist.barrier()
        # Verify results
        if self.rank == self.world_size - 1:
            torch.testing.assert_close(out, ref_out)
            pipe_loss = sum(losses)
            torch.testing.assert_close(pipe_loss, ref_loss)
        # Check gradients - use relaxed tolerances for interleaved schedules
        # since gradients are small
        check_gradients(
            self.config, stage_modules, ref_mod, submod_names, rtol=5e-3, atol=5e-3
        )
    @requires_accelerator_dist_backend(["nccl", "xccl"])
    @skip_but_pass_in_sandcastle_if(
        not TEST_MULTIACCELERATOR, f"{backend} test requires 2+ GPUs"
    )
    @parametrize("ScheduleClass", [ScheduleInterleavedZeroBubble])
    @skip_if_lt_x_gpu(4)
    def test_schedule_with_weight_update_mlp_e2e(self, ScheduleClass):
        """Zero-bubble schedule with a custom per-stage dw_builder runs end to
        end and matches the reference model."""
        stages_per_rank = 2
        n_stages = stages_per_rank * self.world_size
        full_mod, ref_mod, x, target, _ = setup_models_and_data(
            self.config, n_layers=n_stages, model_class=MultiMLPWithDw
        )
        full_mod.toggle()
        loss_fn = MSELoss()
        # Run reference
        ref_out, ref_loss = run_reference_model(ref_mod, x, target, loss_fn)
        # Create multi-stage pipeline with custom dw_builder
        stages, stage_modules, submod_names = create_multi_stage_pipeline(
            self.config, full_mod, stages_per_rank, n_stages
        )
        class CustomState:
            # Tracks how often the deferred weight-gradient pass ran per stage.
            def __init__(self, stage_module, stage_idx, rank):
                self.i = 0
                self.stage_module = stage_module
                self.stage_idx = stage_idx
                self.rank = rank
            def dw_builder(self):
                def dw_runner():
                    self.i += 1
                    print(
                        f"[Rank {self.rank}] dw_count={self.i} stage={self.stage_idx}"
                    )
                    self.stage_module.compute_dW()
                return dw_runner
        # Create custom states and rebuild stages with dw_builder
        cs = {}
        stage_indices = [
            self.rank + i * self.world_size for i in range(stages_per_rank)
        ]
        for stage_module, stage_idx in zip(stage_modules, stage_indices):
            cs[stage_idx] = CustomState(stage_module, stage_idx, self.rank)
        stages = [
            PipelineStage(
                stage_module,
                stage_idx,
                n_stages,
                self.device,
                dw_builder=cs[stage_idx].dw_builder,
            )
            for stage_module, stage_idx in zip(stage_modules, stage_indices)
        ]
        schedule = ScheduleClass(stages, 2, loss_fn=loss_fn)
        # Run pipeline
        out = None
        losses = []
        for _ in range(2):
            zero_gradients(stage_modules)
            if self.rank == 0:
                schedule.step(x)
            elif self.rank == self.world_size - 1:
                out = schedule.step(target=target, losses=losses)
            else:
                schedule.step()
        dist.barrier(device_ids=[self.rank])
        # Verify results
        if self.rank == self.world_size - 1:
            torch.testing.assert_close(out, ref_out)
            pipe_loss = sum(losses) / len(losses)
            torch.testing.assert_close(pipe_loss, ref_loss)
        # Check gradients using helper method
        check_gradients(self.config, stage_modules, ref_mod, submod_names)
    @requires_accelerator_dist_backend(["nccl", "xccl"])
    @skip_but_pass_in_sandcastle_if(
        not TEST_MULTIACCELERATOR, f"{backend} test requires 2+ GPUs"
    )
    @parametrize(
        "schedule_class",
        [ScheduleZBVZeroBubble, ScheduleDualPipeV],
    )
    @skip_if_lt_x_gpu(4)
    def test_v_shape_schedules(self, schedule_class):
        """V-shaped stage placement (first and last stage both on rank 0)
        trains correctly."""
        n_stages = 8
        rank_stages = {0: [0, 7], 1: [1, 6], 2: [2, 5], 3: [3, 4]}
        mod, ref_mod, x, target, loss_fn = setup_models_and_data(
            self.config, n_layers=n_stages
        )
        # Run reference
        ref_out, ref_loss = run_reference_model(ref_mod, x, target, loss_fn)
        # Create multi-stage pipeline with custom stage indices
        num_microbatches = 8
        stage_indices = rank_stages[self.rank]
        stages, stage_modules, submod_names = create_multi_stage_pipeline(
            self.config, mod, len(stage_indices), n_stages, stage_indices
        )
        schedule = schedule_class(
            stages, num_microbatches, loss_fn=loss_fn, scale_grads=False
        )
        # Run pipeline - special case where first and last stage are on rank 0
        out = None
        losses = []
        for _ in range(2):
            zero_gradients(stage_modules)
            if self.rank == 0:
                out = schedule.step(x, target=target, losses=losses)
            else:
                schedule.step()
        # Verify results (rank 0 has both first and last stages)
        if self.rank == 0:
            torch.testing.assert_close(out, ref_out)
            pipe_loss = sum(losses)
            torch.testing.assert_close(pipe_loss, ref_loss)
        # Check gradients using helper method
        check_gradients(self.config, stage_modules, ref_mod, submod_names)
    @requires_accelerator_dist_backend(["nccl", "xccl"])
    @skip_but_pass_in_sandcastle_if(
        not TEST_MULTIACCELERATOR, f"{backend} test requires 2+ GPUs"
    )
    @skip_if_lt_x_gpu(4)
    def test_custom_function_callback(self):
        """Test the custom function callback functionality with _PipelineScheduleRuntime."""
        n_stages = 8
        rank_stages = {0: [0, 7], 1: [1, 6], 2: [2, 5], 3: [3, 4]}
        mod, ref_mod, x, target, loss_fn = setup_models_and_data(
            self.config, n_layers=n_stages
        )
        # Run reference
        ref_out, ref_loss = run_reference_model(ref_mod, x, target, loss_fn)
        # Create multi-stage pipeline with custom stage indices
        num_microbatches = 8
        stage_indices = rank_stages[self.rank]
        stages, stage_modules, submod_names = create_multi_stage_pipeline(
            self.config, mod, len(stage_indices), n_stages, stage_indices
        )
        # Use DualPipeV schedule as the base schedule
        base_schedule = ScheduleDualPipeV(
            stages, num_microbatches, loss_fn=loss_fn, scale_grads=False
        )
        base_schedule._prepare_schedule_with_comms(base_schedule.pipeline_order)
        # Track both types of callbacks separately
        forward_calls = []
        overlap_calls = []
        def forward_callback(action: _Action, ctx: _PipelineContext):
            """Custom callback for FORWARD computation that mimics the original implementation."""
            schedule = ctx.schedule_ref
            assert isinstance(schedule, _PipelineScheduleRuntime)
            stage_index_to_stage: dict[int, _PipelineStageBase] = {
                stage.stage_index: stage for stage in schedule._stages
            }
            stage = stage_index_to_stage[action.stage_index]
            stage_index = stage.stage_index
            mb_index = action.microbatch_index
            assert mb_index is not None
            fwd_recv_ops = schedule.fwd_recv_ops
            arg_mbs = ctx.arg_mbs
            kwarg_mbs = ctx.kwarg_mbs
            is_next_stage_on_this_rank = stage_index + 1 in stage_index_to_stage
            is_prev_stage_on_this_rank = stage_index - 1 in stage_index_to_stage
            # used in verification at the end
            forward_calls.append((stage_index, mb_index))
            if (
                not stage.is_first
                # no recv op expected for V-schedule special case (see [Note: V-schedule special case])
                and not is_prev_stage_on_this_rank
            ):
                assert (
                    stage_index,
                    mb_index,
                ) in fwd_recv_ops, f"Computing {action=} before receiving input"
                from torch.distributed.pipelining.schedules import _wait_batch_p2p
                _wait_batch_p2p(fwd_recv_ops.pop((stage_index, mb_index)))
            output = stage.forward_one_chunk(
                mb_index,
                arg_mbs[mb_index],  # type: ignore[index]
                kwarg_mbs[mb_index],  # type: ignore[index]
            )
            schedule._maybe_compute_loss(stage, output, ctx.target_mbs, mb_index)
            # SEND/RECV op are avoided for special case with 2 adjacent stages on same rank
            # see [Note: V-schedule special case]
            if is_next_stage_on_this_rank:
                stage_index_to_stage[stage_index + 1].set_local_fwd_input(
                    output, mb_index
                )
        def overlap_callback(action: _Action, ctx: _PipelineContext):
            """Custom callback for OVERLAP_F_B computation that mimics the original implementation."""
            schedule = ctx.schedule_ref
            assert isinstance(schedule, _PipelineScheduleRuntime)
            stage_index_to_stage: dict[int, _PipelineStageBase] = {
                stage.stage_index: stage for stage in schedule._stages
            }
            assert action.sub_actions is not None
            fwd_action = action.sub_actions[0]
            bwd_action = action.sub_actions[1]
            # Forward ========================================================
            forward_callback(fwd_action, ctx)
            overlap_calls.append(
                (
                    fwd_action.stage_index,
                    fwd_action.microbatch_index,
                    bwd_action.stage_index,
                    bwd_action.microbatch_index,
                )
            )
            # Backward ========================================================
            backward_stage_index = bwd_action.stage_index
            backward_stage = stage_index_to_stage[backward_stage_index]
            backward_mb_index = bwd_action.microbatch_index
            assert backward_mb_index is not None
            bwd_recv_ops = schedule.bwd_recv_ops
            is_next_stage_on_this_rank = (
                backward_stage.stage_index + 1 in stage_index_to_stage
            )
            is_prev_stage_on_this_rank = (
                backward_stage.stage_index - 1 in stage_index_to_stage
            )
            if (
                not backward_stage.is_last
                # no recv op expected for V-schedule special case (see [Note: V-schedule special case])
                and not is_next_stage_on_this_rank
            ):
                assert (
                    backward_stage_index,
                    backward_mb_index,
                ) in bwd_recv_ops, (
                    f"Attempted to run compute {action=} before receiving input"
                )
                # NOTE(review): _wait_batch_p2p is not imported in this
                # function's scope; presumably it is available at module
                # level in the original file -- verify.
                _wait_batch_p2p(
                    bwd_recv_ops.pop((backward_stage_index, backward_mb_index))
                )
            loss = schedule._maybe_get_loss(backward_stage, backward_mb_index)
            schedule.backward_counter[backward_stage_index] += 1
            last_backward = (
                schedule.backward_counter[backward_stage_index]
                == schedule._n_microbatches
            )
            grad_scale_factor = schedule._n_microbatches if schedule.scale_grads else 1
            backward_stage.backward_one_chunk(
                backward_mb_index,
                loss=loss,
                full_backward=True,
                last_backward=last_backward,
            )
            if last_backward:
                backward_stage.scale_grads(grad_scale_factor)
            # SEND/RECV op are avoided for special case with 2 adjacent stages on same rank
            # see [Note: V-schedule special case]
            if is_prev_stage_on_this_rank:
                stage_index_to_stage[backward_stage_index - 1].set_local_bwd_input(
                    backward_stage.get_local_bwd_output(backward_mb_index),
                    backward_mb_index,
                )
        # Add the callback for FORWARD computation type
        base_schedule.register_custom_function(FORWARD, forward_callback)
        base_schedule.register_custom_function(OVERLAP_F_B, overlap_callback)
        # Run pipeline - special case where first and last stage are on rank 0
        out = None
        losses = []
        num_loops = 2
        for _ in range(num_loops):
            zero_gradients(stage_modules)
            if self.rank == 0:
                out = base_schedule.step(x, target=target, losses=losses)
            else:
                base_schedule.step()
        dist.barrier()
        # Verify results (rank 0 has both first and last stages)
        if self.rank == 0:
            torch.testing.assert_close(out, ref_out)
            pipe_loss = sum(losses)
            torch.testing.assert_close(pipe_loss, ref_loss)
        # Verify overlap callbacks were called
        self.assertGreater(
            len(overlap_calls), 0, "OVERLAP_F_B callback should have been called"
        )
        # In a V-schedule with 8 microbatches and 2 stages per rank,
        # rank 0 should have 32 calls (8 microbatches * 2 stages * 2 loops)
        expected_count = num_microbatches * 2 * num_loops
        self.assertEqual(len(forward_calls), expected_count)
        # Verify all callback calls are for stages on this rank
        for stage_idx, _ in forward_calls:
            self.assertIn(
                stage_idx,
                stage_indices,
                f"Callback called for stage {stage_idx} not on rank {self.rank}",
            )
        # Check gradients using helper method
        check_gradients(self.config, stage_modules, ref_mod, submod_names)
@skip_but_pass_in_sandcastle_if(
not TEST_MULTIACCELERATOR, "NCCL test requires 2+ GPUs"
)
@parametrize(
"ScheduleClass",
[ScheduleInterleavedZeroBubble, ScheduleInterleaved1F1B],
)
@skip_if_lt_x_gpu(4)
def test_zero_bubble_with_model_kwargs(self, ScheduleClass):
stages_per_rank = 2
n_stages = stages_per_rank * self.world_size
mod, ref_mod, x, target, loss_fn = setup_models_and_data(
self.config, n_layers=n_stages, model_class=MultiMLPKwargs
)
unused_kwarg = torch.tensor([1.0], device=self.device)
# Run reference with kwargs
ref_out, ref_loss = run_reference_model(
ref_mod, x, target, loss_fn, unused_kwarg=unused_kwarg
)
# Create multi-stage pipeline
stages, stage_modules, submod_names = create_multi_stage_pipeline(
self.config, mod, stages_per_rank, n_stages
)
num_microbatches = (
ScheduleClass.num_microbatches
if hasattr(ScheduleClass, "num_microbatches")
else 2 * self.world_size
)
schedule = ScheduleClass(
stages, num_microbatches, loss_fn=loss_fn, scale_grads=False
)
# Run pipeline with kwargs
out = None
losses = []
for _ in range(2):
zero_gradients(stage_modules)
if self.rank == 0:
schedule.step(
x,
unused_kwarg=unused_kwarg.clone()
.unsqueeze(0)
.expand(num_microbatches, -1),
)
elif self.rank == self.world_size - 1:
out = schedule.step(target=target, losses=losses)
else:
schedule.step()
dist.barrier()
# Verify results
if self.rank == self.world_size - 1:
torch.testing.assert_close(out, ref_out)
pipe_loss = sum(losses)
torch.testing.assert_close(pipe_loss, ref_loss)
# Check gradients using helper method
check_gradients(
self.config, stage_modules, ref_mod, submod_names, rtol=3e-5, atol=5e-3
)
instantiate_parametrized_tests(ScheduleTest)
| ScheduleTest |
python | walkccc__LeetCode | solutions/323. Number of Connected Components in an Undirected Graph/323.py | {
"start": 0,
"end": 569
} | class ____:
def countComponents(self, n: int, edges: list[list[int]]) -> int:
ans = 0
graph = [[] for _ in range(n)]
seen = set()
for u, v in edges:
graph[u].append(v)
graph[v].append(u)
def bfs(node: int, seen: set[int]) -> None:
q = collections.deque([node])
seen.add(node)
while q:
u = q.pop()
for v in graph[u]:
if v not in seen:
q.append(v)
seen.add(v)
for i in range(n):
if i not in seen:
bfs(i, seen)
ans += 1
return ans
| Solution |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/map_test.py | {
"start": 5330,
"end": 5695
} | class ____:
mask: bool
value: tensor.Tensor
def __tf_flatten__(self):
metadata = (self.mask,)
components = (self.value,)
return metadata, components
@classmethod
def __tf_unflatten__(cls, metadata, components):
mask = metadata[0]
value = components[0]
return MaskedTensor(mask=mask, value=value)
@dataclasses.dataclass
| MaskedTensor |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_manifest_only/declarative_component_schema.py | {
"start": 744,
"end": 1479
} | class ____(BaseModel):
type: Literal["BasicHttpAuthenticator"]
username: str = Field(
...,
description="The username that will be combined with the password, base64 encoded and used to make requests. Fill it in the user inputs.",
examples=["{{ config['username'] }}", "{{ config['api_key'] }}"],
title="Username",
)
password: Optional[str] = Field(
"",
description="The password that will be combined with the username, base64 encoded and used to make requests. Fill it in the user inputs.",
examples=["{{ config['password'] }}", ""],
title="Password",
)
parameters: Optional[Dict[str, Any]] = Field(None, alias="$parameters")
| BasicHttpAuthenticator |
python | scrapy__scrapy | tests/test_logformatter.py | {
"start": 9593,
"end": 9796
} | class ____:
drop = True
def process_item(self, item):
if self.drop:
self.drop = False
raise DropItem("Ignoring item")
self.drop = True
| DropSomeItemsPipeline |
python | python__mypy | mypy/nodes.py | {
"start": 98375,
"end": 99054
} | class ____(Expression):
"""Named tuple expression Enum('name', 'val1 val2 ...')."""
__slots__ = ("info", "items", "values")
__match_args__ = ("info", "items", "values")
# The class representation of this enumerated type
info: TypeInfo
# The item names (for debugging)
items: list[str]
values: list[Expression | None]
def __init__(self, info: TypeInfo, items: list[str], values: list[Expression | None]) -> None:
super().__init__()
self.info = info
self.items = items
self.values = values
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_enum_call_expr(self)
| EnumCallExpr |
python | mlflow__mlflow | tests/tracing/display/test_ipython.py | {
"start": 358,
"end": 637
} | class ____:
def __init__(self):
self.events = defaultdict(list)
def register(self, event, callback):
self.events[event].append(callback)
def trigger(self, event):
for callback in self.events[event]:
callback(None)
| MockEventRegistry |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/triggers/test_dms.py | {
"start": 5462,
"end": 6900
} | class ____(TestBaseDmsTrigger):
EXPECTED_WAITER_NAME = "replication_stopped"
REPLICATION_CONFIG_ARN = "arn:aws:dms:region:account:config"
def test_serialization(self):
trigger = DmsReplicationStoppedTrigger(replication_config_arn=self.REPLICATION_CONFIG_ARN)
classpath, kwargs = trigger.serialize()
assert classpath == BASE_TRIGGER_CLASSPATH + "DmsReplicationStoppedTrigger"
""" assert kwargs.get("Filters") == [
{"Name": "replication-config-arn", "Values": ["arn:aws:dms:region:account:config"]}
] """
assert kwargs.get("replication_config_arn") == self.REPLICATION_CONFIG_ARN
@pytest.mark.asyncio
@mock.patch.object(DmsHook, "get_waiter")
@mock.patch.object(DmsHook, "get_async_conn")
async def test_complete(self, mock_async_conn, mock_get_waiter):
mock_async_conn.__aenter__.return_value = mock.MagicMock()
mock_get_waiter().wait = AsyncMock()
trigger = DmsReplicationStoppedTrigger(replication_config_arn=self.REPLICATION_CONFIG_ARN)
generator = trigger.run()
response = await generator.asend(None)
assert response == TriggerEvent(
{"status": "success", "replication_config_arn": self.REPLICATION_CONFIG_ARN}
)
assert_expected_waiter_type(mock_get_waiter, self.EXPECTED_WAITER_NAME)
mock_get_waiter().wait.assert_called_once()
| TestDmsReplicationStoppedTrigger |
python | openai__openai-python | src/openai/types/realtime/realtime_audio_config.py | {
"start": 320,
"end": 467
} | class ____(BaseModel):
input: Optional[RealtimeAudioConfigInput] = None
output: Optional[RealtimeAudioConfigOutput] = None
| RealtimeAudioConfig |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/list1.py | {
"start": 685,
"end": 2249
} | class ____:
baz: Baz[list[Foo]]
v10 = Bar()
reveal_type(v10.baz, expected_text="list[Foo]")
v10.baz = [Foo()]
reveal_type(v10.baz, expected_text="list[Foo]")
v11: list[Any] = [["hi", ["hi"], [[{}]]]]
reveal_type(v11, expected_text="list[Any]")
v12: list[int | None] = [None] * 3
reveal_type(v12, expected_text="list[int | None]")
v13: list[str | None] = ["3", None] * 2
reveal_type(v13, expected_text="list[str | None]")
x1 = 3
v14: list[str | None] = [None] * x1
x2 = [1, 2, 3]
v15: list[str | None] = [None] * sum(x2)
v16: dict[str, list[str | None]] = {n: [None] * len(n) for n in ["a", "aa", "aaa"]}
ScalarKeysT = TypeVar("ScalarKeysT", bound=Literal["name", "country"])
def func1(by: list[ScalarKeysT]) -> ScalarKeysT: ...
reveal_type(func1(["country"]), expected_type="Literal['country']")
reveal_type(func1(["name"]), expected_type="Literal['name']")
reveal_type(func1(["name", "country"]), expected_type="Literal['name', 'country']")
# This should generate an error.
func1(["id"])
def func2(thing: str | list[str | int] | list[list[str | int]]): ...
func2("")
func2(["", 0])
func2([["", 0], ["", 0]])
func2([[""]])
def func3(value: _T) -> list[_T]:
to_add = [value, str(value)]
# This should generate an error.
return to_add
def func4(value: _T) -> list[_T]:
# This should generate an error.
return [value, str(value)]
def func5():
v1: Sequence[int | str] = [1]
reveal_type(v1, expected_text="list[int]")
v2: MutableSequence[int | str] = [1]
reveal_type(v2, expected_text="list[int | str]")
| Bar |
python | ray-project__ray | python/ray/serve/tests/test_telemetry_1.py | {
"start": 12393,
"end": 15926
} | class ____:
def test_both_proxies_detected(manage_ray, ray_shutdown):
"""Test that both HTTP and gRPC proxies are detected by telemetry.
When both HTTP and gRPC proxies are used, both telemetry should be detected.
"""
result = get_extra_usage_tags_to_report(
ray.experimental.internal_kv.internal_kv_get_gcs_client()
)
report = {"extra_usage_tags": result}
# Ensure neither the HTTP nor gRPC proxy telemetry exist.
assert ServeUsageTag.HTTP_PROXY_USED.get_value_from_report(report) is None
assert ServeUsageTag.GRPC_PROXY_USED.get_value_from_report(report) is None
grpc_servicer_functions = [
"ray.serve.generated.serve_pb2_grpc."
"add_UserDefinedServiceServicer_to_server",
]
serve.start(grpc_options={"grpc_servicer_functions": grpc_servicer_functions})
result = get_extra_usage_tags_to_report(
ray.experimental.internal_kv.internal_kv_get_gcs_client()
)
report = {"extra_usage_tags": result}
# Ensure both HTTP and gRPC proxy telemetry exist.
assert int(ServeUsageTag.HTTP_PROXY_USED.get_value_from_report(report)) == 1
assert int(ServeUsageTag.GRPC_PROXY_USED.get_value_from_report(report)) == 1
def test_only_http_proxy_detected(manage_ray, ray_shutdown):
"""Test that only HTTP proxy is detected by telemetry.
When only HTTP proxy is used, only the http proxy telemetry should be detected.
"""
result = get_extra_usage_tags_to_report(
ray.experimental.internal_kv.internal_kv_get_gcs_client()
)
report = {"extra_usage_tags": result}
# Ensure the telemetry does not yet exist.
assert ServeUsageTag.HTTP_PROXY_USED.get_value_from_report(report) is None
assert ServeUsageTag.GRPC_PROXY_USED.get_value_from_report(report) is None
serve.start()
result = get_extra_usage_tags_to_report(
ray.experimental.internal_kv.internal_kv_get_gcs_client()
)
report = {"extra_usage_tags": result}
# Ensure only the HTTP proxy telemetry exist.
assert int(ServeUsageTag.HTTP_PROXY_USED.get_value_from_report(report)) == 1
assert ServeUsageTag.GRPC_PROXY_USED.get_value_from_report(report) is None
def test_no_proxy_detected(manage_ray, ray_shutdown):
"""Test that no proxy is detected by telemetry.
When neither HTTP nor gRPC proxy is used, no proxy telemetry should be detected.
"""
result = get_extra_usage_tags_to_report(
ray.experimental.internal_kv.internal_kv_get_gcs_client()
)
report = {"extra_usage_tags": result}
# Ensure neither the HTTP nor gRPC proxy telemetry exist.
assert ServeUsageTag.HTTP_PROXY_USED.get_value_from_report(report) is None
assert ServeUsageTag.GRPC_PROXY_USED.get_value_from_report(report) is None
serve.start(http_options={"location": "NoServer"})
result = get_extra_usage_tags_to_report(
ray.experimental.internal_kv.internal_kv_get_gcs_client()
)
report = {"extra_usage_tags": result}
# Ensure neither the HTTP nor gRPC proxy telemetry exist.
assert ServeUsageTag.HTTP_PROXY_USED.get_value_from_report(report) is None
assert ServeUsageTag.GRPC_PROXY_USED.get_value_from_report(report) is None
if __name__ == "__main__":
sys.exit(pytest.main(["-v", "-s", __file__]))
| TestProxyTelemetry |
python | davidhalter__jedi | test/completion/precedence.py | {
"start": 1304,
"end": 2501
} | class ____(object):
fuu = 0.1
raboof = 'fourtytwo'
# targets should be working
target = ''
for char in ['f', 'u', 'u']:
target += char
#? float()
getattr(FooBar, target)
# github #24
target = u''
for char in reversed(['f', 'o', 'o', 'b', 'a', 'r']):
target += char
#? str()
getattr(FooBar, target)
# -----------------
# repetition problems -> could be very slow and memory expensive - shouldn't
# be.
# -----------------
b = [str(1)]
l = list
for x in [l(0), l(1), l(2), l(3), l(4), l(5), l(6), l(7), l(8), l(9), l(10),
l(11), l(12), l(13), l(14), l(15), l(16), l(17), l(18), l(19), l(20),
l(21), l(22), l(23), l(24), l(25), l(26), l(27), l(28), l(29)]:
b += x
#? str()
b[1]
# -----------------
# undefined names
# -----------------
a = foobarbaz + 'hello'
#? int() float()
{'hello': 1, 'bar': 1.0}[a]
# -----------------
# stubs
# -----------------
from datetime import datetime, timedelta
#?
(datetime - timedelta)
#? datetime()
(datetime() - timedelta())
#? timedelta()
(datetime() - datetime())
#? timedelta()
(timedelta() - datetime())
#? timedelta()
(timedelta() - timedelta())
# -----------------
# magic methods
# -----------------
| FooBar |
python | altair-viz__altair | altair/vegalite/v6/schema/channels.py | {
"start": 1192808,
"end": 1204114
} | class ____(TypedDict, total=False):
"""
Encoding channels map properties of the data to visual properties of the chart.
Parameters
----------
angle
Rotation angle of point and text marks.
color
Color of the marks - either fill or stroke color based on the ``filled`` property
of mark definition. By default, ``color`` represents fill color for ``"area"``,
``"bar"``, ``"tick"``, ``"text"``, ``"trail"``, ``"circle"``, and ``"square"`` /
stroke color for ``"line"`` and ``"point"``.
**Default value:** If undefined, the default color depends on `mark config
<https://vega.github.io/vega-lite/docs/config.html#mark-config>`__'s ``color``
property.
*Note:* 1) For fine-grained control over both fill and stroke colors of the marks,
please use the ``fill`` and ``stroke`` channels. The ``fill`` or ``stroke``
encodings have higher precedence than ``color``, thus may override the ``color``
encoding if conflicting encodings are specified. 2) See the scale documentation for
more information about customizing `color scheme
<https://vega.github.io/vega-lite/docs/scale.html#scheme>`__.
column
A field definition for the horizontal facet of trellis plots.
description
A text description of this mark for ARIA accessibility (SVG output only). For SVG
output the ``"aria-label"`` attribute will be set to this description.
detail
Additional levels of detail for grouping data in aggregate views and in line, trail,
and area marks without mapping data to a specific visual channel.
facet
A field definition for the (flexible) facet of trellis plots.
If either ``row`` or ``column`` is specified, this channel will be ignored.
fill
Fill color of the marks. **Default value:** If undefined, the default color depends
on `mark config <https://vega.github.io/vega-lite/docs/config.html#mark-config>`__'s
``color`` property.
*Note:* The ``fill`` encoding has higher precedence than ``color``, thus may
override the ``color`` encoding if conflicting encodings are specified.
fillOpacity
Fill opacity of the marks.
**Default value:** If undefined, the default opacity depends on `mark config
<https://vega.github.io/vega-lite/docs/config.html#mark-config>`__'s ``fillOpacity``
property.
href
A URL to load upon mouse click.
key
A data field to use as a unique key for data binding. When a visualization's data is
updated, the key value will be used to match data elements to existing mark
instances. Use a key channel to enable object constancy for transitions over dynamic
data.
latitude
Latitude position of geographically projected marks.
latitude2
Latitude-2 position for geographically projected ranged ``"area"``, ``"bar"``,
``"rect"``, and ``"rule"``.
longitude
Longitude position of geographically projected marks.
longitude2
Longitude-2 position for geographically projected ranged ``"area"``, ``"bar"``,
``"rect"``, and ``"rule"``.
opacity
Opacity of the marks.
**Default value:** If undefined, the default opacity depends on `mark config
<https://vega.github.io/vega-lite/docs/config.html#mark-config>`__'s ``opacity``
property.
order
Order of the marks.
* For stacked marks, this ``order`` channel encodes `stack order
<https://vega.github.io/vega-lite/docs/stack.html#order>`__.
* For line and trail marks, this ``order`` channel encodes order of data points in
the lines. This can be useful for creating `a connected scatterplot
<https://vega.github.io/vega-lite/examples/connected_scatterplot.html>`__. Setting
``order`` to ``{"value": null}`` makes the line marks use the original order in
the data sources.
* Otherwise, this ``order`` channel encodes layer order of the marks.
**Note**: In aggregate plots, ``order`` field should be aggregated to avoid creating
additional aggregation grouping.
radius
The outer radius in pixels of arc marks.
radius2
The inner radius in pixels of arc marks.
row
A field definition for the vertical facet of trellis plots.
shape
Shape of the mark.
1. For ``point`` marks the supported values include: - plotting shapes:
``"circle"``, ``"square"``, ``"cross"``, ``"diamond"``, ``"triangle-up"``,
``"triangle-down"``, ``"triangle-right"``, or ``"triangle-left"``. - the line
symbol ``"stroke"`` - centered directional shapes ``"arrow"``, ``"wedge"``, or
``"triangle"`` - a custom `SVG path string
<https://developer.mozilla.org/en-US/docs/Web/SVG/Tutorial/Paths>`__ (For correct
sizing, custom shape paths should be defined within a square bounding box with
coordinates ranging from -1 to 1 along both the x and y dimensions.)
2. For ``geoshape`` marks it should be a field definition of the geojson data
**Default value:** If undefined, the default shape depends on `mark config
<https://vega.github.io/vega-lite/docs/config.html#point-config>`__'s ``shape``
property. (``"circle"`` if unset.)
size
Size of the mark.
* For ``"point"``, ``"square"`` and ``"circle"``, - the symbol size, or pixel area
of the mark.
* For ``"bar"`` and ``"tick"`` - the bar and tick's size.
* For ``"text"`` - the text's font size.
* Size is unsupported for ``"line"``, ``"area"``, and ``"rect"``. (Use ``"trail"``
instead of line with varying size)
stroke
Stroke color of the marks. **Default value:** If undefined, the default color
depends on `mark config
<https://vega.github.io/vega-lite/docs/config.html#mark-config>`__'s ``color``
property.
*Note:* The ``stroke`` encoding has higher precedence than ``color``, thus may
override the ``color`` encoding if conflicting encodings are specified.
strokeDash
Stroke dash of the marks.
**Default value:** ``[1,0]`` (No dash).
strokeOpacity
Stroke opacity of the marks.
**Default value:** If undefined, the default opacity depends on `mark config
<https://vega.github.io/vega-lite/docs/config.html#mark-config>`__'s
``strokeOpacity`` property.
strokeWidth
Stroke width of the marks.
**Default value:** If undefined, the default stroke width depends on `mark config
<https://vega.github.io/vega-lite/docs/config.html#mark-config>`__'s ``strokeWidth``
property.
text
Text of the ``text`` mark.
theta
* For arc marks, the arc length in radians if theta2 is not specified, otherwise the
start arc angle. (A value of 0 indicates up or “north”, increasing values proceed
clockwise.)
* For text marks, polar coordinate angle in radians.
theta2
The end angle of arc marks in radians. A value of 0 indicates up or “north”,
increasing values proceed clockwise.
time
tooltip
The tooltip text to show upon mouse hover. Specifying ``tooltip`` encoding overrides
`the tooltip property in the mark definition
<https://vega.github.io/vega-lite/docs/mark.html#mark-def>`__.
See the `tooltip <https://vega.github.io/vega-lite/docs/tooltip.html>`__
documentation for a detailed discussion about tooltip in Vega-Lite.
url
The URL of an image mark.
x
X coordinates of the marks, or width of horizontal ``"bar"`` and ``"area"`` without
specified ``x2`` or ``width``.
The ``value`` of this channel can be a number or a string ``"width"`` for the width
of the plot.
x2
X2 coordinates for ranged ``"area"``, ``"bar"``, ``"rect"``, and ``"rule"``.
The ``value`` of this channel can be a number or a string ``"width"`` for the width
of the plot.
xError
Error value of x coordinates for error specified ``"errorbar"`` and ``"errorband"``.
xError2
Secondary error value of x coordinates for error specified ``"errorbar"`` and
``"errorband"``.
xOffset
Offset of x-position of the marks
y
Y coordinates of the marks, or height of vertical ``"bar"`` and ``"area"`` without
specified ``y2`` or ``height``.
The ``value`` of this channel can be a number or a string ``"height"`` for the
height of the plot.
y2
Y2 coordinates for ranged ``"area"``, ``"bar"``, ``"rect"``, and ``"rule"``.
The ``value`` of this channel can be a number or a string ``"height"`` for the
height of the plot.
yError
Error value of y coordinates for error specified ``"errorbar"`` and ``"errorband"``.
yError2
Secondary error value of y coordinates for error specified ``"errorbar"`` and
``"errorband"``.
yOffset
Offset of y-position of the marks
"""
angle: str | AnyAngle | IntoCondition | Map
color: str | AnyColor | IntoCondition | Map
column: str | Column | IntoCondition | Map
description: str | AnyDescription | IntoCondition | Map
detail: OneOrSeq[str | Detail | IntoCondition | Map]
facet: str | Facet | IntoCondition | Map
fill: str | AnyFill | IntoCondition | Map
fillOpacity: str | AnyFillOpacity | IntoCondition | Map
href: str | AnyHref | IntoCondition | Map
key: str | Key | IntoCondition | Map
latitude: str | AnyLatitude | IntoCondition | Map
latitude2: str | AnyLatitude2 | IntoCondition | Map
longitude: str | AnyLongitude | IntoCondition | Map
longitude2: str | AnyLongitude2 | IntoCondition | Map
opacity: str | AnyOpacity | IntoCondition | Map
order: OneOrSeq[str | AnyOrder | IntoCondition | Map]
radius: str | AnyRadius | IntoCondition | Map
radius2: str | AnyRadius2 | IntoCondition | Map
row: str | Row | IntoCondition | Map
shape: str | AnyShape | IntoCondition | Map
size: str | AnySize | IntoCondition | Map
stroke: str | AnyStroke | IntoCondition | Map
strokeDash: str | AnyStrokeDash | IntoCondition | Map
strokeOpacity: str | AnyStrokeOpacity | IntoCondition | Map
strokeWidth: str | AnyStrokeWidth | IntoCondition | Map
text: str | AnyText | IntoCondition | Map
theta: str | AnyTheta | IntoCondition | Map
theta2: str | AnyTheta2 | IntoCondition | Map
time: str | Time | IntoCondition | Map
tooltip: OneOrSeq[str | AnyTooltip | IntoCondition | Map]
url: str | AnyUrl | IntoCondition | Map
x: str | AnyX | IntoCondition | Map
x2: str | AnyX2 | IntoCondition | Map
xError: str | AnyXError | IntoCondition | Map
xError2: str | AnyXError2 | IntoCondition | Map
xOffset: str | AnyXOffset | IntoCondition | Map
y: str | AnyY | IntoCondition | Map
y2: str | AnyY2 | IntoCondition | Map
yError: str | AnyYError | IntoCondition | Map
yError2: str | AnyYError2 | IntoCondition | Map
yOffset: str | AnyYOffset | IntoCondition | Map
| EncodeKwds |
python | getsentry__sentry | tests/sentry/notifications/platform/api/endpoints/test_internal_registered_templates.py | {
"start": 443,
"end": 5355
} | class ____(APITestCase):
endpoint = "internal-notifications-registered-templates"
def test_unauthenticated(self) -> None:
response = self.get_response()
assert response.status_code == 401
def test_get_all_registered_templates(self) -> None:
self.login_as(self.user)
response = self.get_response()
assert response.status_code == 200
for source, template_cls in template_registry.registrations.items():
template = template_cls()
assert template.category.value in response.data
assert (
serialize_template(template=template, source=source)
in response.data[template.category.value]
)
def test_valid_template_serialization(self) -> None:
self.login_as(self.user)
response = self.get_response()
for templates_by_category in response.data.values():
for template in templates_by_category:
assert "source" in template
assert "category" in template
# The template registry should reflect the same source and category
template_cls = template_registry.get(template["source"])
assert template_cls.category.value == template["category"]
# The response should be nested in the proper category
assert response.data[template["category"]] == templates_by_category
assert "example" in template
assert "previews" in template
assert "email" in template["previews"]
assert "slack" in template["previews"]
def test_email_preview(self) -> None:
self.login_as(self.user)
response = self.get_response()
for templates_by_category in response.data.values():
for template in templates_by_category:
assert "email" in template["previews"]
assert isinstance(template["previews"]["email"]["subject"], str)
assert isinstance(template["previews"]["email"]["text_content"], str)
assert isinstance(template["previews"]["email"]["html_content"], str)
def test_discord_preview(self) -> None:
self.login_as(self.user)
response = self.get_response()
for templates_by_category in response.data.values():
for template in templates_by_category:
assert "discord" in template["previews"]
if template["example"]["actions"]:
num_actions = len(template["example"]["actions"])
# The first component is an action row, the contents of the row are the buttons
assert (
len(template["previews"]["discord"]["components"][0]["components"])
== num_actions
)
assert template["previews"]["discord"]["content"] == ""
assert len(template["previews"]["discord"]["embeds"]) == 1
def find_block_by_type(blocks: list[dict[str, Any]], block_type: str) -> dict[str, Any] | None:
"""Find the first block with the specified type."""
return next((block for block in blocks if block["type"] == block_type), None)
def assert_header_block(block: dict[str, Any], expected_text: str) -> None:
"""Assert that a block is a valid header block with expected text."""
assert block["type"] == "header"
assert block["text"]["type"] == "plain_text"
assert block["text"]["text"] == expected_text
def assert_section_block(
block: dict[str, Any], expected_text: str, text_type: str = "mrkdwn"
) -> None:
"""Assert that a block is a valid section block with expected text."""
assert block["type"] == "section"
assert block["text"]["type"] == text_type
assert block["text"]["text"] == expected_text
def assert_button_element(element: dict[str, Any], expected_text: str, expected_url: str) -> None:
"""Assert that an element is a valid button with expected text and URL."""
assert element["type"] == "button"
assert element["text"]["text"] == expected_text
assert element["url"] == expected_url
def assert_image_block(block: dict[str, Any], expected_url: str, expected_alt_text: str) -> None:
"""Assert that a block is a valid image block with expected URL and alt text."""
assert block["type"] == "image"
assert block["image_url"] == expected_url
assert block["alt_text"] == expected_alt_text
def find_section_block_by_text(
blocks: list[dict[str, Any]], text_content: str
) -> dict[str, Any] | None:
"""Find a section block that contains specific text content."""
return next(
(
block
for block in blocks
if block["type"] == "section" and block.get("text", {}).get("text") == text_content
),
None,
)
| InternalRegisteredTemplatesEndpointTest |
python | ray-project__ray | release/nightly_tests/dataset/batch_inference_benchmark.py | {
"start": 3327,
"end": 3838
} | class ____:
def __init__(self, model, device):
self._model = ray.get(model)
self._model.eval()
self._model.to(device)
self._device = device
def __call__(self, batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
with torch.inference_mode():
output = self._model(torch.as_tensor(batch["image"], device=self._device))
return {"predictions": output.cpu().numpy()}
if __name__ == "__main__":
args = parse_args()
main(args)
| Predictor |
python | django-compressor__django-compressor | compressor/filters/jsmin/__init__.py | {
"start": 123,
"end": 336
} | class ____(CallbackOutputFilter):
callback = "rjsmin.jsmin"
dependencies = ["rjsmin"]
kwargs = {"keep_bang_comments": True}
# This is for backwards compatibility
JSMinFilter = rJSMinFilter
| rJSMinFilter |
python | pytorch__pytorch | torch/_inductor/codegen/cpp.py | {
"start": 15339,
"end": 21763
} | class ____(FusedSchedulerNode):
@classmethod
def fuse( # type: ignore[override]
cls, node1: BaseSchedulerNode, node2: BaseSchedulerNode, outer_loop_fusion_depth
):
assert node1.scheduler is node2.scheduler
assert all(
type(node)
in (
OuterLoopFusedSchedulerNode,
SchedulerNode,
FusedSchedulerNode,
)
for node in (node1, node2)
)
if any(type(node) is OuterLoopFusedSchedulerNode for node in (node1, node2)):
return cls(
node1.scheduler,
# pyrefly: ignore [bad-argument-type]
(
list(node1.get_outer_nodes())
if type(node1) is OuterLoopFusedSchedulerNode
else [
node1,
]
)
+ (
list(node2.get_outer_nodes())
if type(node2) is OuterLoopFusedSchedulerNode
else [
node2,
]
),
outer_loop_fusion_depth,
)
else:
return cls(node1.scheduler, [node1, node2], outer_loop_fusion_depth) # type: ignore[list-item]
def __init__(
self,
scheduler: "Scheduler",
outer_fused_nodes: list[Union[FusedSchedulerNode, SchedulerNode]],
outer_loop_fusion_depth,
):
self.outer_fused_nodes: list[Union[FusedSchedulerNode, SchedulerNode]] = (
outer_fused_nodes
)
self.outer_loop_fusion_depth = outer_loop_fusion_depth
flatten_snodes = []
for _node in self.outer_fused_nodes:
assert isinstance(_node, (SchedulerNode, FusedSchedulerNode))
flatten_snodes.extend(list(_node.get_nodes()))
super().__init__(scheduler, flatten_snodes) # type: ignore[arg-type]
def get_outer_nodes(self):
return self.outer_fused_nodes
def check_outer_fusion_loop_level_attr(
self, cpp_kernel_proxy_list, outer_loop_fusion_depth
):
# This function ensures that the same tiling split is applied at each loop level within the outer loop fusion depth.
# In the fusion stage, we only examine nodes with same vars and reduce.
# However, for nodes with same vars and reduce, the loops may still have different tile splits.
# For example (test_expr_vec_non_contiguous in test_cpu_repro.py):
# * buf0 tiling along the 2nd loop level, buf1 tiling along the 3rd loop level.
# If the check failed, we should fall back to standard loop codegen.
def _inner(
left_loop_nest: LoopNest,
right_loop_nest: LoopNest,
loop_fusion_depth: int,
current_checking_depth: int,
) -> bool:
assert left_loop_nest.loops
assert right_loop_nest.loops
left_loop_level = left_loop_nest.loops[current_checking_depth]
right_loop_level = right_loop_nest.loops[current_checking_depth]
# Check if same loop level attr
outer_loops_attr_compare_list = [
"var",
"size",
"offset",
"steps",
]
if not (
all(
getattr(left_loop_level, attr_compare)
== getattr(right_loop_level, attr_compare)
for attr_compare in outer_loops_attr_compare_list
)
):
return False
assert loop_fusion_depth >= 1
if (loop_fusion_depth := loop_fusion_depth - 1) > 0:
# Check next loop level attr
current_checking_depth = current_checking_depth + 1
assert current_checking_depth < len(left_loop_nest.loops)
assert current_checking_depth < len(right_loop_nest.loops)
if not _inner(
left_loop_nest,
right_loop_nest,
loop_fusion_depth,
current_checking_depth,
):
return False
return True
for idx in range(len(cpp_kernel_proxy_list) - 1):
left_loop_nest = cpp_kernel_proxy_list[idx].loop_nest
right_loop_nest = cpp_kernel_proxy_list[idx + 1].loop_nest
if not _inner(
left_loop_nest,
right_loop_nest,
outer_loop_fusion_depth,
0,
):
return False
for cpp_kernel_proxy in cpp_kernel_proxy_list:
outer_ranges = functools.reduce(
operator.mul,
cpp_kernel_proxy.ranges[:outer_loop_fusion_depth],
)
# When the range of the first inner loop is much larger than the range of
# all outer loops, do not fuse outer loop and fallback to standard loop codegen,
# so that the inner loops with larger range have a chance to be parallelized.
# We set a conservative threshold here:
# First inner loop range / all outer loops range > 300.
if (
len(cpp_kernel_proxy.ranges) > outer_loop_fusion_depth
and isinstance(outer_ranges, sympy.Integer)
and isinstance(
cpp_kernel_proxy.ranges[outer_loop_fusion_depth],
sympy.Integer,
)
and outer_ranges * 300
< cpp_kernel_proxy.ranges[outer_loop_fusion_depth]
):
return False
return True
def merge_outer_fusion_kernels(
self,
cpp_kernel_proxy_list,
):
kernel_group = cpp_kernel_proxy_list[0].kernel_group
outer_loop_fused_kernel = OuterLoopFusedKernel(kernel_group)
outer_loop_fused_kernel.inner = [
proxy.loop_nest.from_loop_level(self.outer_loop_fusion_depth)
for proxy in cpp_kernel_proxy_list
]
outer_fused_proxy = cpp_kernel_proxy_list[0]
outer_fused_proxy.loop_nest.kernel = outer_loop_fused_kernel
outer_fused_proxy.loop_nest.loops = outer_fused_proxy.loop_nest.loops[
: self.outer_loop_fusion_depth
]
return outer_fused_proxy
| OuterLoopFusedSchedulerNode |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.