language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
numpy__numpy
|
numpy/distutils/command/egg_info.py
|
{
"start": 75,
"end": 921
}
|
class ____(_egg_info):
def run(self):
if 'sdist' in sys.argv:
import warnings
import textwrap
msg = textwrap.dedent("""
`build_src` is being run, this may lead to missing
files in your sdist! You want to use distutils.sdist
instead of the setuptools version:
from distutils.command.sdist import sdist
cmdclass={'sdist': sdist}"
See numpy's setup.py or gh-7131 for details.""")
warnings.warn(msg, UserWarning, stacklevel=2)
# We need to ensure that build_src has been executed in order to give
# setuptools' egg_info command real filenames instead of functions which
# generate files.
self.run_command("build_src")
_egg_info.run(self)
|
egg_info
|
python
|
PrefectHQ__prefect
|
tests/blocks/test_abstract.py
|
{
"start": 1059,
"end": 1927
}
|
class ____:
def test_notification_block_is_abstract(self):
with pytest.raises(
TypeError, match="Can't instantiate abstract class NotificationBlock"
):
NotificationBlock()
def test_notification_block_implementation(self, caplog):
class ANotificationBlock(NotificationBlock):
def notify(self, body, subject=None):
self.logger.info(f"Notification sent with {body} {subject}.")
a_notification_block = ANotificationBlock()
a_notification_block.notify("body", "subject")
# test logging
assert hasattr(a_notification_block, "logger")
assert len(caplog.records) == 1
record = caplog.records[0]
assert record.name == "prefect.ANotificationBlock"
assert record.msg == "Notification sent with body subject."
|
TestNotificationBlock
|
python
|
doocs__leetcode
|
lcof2/剑指 Offer II 116. 朋友圈/Solution.py
|
{
"start": 0,
"end": 432
}
|
class ____:
def findCircleNum(self, isConnected: List[List[int]]) -> int:
def dfs(i):
vis[i] = True
for j in range(n):
if not vis[j] and isConnected[i][j]:
dfs(j)
n = len(isConnected)
vis = [False] * n
ans = 0
for i in range(n):
if not vis[i]:
dfs(i)
ans += 1
return ans
|
Solution
|
python
|
walkccc__LeetCode
|
solutions/3408. Design Task Manager/3408.py
|
{
"start": 319,
"end": 1302
}
|
class ____:
def __init__(self, tasks: list[list[int]]):
self.taskMap = SortedDict() # {taskId: Task}, keeps tasks sorted by taskId
self.taskSet = SortedSet() # Stores tasks sorted by priority and taskId
for task in tasks:
self.add(task[0], task[1], task[2])
def add(self, userId: int, taskId: int, priority: int) -> None:
task = Task(userId, taskId, priority)
self.taskMap[taskId] = task
self.taskSet.add(task)
def edit(self, taskId: int, newPriority: int) -> None:
task = self.taskMap[taskId]
self.taskSet.remove(task)
editedTask = Task(task.userId, taskId, newPriority)
self.taskSet.add(editedTask)
self.taskMap[taskId] = editedTask
def rmv(self, taskId: int) -> None:
task = self.taskMap[taskId]
self.taskSet.remove(task)
del self.taskMap[taskId]
def execTop(self):
if not self.taskSet:
return -1
task = self.taskSet.pop(0)
del self.taskMap[task.taskId]
return task.userId
|
TaskManager
|
python
|
google__jax
|
jax/experimental/sparse/transform.py
|
{
"start": 9277,
"end": 37575
}
|
class ____(core.Trace):
__slots__ = ("parent_trace", "tag", "spenv")
def __init__(self, parent_trace, tag, spenv):
super().__init__()
self.parent_trace = parent_trace
self.tag = tag
self.spenv = spenv
def to_sparse_tracer(self, val):
if isinstance(val, SparseTracer) and self.tag is val._trace.tag:
return val
else:
with core.set_current_trace(self.parent_trace):
spvalue, = arrays_to_spvalues(self.spenv, [val])
return SparseTracer(self, spvalue=spvalue)
def process_primitive(self, primitive, tracers, params):
tracers = [self.to_sparse_tracer(t) for t in tracers]
spvalues = [t._spvalue for t in tracers]
if any(spvalue.is_sparse() for spvalue in spvalues):
if primitive not in sparse_rules_bcoo:
_raise_unimplemented_primitive(primitive)
with core.set_current_trace(self.parent_trace):
out_spvalues = sparse_rules_bcoo[primitive](self.spenv, *(t._spvalue for t in tracers), **params)
else:
out_bufs = primitive.bind_with_trace(self.parent_trace, tuple(self.spenv.data(spvalue) for spvalue in spvalues), params)
out_spvalues = arrays_to_spvalues(self.spenv, out_bufs if primitive.multiple_results else [out_bufs])
out_tracers = tuple(SparseTracer(self, spvalue=spvalue) for spvalue in out_spvalues)
return out_tracers if primitive.multiple_results else out_tracers[0]
def process_call(self, call_primitive, f: lu.WrappedFun, tracers, params):
assert False
spvalues = tuple(t._spvalue for t in tracers)
in_bufs = self.spenv._buffers
fun, out_spvalues = sparsify_subtrace(f, self.main, spvalues)
if any(params['donated_invars']):
raise NotImplementedError("sparsify does not support donated_invars")
params = dict(params, donated_invars=tuple(False for buf in in_bufs))
bufs_out = call_primitive.bind(fun, *in_bufs, **params)
return [SparseTracer(self, spvalue=spvalue) for spvalue in out_spvalues()]
def process_custom_jvp_call(self, primitive, fun, jvp, tracers, *, symbolic_zeros):
# TODO(jakevdp): handle the jvp here
del primitive, jvp, symbolic_zeros
with core.set_current_trace(self):
return fun.call_wrapped(*tracers)
@lu.transformation_with_aux2
def sparsify_subtrace(f, store, tag, spenv, spvalues, *bufs):
with core.take_current_trace() as parent:
trace = SparseTrace(parent, tag, spenv)
with core.set_current_trace(trace):
in_tracers = [SparseTracer(trace, spvalue=spvalue) for spvalue in spvalues]
outs = f(*in_tracers)
out_traces = [trace.to_sparse_tracer(out) for out in outs]
buffers = spenv._buffers
store.store([out._spvalue for out in out_traces])
return buffers
def sparsify_fun(wrapped_fun, args: list[ArrayOrSparse]):
tag = core.TraceTag()
spenv = SparsifyEnv()
spvalues = arrays_to_spvalues(spenv, args)
in_bufs = spenv._buffers
fun, out_spvalues = sparsify_subtrace(wrapped_fun, tag, spenv, spvalues)
out_bufs = fun.call_wrapped(*in_bufs)
spenv = SparsifyEnv(out_bufs)
return spvalues_to_arrays(spenv, out_spvalues())
def _sparsify_with_tracer(fun: Callable):
"""Implementation of sparsify() using tracers."""
@functools.wraps(fun)
def _wrapped(*args):
args_flat, in_tree = tree_flatten(args, is_leaf=_is_sparse_obj)
wrapped_fun, out_tree = flatten_fun_nokwargs(
lu.wrap_init(fun,
debug_info=api_util.debug_info("sparsify", fun, args, {})),
in_tree)
out = sparsify_fun(wrapped_fun, args_flat)
return tree_unflatten(out_tree(), out)
return _wrapped
# ------------------------------------------------------------------------------
# Implementation of sparsify() using a jaxpr interpreter.
def eval_sparse(
jaxpr: core.Jaxpr,
consts: Sequence[Array], # all consts are dense
spvalues: Sequence[SparsifyValue], # mix of sparse and dense pointers into spenv
spenv: SparsifyEnv,
) -> Sequence[SparsifyValue]:
env : dict[core.Var, SparsifyValue] = {}
def read(var: core.Atom) -> SparsifyValue:
# all literals are dense
if isinstance(var, core.Literal):
return spenv.dense(var.val)
else:
assert isinstance(var, core.Var)
return env[var]
def write_buffer(var: core.Var, a: Array) -> None:
if isinstance(var, core.DropVar):
return
env[var] = spenv.dense(a)
def write(var: core.Var, a: SparsifyValue) -> None:
if isinstance(var, core.DropVar):
return
assert a is not None
env[var] = a
safe_map(write_buffer, jaxpr.constvars, consts)
safe_map(write, jaxpr.invars, spvalues)
for eqn in jaxpr.eqns:
prim = eqn.primitive
invals = safe_map(read, eqn.invars)
if any(val.is_bcsr() for val in invals):
if prim not in sparse_rules_bcsr:
_raise_unimplemented_primitive(prim)
out = sparse_rules_bcsr[prim](spenv, *invals, **eqn.params)
elif any(val.is_bcoo() for val in invals):
if prim not in sparse_rules_bcoo:
_raise_unimplemented_primitive(prim)
out = sparse_rules_bcoo[prim](spenv, *invals, **eqn.params)
else:
out_bufs = prim.bind(*(spenv.data(val) for val in invals), **eqn.params)
out_bufs = out_bufs if prim.multiple_results else [out_bufs]
out = []
for buf, outvar in safe_zip(out_bufs, eqn.outvars):
if isinstance(outvar, core.DropVar):
out.append(None)
else:
out.append(spenv.dense(buf))
safe_map(write, eqn.outvars, out)
return safe_map(read, jaxpr.outvars)
def sparsify_raw(f):
def wrapped(
spenv: SparsifyEnv, *spvalues: SparsifyValue, **params: Any
) -> tuple[Sequence[SparsifyValue], pytree.PyTreeDef]:
spvalues_flat, in_tree = tree_flatten(spvalues, is_leaf=_is_spvalue)
in_avals_flat = spvalues_to_avals(spenv, spvalues_flat)
wrapped_fun, out_tree = flatten_fun_nokwargs(
lu.wrap_init(
f, params,
debug_info=api_util.debug_info("sparsify", f,
in_tree.unflatten([True] * len(in_avals_flat)),
{})),
in_tree)
jaxpr, out_avals_flat, consts = pe.trace_to_jaxpr_dynamic(wrapped_fun, in_avals_flat)
result = eval_sparse(jaxpr, consts, spvalues_flat, spenv)
if len(out_avals_flat) != len(result):
raise Exception("Internal: eval_sparse does not return expected number of arguments. "
"Got {result} for avals {out_avals_flat}")
return result, out_tree()
return wrapped
def _sparsify_with_interpreter(f):
"""Implementation of sparsify() using jaxpr interpreter."""
f_raw = sparsify_raw(f)
@functools.wraps(f)
def wrapped(*args, **params):
spenv = SparsifyEnv()
spvalues = arrays_to_spvalues(spenv, args)
spvalues_out, out_tree = f_raw(spenv, *spvalues, **params)
out = spvalues_to_arrays(spenv, spvalues_out)
return tree_unflatten(out_tree, out)
return wrapped
def sparsify(f, use_tracer=False):
"""Experimental sparsification transform.
Examples:
Decorate JAX functions to make them compatible with :class:`jax.experimental.sparse.BCOO`
matrices:
>>> from jax.experimental import sparse
>>> @sparse.sparsify
... def f(M, v):
... return 2 * M.T @ v
>>> M = sparse.BCOO.fromdense(jnp.arange(12).reshape(3, 4))
>>> v = jnp.array([3, 4, 2])
>>> f(M, v)
Array([ 64, 82, 100, 118], dtype=int32)
"""
if use_tracer:
return _sparsify_with_tracer(f)
else:
return _sparsify_with_interpreter(f)
# ------------------------------------------------------------------------------
# Sparse rules for various primitives
def _ensure_unique_indices(spenv, spvalue):
"""Return an spvalue representation with deduplicated indices."""
if spvalue.is_dense() or spvalue.unique_indices:
return spvalue
arr = spvalues_to_arrays(spenv, spvalue)
arr = arr.sum_duplicates(nse=arr.nse, remove_zeros=False)
return arrays_to_spvalues(spenv, arr)
def _zero_preserving_unary_op(prim, linear):
def func(spenv, *spvalues, **kwargs):
assert len(spvalues) == 1
spvalue = spvalues[0]
if not linear:
# For non-linear unary operations, we need to ensure that
# indices are unique before applying the operator elementwise.
spvalue = _ensure_unique_indices(spenv, spvalue)
buf = spenv.data(spvalue)
buf_out = prim.bind(buf, **kwargs)
if spvalues[0].is_sparse():
out_spvalue = spenv.sparse(spvalue.shape, buf_out,
indices_ref=spvalue.indices_ref,
indptr_ref=spvalue.indptr_ref,
indices_sorted=spvalue.indices_sorted,
unique_indices=spvalue.unique_indices)
else:
out_spvalue = spenv.dense(buf)
return (out_spvalue,)
return func
for _prim in _zero_preserving_unary_primitives:
sparse_rules_bcoo[_prim] = _zero_preserving_unary_op(_prim, linear=False)
sparse_rules_bcsr[_prim] = _zero_preserving_unary_op(_prim, linear=False)
for _prim in _zero_preserving_linear_unary_primitives:
sparse_rules_bcoo[_prim] = _zero_preserving_unary_op(_prim, linear=True)
sparse_rules_bcsr[_prim] = _zero_preserving_unary_op(_prim, linear=True)
def _standard_sparse_rule(prim, sparse_op):
def _sparse_rule(spenv, *spvalues, **kwds):
result = sparse_op(*spvalues_to_arrays(spenv, spvalues), **kwds)
return arrays_to_spvalues(spenv, result if prim.multiple_results else [result])
return _sparse_rule
_BCOO_STANDARD_PRIMITIVES = {
lax.broadcast_in_dim_p: sparse.bcoo_broadcast_in_dim,
lax.concatenate_p: lambda *a, **k: sparse.bcoo_concatenate(a, **k),
lax.conv_general_dilated_p: sparse.bcoo_conv_general_dilated,
lax.dot_general_p: sparse.bcoo_dot_general,
lax.dynamic_slice_p: lambda *a, **k: sparse.bcoo_dynamic_slice(a[0], a[1:], **k),
lax.reshape_p: sparse.bcoo_reshape,
lax.rev_p: sparse.bcoo_rev,
lax.slice_p: sparse.bcoo_slice,
lax.squeeze_p: sparse.bcoo_squeeze,
}
for prim, bcoo_impl in _BCOO_STANDARD_PRIMITIVES.items():
sparse_rules_bcoo[prim] = _standard_sparse_rule(prim, bcoo_impl)
_BCSR_STANDARD_PRIMITIVES = {
lax.dot_general_p: sparse.bcsr_dot_general,
lax.broadcast_in_dim_p: sparse.bcsr_broadcast_in_dim,
lax.concatenate_p: lambda *a, **k: sparse.bcsr_concatenate(a, **k),
}
for prim, bcsr_impl in _BCSR_STANDARD_PRIMITIVES.items():
sparse_rules_bcsr[prim] = _standard_sparse_rule(prim, bcsr_impl)
def _integer_pow_sparse(spenv, *spvalues, y):
if y <= 0:
raise NotImplementedError(f"sparse rule for {lax.integer_pow_p} with non-positive exponent {y} is "
"not implemented because it would result in dense output. If this is your "
"intent, use sparse.todense() to convert your argument to a dense array.")
return _zero_preserving_unary_op(lax.integer_pow_p, False)(spenv, *spvalues, y=y)
sparse_rules_bcoo[lax.integer_pow_p] = _integer_pow_sparse
sparse_rules_bcsr[lax.integer_pow_p] = _integer_pow_sparse
def _transpose_sparse(spenv, *spvalues, permutation):
permutation = tuple(permutation)
args = spvalues_to_arrays(spenv, spvalues)
shape = args[0].shape
mat_transposed = sparse.bcoo_transpose(args[0], permutation=permutation)
out_shape = tuple(shape[i] for i in permutation)
n_batch = args[0].indices.ndim - 2
n_sparse = args[0].indices.shape[-1]
batch_dims_unchanged = (permutation[:n_batch] == tuple(range(n_batch)))
dense_dims_unchanged = (permutation[n_batch + n_sparse:] == tuple(range(n_batch + n_sparse, len(shape))))
sparse_dims_unchanged = (permutation[n_batch:n_batch + n_sparse] == tuple(range(n_batch, n_batch + n_sparse)))
# Data is unchanged if batch & dense dims are not permuted
kwds = {}
if batch_dims_unchanged and dense_dims_unchanged:
kwds['data_ref'] = spvalues[0].data_ref
else:
kwds['data'] = mat_transposed.data
# Indices unchanged if batch & sparse dims are not permuted
if batch_dims_unchanged and sparse_dims_unchanged:
kwds['indices_ref'] = spvalues[0].indices_ref
else:
kwds['indices'] = mat_transposed.indices
kwds['indices_sorted'] = mat_transposed.indices_sorted
kwds['unique_indices'] = mat_transposed.unique_indices
spvalue = spenv.sparse(out_shape, **kwds)
return (spvalue,)
sparse_rules_bcoo[lax.transpose_p] = _transpose_sparse
def _add_sparse(spenv, *spvalues):
X, Y = spvalues
out_shape = lax.broadcast_shapes(X.shape, Y.shape)
if X.is_sparse() and Y.is_sparse():
if X.shape != Y.shape:
raise NotImplementedError("Addition between sparse matrices of different shapes.")
if X.indices_ref == Y.indices_ref:
out_data = lax.add(spenv.data(X), spenv.data(Y))
if config.enable_checks.value:
assert X.indices_sorted == Y.indices_sorted
assert X.unique_indices == Y.unique_indices
out_spvalue = spenv.sparse(X.shape, out_data, indices_ref=X.indices_ref,
indices_sorted=X.indices_sorted,
unique_indices=X.unique_indices)
elif spenv.indices(X).ndim != spenv.indices(Y).ndim or spenv.data(X).ndim != spenv.data(Y).ndim:
raise NotImplementedError("Addition between sparse matrices with different batch/dense dimensions.")
else:
out_indices = lax.concatenate([spenv.indices(X), spenv.indices(Y)], dimension=spenv.indices(X).ndim - 2)
out_data = lax.concatenate([spenv.data(X), spenv.data(Y)], dimension=spenv.indices(X).ndim - 2)
out_spvalue = spenv.sparse(X.shape, out_data, out_indices)
else:
if Y.is_sparse():
X, Y = Y, X
assert X.is_sparse() and Y.is_dense()
if Y.shape != out_shape:
raise NotImplementedError(
"Addition between a sparse array X and a dense array Y is not implemented when "
"the output shape is larger than Y.shape. This is to prevent silent densification "
"of a large sparse array. If this is your intent, you can explicitly cast the sparse "
"array to a dense matrix.")
X_promoted, Y_promoted = spvalues_to_arrays(spenv, (X, Y))
out = X_promoted.todense() + Y_promoted
out_spvalue = spenv.dense(out)
return (out_spvalue,)
sparse_rules_bcoo[lax.add_p] = _add_sparse
def _sub_sparse(spenv, *spvalues):
X, Y = spvalues
if X.is_sparse() and Y.is_sparse():
return _add_sparse(spenv, X, *sparse_rules_bcoo[lax.neg_p](spenv, Y))
else:
raise NotImplementedError("Subtraction between sparse and dense array.")
sparse_rules_bcoo[lax.sub_p] = _sub_sparse
def _mul_sparse(spenv, *spvalues):
X, Y = spvalues
if X.is_sparse() and Y.is_sparse():
if X.indices_ref == Y.indices_ref and X.unique_indices:
if config.enable_checks.value:
assert X.indices_sorted == Y.indices_sorted
assert X.unique_indices == Y.unique_indices
out_data = lax.mul(spenv.data(X), spenv.data(Y))
out_spvalue = spenv.sparse(X.shape, out_data, indices_ref=X.indices_ref,
indices_sorted=X.indices_sorted,
unique_indices=True)
else:
X_promoted, Y_promoted = spvalues_to_arrays(spenv, spvalues)
mat = bcoo_multiply_sparse(X_promoted, Y_promoted)
out_spvalue = spenv.sparse(mat.shape, mat.data, mat.indices)
else:
if Y.is_sparse():
X, Y = Y, X
X_promoted = spvalues_to_arrays(spenv, X)
out_data = bcoo_multiply_dense(X_promoted, spenv.data(Y))
out_spvalue = spenv.sparse(X.shape, out_data, indices_ref=X.indices_ref,
indices_sorted=X.indices_sorted,
unique_indices=X.unique_indices)
return (out_spvalue,)
sparse_rules_bcoo[lax.mul_p] = _mul_sparse
def _div_sparse(spenv, *spvalues):
X, Y = spvalues
if Y.is_sparse():
raise NotImplementedError(
"Division by a sparse array is not implemented because it "
"would result in dense output. If this is your intent, use "
"sparse.todense() to convert your arguments to a dense array.")
X_promoted = spvalues_to_arrays(spenv, X)
out_data = bcoo_multiply_dense(X_promoted, 1. / spenv.data(Y))
out_spvalue = spenv.sparse(X.shape, out_data, indices_ref=X.indices_ref,
indices_sorted=X.indices_sorted,
unique_indices=X.unique_indices)
return (out_spvalue,)
sparse_rules_bcoo[lax.div_p] = _div_sparse
def _reduce_sum_sparse(spenv, *spvalues, axes, out_sharding):
X, = spvalues
X_promoted = spvalues_to_arrays(spenv, X)
mat = sparse.bcoo_reduce_sum(X_promoted, axes=axes)
out_shape = mat.shape
if out_shape == ():
out_spvalue = spenv.dense(mat.data.sum())
else:
out_spvalue = spenv.sparse(out_shape, mat.data, mat.indices)
return (out_spvalue,)
sparse_rules_bcoo[lax.reduce_sum_p] = _reduce_sum_sparse
def _gather_sparse_rule(spenv, *args, dimension_numbers, slice_sizes, unique_indices,
indices_are_sorted, mode, fill_value):
operand, start_indices = spvalues_to_arrays(spenv, args)
result = sparse.bcoo_gather(operand, start_indices, dimension_numbers=dimension_numbers,
slice_sizes=slice_sizes, unique_indices=unique_indices,
indices_are_sorted=indices_are_sorted,
mode=mode, fill_value=fill_value)
return arrays_to_spvalues(spenv, (result,))
sparse_rules_bcoo[lax.gather_p] = _gather_sparse_rule
def _sparsify_jaxpr(spenv: SparsifyEnv,
jaxpr: core.ClosedJaxpr, *spvalues):
# TODO(jakevdp): currently this approach discards all information about
# shared data & indices when generating the sparsified jaxpr. The
# current approach produces valid sparsified while loops, but they
# don't work in corner cases (see associated TODO in sparsify_test.py)
out_tree: pytree.PyTreeDef | None = None
def wrapped(*args_flat):
# TODO(frostig,jakevdp): This closes over `spenv`, which can bring
# in buffers from the "outer scope" as constants. Is this a
# problem for primitives like cond and while_loop, which always
# convert constvars to invars when staging out their subjaxprs?
nonlocal out_tree
args = tree_unflatten(in_tree, args_flat)
spvalues = arrays_to_spvalues(spenv, args)
result = eval_sparse(jaxpr.jaxpr, jaxpr.consts, spvalues, spenv)
out = spvalues_to_arrays(spenv, result)
out_flat, out_tree = tree_flatten(out)
return out_flat
args = spvalues_to_arrays(spenv, spvalues)
args_flat, in_tree = tree_flatten(args)
avals_flat = [core.get_aval(arg) for arg in args_flat]
sp_jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(
lu.wrap_init(wrapped, debug_info=jaxpr.jaxpr.debug_info.with_unknown_names()), avals_flat)
sp_jaxpr = pe.ClosedJaxpr(sp_jaxpr, consts)
assert out_tree is not None
return sp_jaxpr, out_tree
def _while_sparse(spenv, *spvalues, cond_jaxpr, cond_nconsts, body_jaxpr, body_nconsts):
cond_const_spvalues, body_const_spvalues, init_val_spvalues = split_list(
spvalues, [cond_nconsts, body_nconsts])
cond_sp_jaxpr, _ = _sparsify_jaxpr(spenv, cond_jaxpr, *cond_const_spvalues, *init_val_spvalues)
body_sp_jaxpr, out_tree = _sparsify_jaxpr(spenv, body_jaxpr, *body_const_spvalues, *init_val_spvalues)
cond_consts, _ = tree_flatten(spvalues_to_arrays(spenv, cond_const_spvalues))
body_consts, _ = tree_flatten(spvalues_to_arrays(spenv, body_const_spvalues))
init_vals, _ = tree_flatten(spvalues_to_arrays(spenv, init_val_spvalues))
out_flat = lax.while_p.bind(*cond_consts, *body_consts, *init_vals,
cond_nconsts=len(cond_consts), cond_jaxpr=cond_sp_jaxpr,
body_nconsts=len(body_consts), body_jaxpr=body_sp_jaxpr)
return arrays_to_spvalues(spenv, tree_unflatten(out_tree, out_flat))
sparse_rules_bcoo[lax.while_p] = _while_sparse
def _pjit_sparse(spenv, *spvalues, jaxpr, in_shardings, out_shardings,
in_layouts, out_layouts, donated_invars, ctx_mesh, name,
keep_unused, inline, compiler_options_kvs):
if any(donated_invars):
raise NotImplementedError("sparse xla_call with donated_invars")
sp_call_jaxpr, out_tree = _sparsify_jaxpr(spenv, jaxpr, *spvalues)
args_flat, _ = tree_flatten(spvalues_to_arrays(spenv, spvalues))
donated_invars = tuple(False for arg in args_flat)
# TODO(yashkatariya, vanderplas): Flatten twice and set the correct sharding
# for data and indices.
in_shardings = in_shardings + tuple(
sharding_impls.UNSPECIFIED
for _ in range(len(args_flat) - len(in_shardings))
)
out_shardings = out_shardings + tuple(
sharding_impls.UNSPECIFIED
for _ in range(len(sp_call_jaxpr.out_avals) - len(out_shardings))
)
in_layouts = in_layouts + tuple(
None for _ in range(len(args_flat) - len(in_layouts))
)
out_layouts = out_layouts + tuple(
None for _ in range(len(sp_call_jaxpr.out_avals) - len(out_layouts))
)
out_flat = pjit.jit_p.bind(
*args_flat,
jaxpr=sp_call_jaxpr,
in_shardings=in_shardings,
out_shardings=out_shardings,
in_layouts=in_layouts,
out_layouts=out_layouts,
donated_invars=donated_invars,
ctx_mesh=ctx_mesh,
name=name,
keep_unused=keep_unused,
inline=inline,
compiler_options_kvs=compiler_options_kvs)
return arrays_to_spvalues(spenv, tree_unflatten(out_tree, out_flat))
sparse_rules_bcoo[pjit.jit_p] = _pjit_sparse
def _duplicate_for_sparse_spvalues(spvalues, params):
for spvalue, param in safe_zip(spvalues, params):
yield from [param, param] if spvalue.is_sparse() else [param]
def _scan_sparse(spenv, *spvalues, jaxpr, num_consts, num_carry, **params):
const_spvalues, carry_spvalues, xs_spvalues = split_list(
spvalues, [num_consts, num_carry])
if xs_spvalues:
# TODO(jakevdp): we don't want to pass xs_spvalues, we want to pass one row
# of xs spvalues. How to do this?
raise NotImplementedError("sparse rule for scan with x values.")
sp_jaxpr, _ = _sparsify_jaxpr(spenv, jaxpr, *const_spvalues, *carry_spvalues, *xs_spvalues)
consts, _ = tree_flatten(spvalues_to_arrays(spenv, const_spvalues))
carry, carry_tree = tree_flatten(spvalues_to_arrays(spenv, carry_spvalues))
xs, xs_tree = tree_flatten(spvalues_to_arrays(spenv, xs_spvalues))
# params['linear'] has one entry per arg; expand it to match the sparsified args.
const_linear, carry_linear, xs_linear = split_list(
params.pop('linear'), [num_consts, num_carry])
sp_linear = (
*_duplicate_for_sparse_spvalues(const_spvalues, const_linear),
*_duplicate_for_sparse_spvalues(carry_spvalues, carry_linear),
*_duplicate_for_sparse_spvalues(xs_spvalues, xs_linear))
out = lax.scan_p.bind(*consts, *carry, *xs, jaxpr=sp_jaxpr, linear=sp_linear,
num_consts=len(consts), num_carry=len(carry), **params)
carry_out = tree_unflatten(carry_tree, out[:len(carry)])
xs_out = tree_unflatten(xs_tree, out[len(carry):])
return arrays_to_spvalues(spenv, carry_out + xs_out)
sparse_rules_bcoo[lax.scan_p] = _scan_sparse
def _cond_sparse(spenv, pred, *operands, branches, **params):
sp_branches, treedefs = zip(*(_sparsify_jaxpr(spenv, jaxpr, *operands)
for jaxpr in branches))
_check_tree_and_avals("sparsified true_fun output",
treedefs[0], sp_branches[0].out_avals,
"sparsified false_fun output",
treedefs[1], sp_branches[1].out_avals)
args, _ = tree_flatten(spvalues_to_arrays(spenv, (pred, *operands)))
out_flat = lax.cond_p.bind(*args, branches=tuple(sp_branches), **params)
out = tree_unflatten(treedefs[0], out_flat)
return arrays_to_spvalues(spenv, out)
sparse_rules_bcoo[lax.cond_p] = _cond_sparse
def _todense_sparse_rule(spenv, spvalue, *, tree):
del tree # TODO(jakvdp): we should assert that tree is PytreeDef(*)
out = spvalues_to_arrays(spenv, spvalue).todense()
return (spenv.dense(out),)
sparse_rules_bcoo[sparse.todense_p] = _todense_sparse_rule
sparse_rules_bcsr[sparse.todense_p] = _todense_sparse_rule
def _custom_jvp_sparse_rule(spenv, *spvalues, **params):
call_jaxpr: core.ClosedJaxpr = params.pop('call_jaxpr')
jvp_jaxpr_fun: lu.WrappedFun = params.pop('jvp_jaxpr_fun')
num_consts: int = params.pop('num_consts')
sp_call_jaxpr, out_tree = _sparsify_jaxpr(spenv, call_jaxpr, *spvalues)
def fun(*arrs):
sparrs = arrays_to_spvalues(spenv, arrs)
out = eval_sparse(call_jaxpr.jaxpr, call_jaxpr.consts, sparrs, spenv)
return spvalues_to_arrays(spenv, out)
jvp = lift_jvp(num_consts, jvp_jaxpr_fun)
invals = spvalues_to_arrays(spenv, spvalues)
outvals = jax.custom_derivatives.custom_jvp_call_p.bind(
lu.wrap_init(fun, debug_info=call_jaxpr.jaxpr.debug_info),
jvp, *invals, **params)
return arrays_to_spvalues(spenv, outvals)
sparse_rules_bcoo[jax.custom_derivatives.custom_jvp_call_p] = _custom_jvp_sparse_rule
sparse_rules_bcsr[jax.custom_derivatives.custom_jvp_call_p] = _custom_jvp_sparse_rule
# ------------------------------------------------------------------------------
# BCOO methods derived from sparsify
# defined here to avoid circular imports
def _sum(self, *args, **kwargs):
"""Sum array along axis."""
return sparsify(lambda x: x.sum(*args, **kwargs))(self)
def _reshape(self, *args, **kwargs):
"""Returns an array containing the same data with a new shape."""
return sparsify(lambda x: x.reshape(*args, **kwargs))(self)
def _astype(self, *args, **kwargs):
"""Copy the array and cast to a specified dtype."""
return sparsify(lambda x: x.astype(*args, **kwargs))(self)
def _bcoo_rewriting_take(arr, idx, indices_are_sorted=False, unique_indices=False,
mode=None, fill_value=None):
# Only sparsify the array argument; sparse indices not yet supported
result = sparsify(functools.partial(
jnp_indexing.rewriting_take, idx=idx, indices_are_sorted=indices_are_sorted,
mode=mode, unique_indices=unique_indices, fill_value=fill_value))(arr)
# Account for a corner case in the rewriting_take implementation.
if not isinstance(result, BCOO) and np.size(result) == 0:
result = BCOO.fromdense(result)
return result
def _sparse_iter(arr):
return iter(arr[i] for i in range(arr.shape[0]))
_swap_args = lambda f: lambda a, b: f(b, a)
_bcoo_methods = {
"astype": _astype,
"reshape": _reshape,
"sum": _sum,
"__abs__": sparsify(jnp.abs),
"__neg__": sparsify(jnp.negative),
"__pos__": sparsify(jnp.positive),
"__matmul__": sparsify(jnp.matmul),
"__rmatmul__": sparsify(_swap_args(jnp.matmul)),
"__mul__": sparsify(jnp.multiply),
"__rmul__": sparsify(_swap_args(jnp.multiply)),
"__truediv__": sparsify(jnp.divide),
"__rtruediv__": sparsify(_swap_args(jnp.divide)),
"__add__": sparsify(jnp.add),
"__radd__": sparsify(_swap_args(jnp.add)),
"__sub__": sparsify(jnp.subtract),
"__rsub__": sparsify(_swap_args(jnp.subtract)),
"__pow__": lambda x, y: sparsify(lambda x: jnp.power(x, y))(x),
"__rpow__": sparsify(_swap_args(jnp.power)),
"__getitem__": _bcoo_rewriting_take,
"__iter__": _sparse_iter,
"__gt__": sparsify(jnp.greater),
"__ge__": sparsify(jnp.greater_equal),
"__lt__": sparsify(jnp.less),
"__le__": sparsify(jnp.less_equal),
"__eq__": sparsify(jnp.equal),
"__ne__": sparsify(jnp.not_equal),
}
for method, impl in _bcoo_methods.items():
setattr(BCOO, method, impl)
# ------------------------------------------------------------------------------
# BCSR methods derived from sparsify
# defined here to avoid circular imports
def _bcsr_rewriting_take(arr, idx, indices_are_sorted=False, unique_indices=False,
mode=None, fill_value=None):
# Only sparsify the array argument; sparse indices not yet supported
result = sparsify(functools.partial(
jnp_indexing.rewriting_take, idx=idx, indices_are_sorted=indices_are_sorted,
mode=mode, unique_indices=unique_indices, fill_value=fill_value))(arr)
return result
_bcoo_methods = {
"__matmul__": sparsify(jnp.matmul),
"__rmatmul__": sparsify(_swap_args(jnp.matmul)),
"__getitem__": _bcsr_rewriting_take,
}
for method, impl in _bcoo_methods.items():
setattr(BCSR, method, impl)
|
SparseTrace
|
python
|
scipy__scipy
|
scipy/ndimage/tests/test_interpolation.py
|
{
"start": 48508,
"end": 54503
}
|
class ____:
@pytest.mark.parametrize('order', range(0, 6))
def test_zoom1(self, order, xp):
for z in [2, [2, 2]]:
arr = xp.reshape(xp.arange(25, dtype=xp.float64), (5, 5))
arr = ndimage.zoom(arr, z, order=order)
assert arr.shape == (10, 10)
assert xp.all(arr[-1, :] != 0)
assert xp.all(arr[-1, :] >= (20 - eps))
assert xp.all(arr[0, :] <= (5 + eps))
assert xp.all(arr >= (0 - eps))
assert xp.all(arr <= (24 + eps))
def test_zoom2(self, xp):
arr = xp.reshape(xp.arange(12), (3, 4))
out = ndimage.zoom(ndimage.zoom(arr, 2), 0.5)
xp_assert_equal(out, arr)
def test_zoom3(self, xp):
arr = xp.asarray([[1, 2]])
out1 = ndimage.zoom(arr, (2, 1))
out2 = ndimage.zoom(arr, (1, 2))
assert_array_almost_equal(out1, xp.asarray([[1, 2], [1, 2]]))
assert_array_almost_equal(out2, xp.asarray([[1, 1, 2, 2]]))
@pytest.mark.parametrize('order', range(0, 6))
@pytest.mark.parametrize('dtype', ["float64", "complex128"])
def test_zoom_affine01(self, order, dtype, xp):
dtype = getattr(xp, dtype)
data = xp.asarray([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]], dtype=dtype)
if xp.isdtype(data.dtype, 'complex floating'):
data -= 1j * data
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
'The behavior of affine_transform with a 1-D array .* '
'has changed', UserWarning)
out = ndimage.affine_transform(data, xp.asarray([0.5, 0.5]), 0,
(6, 8), order=order)
assert_array_almost_equal(out[::2, ::2], data)
def test_zoom_infinity(self, xp):
# Ticket #1419 regression test
dim = 8
ndimage.zoom(xp.zeros((dim, dim)), 1. / dim, mode='nearest')
def test_zoom_zoomfactor_one(self, xp):
# Ticket #1122 regression test
arr = xp.zeros((1, 5, 5))
zoom = (1.0, 2.0, 2.0)
out = ndimage.zoom(arr, zoom, cval=7)
ref = xp.zeros((1, 10, 10))
assert_array_almost_equal(out, ref)
def test_zoom_output_shape_roundoff(self, xp):
arr = xp.zeros((3, 11, 25))
zoom = (4.0 / 3, 15.0 / 11, 29.0 / 25)
out = ndimage.zoom(arr, zoom)
assert out.shape == (4, 15, 29)
@pytest.mark.parametrize('zoom', [(1, 1), (3, 5), (8, 2), (8, 8)])
@pytest.mark.parametrize('mode', ['nearest', 'constant', 'wrap', 'reflect',
'mirror', 'grid-wrap', 'grid-mirror',
'grid-constant'])
def test_zoom_by_int_order0(self, zoom, mode, xp):
# order 0 zoom should be the same as replication via np.kron
# Note: This is not True for general x shapes when grid_mode is False,
# but works here for all modes because the size ratio happens to
# always be an integer when x.shape = (2, 2).
x_np = np.asarray([[0, 1],
[2, 3]], dtype=np.float64)
expected = np.kron(x_np, np.ones(zoom))
x = xp.asarray(x_np)
expected = xp.asarray(expected)
assert_array_almost_equal(
ndimage.zoom(x, zoom, order=0, mode=mode),
expected
)
    @pytest.mark.parametrize('shape', [(2, 3), (4, 4)])
    @pytest.mark.parametrize('zoom', [(1, 1), (3, 5), (8, 2), (8, 8)])
    @pytest.mark.parametrize('mode', ['nearest', 'reflect', 'mirror',
                                      'grid-wrap', 'grid-constant'])
    def test_zoom_grid_by_int_order0(self, shape, zoom, mode, xp):
        """Order-0 integer zoom with grid_mode=True equals pixel replication
        for arbitrary input shapes (unlike the non-grid case above)."""
        # When grid_mode is True, order 0 zoom should be the same as
        # replication via np.kron. The only exceptions to this are the
        # non-grid modes 'constant' and 'wrap'.
        x_np = np.arange(np.prod(shape), dtype=float).reshape(shape)
        x = xp.asarray(x_np)
        assert_array_almost_equal(
            ndimage.zoom(x, zoom, order=0, mode=mode, grid_mode=True),
            xp.asarray(np.kron(x_np, np.ones(zoom)))
        )
@pytest.mark.parametrize('mode', ['constant', 'wrap'])
def test_zoom_grid_mode_warnings(self, mode, xp):
# Warn on use of non-grid modes when grid_mode is True
x = xp.reshape(xp.arange(9, dtype=xp.float64), (3, 3))
with pytest.warns(UserWarning,
match="It is recommended to use mode"):
ndimage.zoom(x, 2, mode=mode, grid_mode=True),
    @skip_xp_backends("dask.array", reason="output=array requires buffer view")
    @skip_xp_backends("jax.numpy", reason="output=array requires buffer view")
    def test_zoom_output_shape(self, xp):
        """Ticket #643 regression: a preallocated ``output`` array with the
        zoomed shape is accepted without error."""
        x = xp.reshape(xp.arange(12), (3, 4))
        ndimage.zoom(x, 2, output=xp.zeros((6, 8)))
def test_zoom_0d_array(self, xp):
# Ticket #21670 regression test
a = xp.arange(10.)
factor = 2
actual = ndimage.zoom(a, np.array(factor))
expected = ndimage.zoom(a, factor)
xp_assert_close(actual, expected)
    @xfail_xp_backends("cupy", reason="CuPy `zoom` needs similar fix.")
    def test_zoom_1_gh20999(self, xp):
        """gh-20999: a zoom factor of 1 (scalar or per-axis) must be an
        exact no-op, not introduce numerical noise."""
        # gh-20999 reported that zoom with `zoom=1` (or sequence of ones)
        # introduced noise. Check that this is resolved.
        x = xp.eye(3)
        xp_assert_equal(ndimage.zoom(x, 1), x)
        xp_assert_equal(ndimage.zoom(x, (1, 1)), x)
    @xfail_xp_backends("cupy", reason="CuPy `zoom` needs similar fix.")
    @skip_xp_backends("jax.numpy", reason="read-only backend")
    @xfail_xp_backends("dask.array", reason="numpy round-trip")
    def test_zoom_1_gh20999_output(self, xp):
        """Same as test_zoom_1_gh20999, but writing into a preallocated
        ``output`` array: the result must equal the input exactly."""
        x = xp.eye(3)
        output = xp.zeros_like(x)
        ndimage.zoom(x, 1, output=output)
        xp_assert_equal(output, x)
@make_xp_test_case(ndimage.rotate)
|
TestZoom
|
python
|
huggingface__transformers
|
src/transformers/models/qwen3_omni_moe/modular_qwen3_omni_moe.py
|
{
"start": 14733,
"end": 18316
}
|
class ____(Qwen2_5OmniThinkerConfig):
    r"""
    This is the configuration class to store the configuration of a [`Qwen3OmniMoeThinker`]. It is used to instantiate a
    Qwen3-Omni-Thinker model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the thinker component of the Qwen3-Omni
    architecture.

    e.g. [Qwen/Qwen3-Omni-7B](https://huggingface.co/Qwen/Qwen3-Omni-7B)

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        audio_config (`dict`, *optional*):
            The config dictionary of the audio backbone.
        vision_config (`dict`, *optional*):
            The config dictionary of the vision backbone.
        text_config (`dict`, *optional*):
            The config dictionary of the text backbone.
        audio_token_id (`int`, *optional*, defaults to 151646):
            The audio token id to encode the audio prompt.
        image_token_id (`int`, *optional*, defaults to 151655):
            The image token id to encode the image prompt.
        video_token_id (`int`, *optional*, defaults to 151656):
            The video token id to encode the video prompt.
        position_id_per_seconds (`int`, *optional*, defaults to 25):
            The increment of position id per second.
        audio_start_token_id (`int`, *optional*, defaults to 151647):
            The audio start token id to encode the audio prompt.
        user_token_id (`int`, *optional*, defaults to 872):
            The user token id to encode the user token.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.

    Example:

    ```python
    >>> from transformers import Qwen3OmniMoeThinkerModel, Qwen3OmniMoeThinkerConfig

    >>> # Initializing a default Qwen3OmniMoeThinkerConfig
    >>> configuration = Qwen3OmniMoeThinkerConfig()

    >>> # Initializing a model (with random weights) from the default configuration
    >>> model = Qwen3OmniMoeThinkerModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "qwen3_omni_moe_thinker"
    # Override parent's attribute_map as we use audio_token_id directly, not audio_token_index
    attribute_map = {}

    def __init__(
        self,
        audio_config=None,
        vision_config=None,
        text_config=None,
        audio_token_id=151646,
        image_token_id=151655,
        video_token_id=151656,
        position_id_per_seconds=25,
        audio_start_token_id=151647,
        user_token_id=872,
        initializer_range=0.02,
        **kwargs,
    ):
        # NOTE(review): the parent ctor is invoked positionally; the
        # interleaved `None`s fill parent-only parameters whose resulting
        # attributes are deleted right below. This silently breaks if the
        # parent signature order ever changes -- confirm against
        # Qwen2_5OmniThinkerConfig.__init__ when updating.
        super().__init__(
            audio_config,
            vision_config,
            text_config,
            None,
            None,
            None,
            position_id_per_seconds,
            None,
            audio_start_token_id,
            None,
            user_token_id,
            initializer_range,
            **kwargs,
        )
        # Drop parent attributes that this config replaces with the
        # *_token_id values assigned below.
        del self.seconds_per_chunk
        del self.audio_token_index
        del self.image_token_index
        del self.video_token_index
        del self.audio_end_token_id
        self.audio_token_id = audio_token_id
        self.image_token_id = image_token_id
        self.video_token_id = video_token_id
|
Qwen3OmniMoeThinkerConfig
|
python
|
spyder-ide__spyder
|
spyder/plugins/shortcuts/widgets/table.py
|
{
"start": 24115,
"end": 31604
}
|
class ____(HoverRowsTableView):
    """Table view listing keyboard shortcuts with fuzzy filtering, conflict
    detection and an inline sequence editor."""

    def __init__(self, parent=None):
        HoverRowsTableView.__init__(self, parent, custom_delegate=True)
        self._parent = parent
        self.finder = None
        self.shortcut_data: List[ShortcutData] = []
        self.source_model = ShortcutsModel(self)
        self.proxy_model = ShortcutsSortFilterProxy(self)
        self.last_regex = ''

        self.proxy_model.setSourceModel(self.source_model)
        self.proxy_model.setDynamicSortFilter(True)
        self.proxy_model.setFilterByColumn(CONTEXT)
        self.proxy_model.setFilterByColumn(NAME)
        self.proxy_model.setFilterCaseSensitivity(Qt.CaseInsensitive)
        self.setModel(self.proxy_model)

        self.hideColumn(SEARCH_SCORE)
        self.setItemDelegate(HTMLDelegate(self, margin=9))
        self.setSelectionBehavior(QAbstractItemView.SelectRows)
        self.setSelectionMode(QAbstractItemView.SingleSelection)
        self.setSortingEnabled(True)
        self.setEditTriggers(QAbstractItemView.AllEditTriggers)
        self.verticalHeader().hide()

        # To highlight the entire row on hover
        self.sig_hover_index_changed.connect(
            self.itemDelegate().on_hover_index_changed
        )

    def set_shortcut_data(self, shortcut_data):
        """
        Shortcut data comes from the registration of actions on the main
        window. This allows to only display the right actions on the
        shortcut table. This also allows to display the localize text.
        """
        self.shortcut_data = shortcut_data

    def focusOutEvent(self, e):
        """Qt Override."""
        self.source_model.update_active_row()
        super().focusOutEvent(e)

    def focusInEvent(self, e):
        """Qt Override."""
        super().focusInEvent(e)
        self.selectRow(self.currentIndex().row())

    def adjust_cells(self):
        """Adjust column size based on contents."""
        self.resizeColumnsToContents()
        fm = self.horizontalHeader().fontMetrics()
        names = [fm.width(s.name + ' '*9) for s in self.source_model.shortcuts]
        if len(names) == 0:
            # This condition only applies during testing
            names = [0]
        self.setColumnWidth(NAME, max(names))
        self.horizontalHeader().setStretchLastSection(True)

    def load_shortcuts(self):
        """Load shortcuts and assign to table model."""
        # Data might be capitalized so we use lower() below.
        # See: spyder-ide/spyder/#12415
        # (Fix: the original conditional here had identical branches
        # `plugin_name if plugin_name is not None else plugin_name`.)
        shortcut_data = {
            (data.context.lower(), data.name.lower()): data.plugin_name
            for data in self.shortcut_data
        }
        shortcuts = []
        for context, name, keystr in CONF.iter_shortcuts():
            # Fix: normalize *before* the membership test. The dict keys
            # above are lower-cased, so a capitalized (context, name) from
            # CONF could never match otherwise.
            context = context.lower()
            name = name.lower()
            if (context, name) in shortcut_data:
                plugin_name = shortcut_data[(context, name)]
                shortcut = Shortcut(context, name, keystr, plugin_name)
                shortcuts.append(shortcut)
        shortcuts = sorted(
            shortcuts, key=lambda item: item.context + item.name
        )

        # Store the original order of shortcuts
        for i, shortcut in enumerate(shortcuts):
            shortcut.index = i

        self.source_model.shortcuts = shortcuts
        self.source_model.scores = [0]*len(shortcuts)
        self.source_model.rich_text = [s.name for s in shortcuts]
        self.source_model.reset()
        self.adjust_cells()
        self.sortByColumn(CONTEXT, Qt.AscendingOrder)

    def check_shortcuts(self):
        """Check shortcuts for conflicts."""
        conflicts = []
        for index, sh1 in enumerate(self.source_model.shortcuts):
            if index == len(self.source_model.shortcuts)-1:
                break
            if str(sh1.key) == '':
                continue
            for sh2 in self.source_model.shortcuts[index+1:]:
                if sh2 is sh1:
                    continue
                # A conflict requires an identical key in the same context,
                # or in the global ('_') context.
                if str(sh2.key) == str(sh1.key) \
                        and (sh1.context == sh2.context or sh1.context == '_' or
                             sh2.context == '_'):
                    conflicts.append((sh1, sh2))
        if conflicts:
            if self.parent() is not None:
                self.parent().show_this_page.emit()
            cstr = "\n".join(['%s <---> %s' % (sh1, sh2)
                              for sh1, sh2 in conflicts])
            QMessageBox.warning(self, _("Conflicts"),
                                _("The following conflicts have been "
                                  "detected:")+"\n"+cstr, QMessageBox.Ok)

    def save_shortcuts(self):
        """Save shortcuts from table model."""
        self.check_shortcuts()
        for shortcut in self.source_model.shortcuts:
            shortcut.save()

    def show_editor(self):
        """Create, setup and display the shortcut editor dialog."""
        index = self.proxy_model.mapToSource(self.currentIndex())
        row = index.row()
        shortcuts = self.source_model.shortcuts
        context = shortcuts[row].context
        name = shortcuts[row].name

        sequence_index = self.source_model.index(row, SEQUENCE)
        sequence = sequence_index.data()

        dialog = ShortcutEditor(self, context, name, sequence, shortcuts)
        if dialog.exec_():
            new_sequence = dialog.new_sequence
            self.source_model.setData(sequence_index, new_sequence)

    def set_regex(self, regex=None, reset=False):
        """Update the regex text for the shortcut finder."""
        if reset:
            text = ''
        else:
            text = self.finder.text().replace(' ', '').lower()

        self.proxy_model.set_filter(text)
        self.source_model.update_search_letters(text)
        self.sortByColumn(SEARCH_SCORE, Qt.AscendingOrder)

        if self.last_regex != regex:
            self.selectRow(0)
        self.last_regex = regex

    def next_row(self):
        """Move to next row from currently selected row."""
        row = self.currentIndex().row()
        rows = self.proxy_model.rowCount()
        if row + 1 == rows:
            row = -1
        self.selectRow(row + 1)

    def previous_row(self):
        """Move to previous row from currently selected row."""
        row = self.currentIndex().row()
        rows = self.proxy_model.rowCount()
        if row == 0:
            row = rows
        self.selectRow(row - 1)

    def keyPressEvent(self, event):
        """Qt Override."""
        key = event.key()
        if key in [Qt.Key_Enter, Qt.Key_Return]:
            self.show_editor()
        elif key in [Qt.Key_Tab]:
            self.finder.setFocus()
        elif key in [Qt.Key_Backtab]:
            self.parent().reset_btn.setFocus()
        elif key in [Qt.Key_Up, Qt.Key_Down, Qt.Key_Left, Qt.Key_Right]:
            super().keyPressEvent(event)
        elif key not in [Qt.Key_Escape, Qt.Key_Space]:
            # Typing any findable character redirects input to the finder.
            text = event.text()
            if text:
                if re.search(VALID_FINDER_CHARS, text) is not None:
                    self.finder.setFocus()
                    self.finder.setText(self.finder.text() + text.strip())
        elif key in [Qt.Key_Escape]:
            self.finder.keyPressEvent(event)

    def mouseDoubleClickEvent(self, event):
        """Qt Override."""
        self.show_editor()
|
ShortcutsTable
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/sparse_ops/sparse_ops_test.py
|
{
"start": 21749,
"end": 30884
}
|
class ____(test_util.TensorFlowTestCase):
  """Tests for sparse_ops.sparse_fill_empty_rows and its gradient op."""

  def _SparseTensorValue_5x6(self, dtype=np.int32):
    # Fixture: 5x6 sparse value where rows 2 and 4 are empty.
    ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
    val = np.array([0, 10, 13, 14, 32, 33])
    shape = np.array([5, 6])
    return sparse_tensor.SparseTensorValue(
        np.array(ind, np.int64), np.array(val, dtype), np.array(
            shape, np.int64))

  def _SparseTensor_5x6(self):
    # Same fixture as above, wrapped as a SparseTensor.
    return sparse_tensor.SparseTensor.from_value(self._SparseTensorValue_5x6())

  def _SparseTensor_String5x6(self):
    # String-valued variant of the 5x6 fixture (rows 2 and 4 empty).
    ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
    val = np.array(["a", "b", "c", "d", "e", "f"])
    shape = np.array([5, 6])
    return sparse_tensor.SparseTensor(
        constant_op.constant(ind, dtypes.int64),
        constant_op.constant(val, dtypes.string),
        constant_op.constant(shape, dtypes.int64))

  def _SparseTensor_2x6(self):
    # Fixture with no empty rows.
    ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4]])
    val = np.array([0, 10, 13, 14])
    shape = np.array([2, 6])
    return sparse_tensor.SparseTensor(
        constant_op.constant(ind, dtypes.int64),
        constant_op.constant(val, dtypes.int32),
        constant_op.constant(shape, dtypes.int64))

  def testFillNumber(self):
    # Empty rows get one default (-1) entry at column 0 and are flagged in
    # the empty-row indicator; works for both value and tensor inputs.
    with test_util.use_gpu():
      for sp_input in (self._SparseTensorValue_5x6(), self._SparseTensor_5x6()):
        sp_output, empty_row_indicator = (
            sparse_ops.sparse_fill_empty_rows(sp_input, -1))

        output, empty_row_indicator_out = self.evaluate(
            [sp_output, empty_row_indicator])

        self.assertAllEqual(
            output.indices,
            [[0, 0], [1, 0], [1, 3], [1, 4], [2, 0], [3, 2], [3, 3], [4, 0]])
        self.assertAllEqual(output.values, [0, 10, 13, 14, -1, 32, 33, -1])
        self.assertAllEqual(output.dense_shape, [5, 6])
        self.assertAllEqual(empty_row_indicator_out,
                            np.array([0, 0, 1, 0, 1]).astype(np.bool_))

  def testSparseFillEmptyRowsGradEmpty(self):
    # Gradient op accepts empty inputs and returns an empty gradient.
    with test_util.use_gpu():
      grad, _ = self.evaluate(
          sparse_ops.sparse_fill_empty_rows_grad(
              reverse_index_map=[], grad_values=[]))
      self.assertAllEqual(grad, [])

  def testSparseFillEmptyRowsGradInvalidReverseIndexMap(self):
    # On CPU, invalid indices raise assertion. On GPU, invalid indices
    # are simply ignored, for performance reasons.
    with test_util.use_gpu():
      if test_util.is_gpu_available():
        grad, _ = self.evaluate(
            sparse_ops.sparse_fill_empty_rows_grad(
                reverse_index_map=[-1, 3], grad_values=[]
            )
        )
        self.assertAllEqual(grad, [0., 0.])
      else:
        with self.assertRaisesRegex(
            (ValueError, errors.InvalidArgumentError),
            "Elements in reverse index must be in .*",
        ):
          self.evaluate(
              sparse_ops.sparse_fill_empty_rows_grad(
                  reverse_index_map=[-1, 3], grad_values=[]
              )
          )

  @test_util.run_deprecated_v1
  def testFillFloat(self):
    # Also numerically checks gradients of the filled output w.r.t. both
    # the input values and the default value.
    with self.session():
      values = constant_op.constant(
          [0.0, 10.0, 13.0, 14.0, 32.0, 33.0], dtype=dtypes.float64)
      default_value = constant_op.constant(-1.0, dtype=dtypes.float64)
      sp_input = sparse_tensor.SparseTensorValue(
          indices=np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]]),
          values=values,
          dense_shape=np.array([5, 6]))
      sp_output, empty_row_indicator = (sparse_ops.sparse_fill_empty_rows(
          sp_input, default_value))
      output, empty_row_indicator_out = self.evaluate(
          [sp_output, empty_row_indicator])

      self.assertAllEqual(output.indices, [[0, 0], [1, 0], [1, 3], [1, 4],
                                           [2, 0], [3, 2], [3, 3], [4, 0]])
      self.assertAllClose(output.values, [0, 10, 13, 14, -1, 32, 33, -1])
      self.assertAllEqual(output.dense_shape, [5, 6])
      self.assertAllEqual(empty_row_indicator_out,
                          np.array([0, 0, 1, 0, 1]).astype(np.bool_))

      values_grad_err = gradient_checker.compute_gradient_error(
          values, values.shape.as_list(), sp_output.values, [8], delta=1e-8)
      self.assertGreater(values_grad_err, 0)
      self.assertLess(values_grad_err, 1e-8)

      default_value_grad_err = gradient_checker.compute_gradient_error(
          default_value,
          default_value.shape.as_list(),
          sp_output.values, [8],
          delta=1e-8)
      self.assertGreater(default_value_grad_err, 0)
      self.assertLess(default_value_grad_err, 1e-8)

  def testFillString(self):
    # String fill runs on CPU only; empty rows are filled with "".
    with test_util.force_cpu():
      sp_input = self._SparseTensor_String5x6()
      sp_output, empty_row_indicator = (
          sparse_ops.sparse_fill_empty_rows(sp_input, ""))

      output, empty_row_indicator_out = self.evaluate(
          [sp_output, empty_row_indicator])

      self.assertAllEqual(
          output.indices,
          [[0, 0], [1, 0], [1, 3], [1, 4], [2, 0], [3, 2], [3, 3], [4, 0]])
      self.assertAllEqual(output.values,
                          [b"a", b"b", b"c", b"d", b"", b"e", b"f", b""])
      self.assertAllEqual(output.dense_shape, [5, 6])
      self.assertAllEqual(empty_row_indicator_out,
                          np.array([0, 0, 1, 0, 1]).astype(np.bool_))

  def testNoEmptyRows(self):
    # Input with no empty rows passes through unchanged.
    with test_util.use_gpu():
      sp_input = self._SparseTensor_2x6()
      sp_output, empty_row_indicator = (
          sparse_ops.sparse_fill_empty_rows(sp_input, -1))

      output, empty_row_indicator_out = self.evaluate(
          [sp_output, empty_row_indicator])

      self.assertAllEqual(output.indices, [[0, 0], [1, 0], [1, 3], [1, 4]])
      self.assertAllEqual(output.values, [0, 10, 13, 14])
      self.assertAllEqual(output.dense_shape, [2, 6])
      self.assertAllEqual(empty_row_indicator_out, np.zeros(2).astype(np.bool_))

  def testNoEmptyRowsAndUnordered(self):
    # Unordered indices are reordered row-major when no fill is needed.
    with test_util.use_gpu():
      sp_input = sparse_tensor.SparseTensor(
          indices=np.array([[1, 2], [1, 3], [0, 1], [0, 3]]),
          values=np.array([1, 3, 2, 4]),
          dense_shape=np.array([2, 5]))
      sp_output, empty_row_indicator = (
          sparse_ops.sparse_fill_empty_rows(sp_input, -1))

      output, empty_row_indicator_out = self.evaluate(
          [sp_output, empty_row_indicator])

      self.assertAllEqual(output.indices, [[0, 1], [0, 3], [1, 2], [1, 3]])
      self.assertAllEqual(output.values, [2, 4, 1, 3])
      self.assertAllEqual(output.dense_shape, [2, 5])
      self.assertAllEqual(empty_row_indicator_out, np.zeros(2).astype(np.bool_))

  def testUnordered(self):
    # Unordered input with an empty middle row: the fill entry is inserted,
    # note that within-row order of the original entries is preserved as-is.
    with test_util.use_gpu():
      sp_input = sparse_tensor.SparseTensor(
          indices=np.array([[2, 3], [2, 2], [0, 1], [0, 3]]),
          values=np.array([1, 3, 2, 4]),
          dense_shape=np.array([3, 5]))
      sp_output, empty_row_indicator = (
          sparse_ops.sparse_fill_empty_rows(sp_input, -1))

      output, empty_row_indicator_out = self.evaluate(
          [sp_output, empty_row_indicator])

      self.assertAllEqual(output.indices,
                          [[0, 1], [0, 3], [1, 0], [2, 3], [2, 2]])
      self.assertAllEqual(output.values, [2, 4, -1, 1, 3])
      self.assertAllEqual(output.dense_shape, [3, 5])
      self.assertAllEqual(empty_row_indicator_out, [False, True, False])

  def testEmptyIndicesTensor(self):
    # A fully-empty sparse tensor gets every row filled.
    with test_util.use_gpu():
      sp_input = sparse_tensor.SparseTensor(
          indices=np.ones([0, 2]),
          values=np.ones([0]),
          dense_shape=np.array([2, 5]))
      sp_output, empty_row_indicator = (
          sparse_ops.sparse_fill_empty_rows(sp_input, -1))

      output, empty_row_indicator_out = self.evaluate(
          [sp_output, empty_row_indicator])

      self.assertAllEqual(output.indices, [[0, 0], [1, 0]])
      self.assertAllEqual(output.values, [-1, -1])
      self.assertAllEqual(output.dense_shape, [2, 5])
      self.assertAllEqual(empty_row_indicator_out, np.ones(2).astype(np.bool_))

  def testEmptyOutput(self):
    # Zero rows in => zero rows out, with an empty indicator.
    with test_util.use_gpu():
      sp_input = sparse_tensor.SparseTensor(
          indices=np.ones([0, 2]),
          values=np.ones([0]),
          dense_shape=np.array([0, 3]))
      sp_output, empty_row_indicator = (
          sparse_ops.sparse_fill_empty_rows(sp_input, -1))

      output, empty_row_indicator_out = self.evaluate(
          [sp_output, empty_row_indicator])

      self.assertAllEqual(output.indices, np.ones([0, 2]))
      self.assertAllEqual(output.values, np.ones([0]))
      self.assertAllEqual(output.dense_shape, [0, 3])
      self.assertAllEqual(empty_row_indicator_out, [])

  def testInvalidIndices(self):
    # Row index 99 is out of range for dense_shape [2, 5] and must raise.
    with test_util.use_gpu():
      sp_input = sparse_tensor.SparseTensor(
          indices=np.array([[1, 2], [1, 3], [99, 1], [99, 3]]),
          values=np.array([1, 3, 2, 4]),
          dense_shape=np.array([2, 5]))
      with self.assertRaisesRegex(errors.InvalidArgumentError,
                                  r"indices\(2, 0\) is invalid"):
        self.evaluate(sparse_ops.sparse_fill_empty_rows(sp_input, -1))
|
SparseFillEmptyRowsTest
|
python
|
ray-project__ray
|
python/ray/dag/tests/experimental/test_torch_tensor_transport.py
|
{
"start": 6200,
"end": 9055
}
|
class ____:
    """Tests driver to worker tensor transport with GPU device."""

    def create_and_execute_dag(self, actor, device, tensor_input, is_dict=False):
        """Create a DAG with tensor transport and execute it."""
        # NOTE(review): this helper appears unused -- the tests below call the
        # module-level `run_driver_to_worker_dag` instead. Confirm whether it
        # should be removed or the tests migrated to it.
        with InputNode() as inp:
            method = actor.echo_dict_device if is_dict else actor.echo_device
            dag = method.bind(inp.with_tensor_transport(device=device))
        compiled_dag = dag.experimental_compile()
        return compiled_dag.execute(tensor_input)

    def test_src_cpu_tensor_dst_cpu_node(self, ray_start_regular):
        # Destination actor has no GPU: requesting cuda transport must fail.
        actor = Actor.remote()
        ref = run_driver_to_worker_dag(actor, "cuda", torch.tensor([1]))
        with pytest.raises(
            RayTaskError, match="RuntimeError: No CUDA GPUs are available"
        ):
            ray.get(ref)

    @pytest.mark.skipif(not USE_GPU, reason="Test requires GPU")
    def test_src_gpu_tensor_dst_cpu_node(self, ray_start_regular):
        # Source tensor is on GPU but the actor has no GPU: still fails.
        actor = Actor.remote()
        ref = run_driver_to_worker_dag(actor, "cuda", torch.tensor([1], device="cuda"))
        with pytest.raises(
            RayTaskError, match="RuntimeError: No CUDA GPUs are available"
        ):
            ray.get(ref)

    @pytest.mark.skipif(not USE_GPU, reason="Test requires GPU")
    def test_src_cpu_tensor_dst_gpu_node(self, ray_start_regular):
        # CPU tensor is moved onto the destination actor's GPU.
        actor = Actor.options(num_gpus=1).remote()
        ref = run_driver_to_worker_dag(actor, "cuda", torch.tensor([1]))
        assert ray.get(ref) == "cuda:0"

    @pytest.mark.skipif(not USE_GPU, reason="Test requires GPU")
    def test_src_gpu_tensor_dst_gpu_node(self, ray_start_regular):
        # GPU tensor stays on GPU at the destination.
        actor = Actor.options(num_gpus=1).remote()
        ref = run_driver_to_worker_dag(actor, "cuda", torch.tensor([1], device="cuda"))
        assert ray.get(ref) == "cuda:0"

    @pytest.mark.skipif(not USE_GPU, reason="Test requires GPU")
    def test_src_mix_tensors_dst_cpu_node(self, ray_start_regular):
        # Mixed CPU/GPU dict to a GPU-less actor: transport must fail.
        actor = Actor.remote()
        tensor_dict = {
            "cpu_tensor": torch.tensor([1]),
            "gpu_tensor": torch.tensor([1], device="cuda"),
        }
        ref = run_driver_to_worker_dag(actor, "cuda", tensor_dict, is_dict=True)
        with pytest.raises(
            RayTaskError, match="RuntimeError: No CUDA GPUs are available"
        ):
            ray.get(ref)

    @pytest.mark.skipif(not USE_GPU, reason="Test requires GPU")
    def test_src_mix_tensors_dst_gpu_node(self, ray_start_regular):
        # Mixed CPU/GPU dict: every tensor lands on the destination GPU.
        actor = Actor.options(num_gpus=1).remote()
        tensor_dict = {
            "cpu_tensor": torch.tensor([1]),
            "gpu_tensor": torch.tensor([1], device="cuda"),
        }
        ref = run_driver_to_worker_dag(actor, "cuda", tensor_dict, is_dict=True)
        assert ray.get(ref) == {"cpu_tensor": "cuda:0", "gpu_tensor": "cuda:0"}
TestDriverToWorkerDeviceGPU
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/nn_ops/lrn_op_test.py
|
{
"start": 1312,
"end": 6841
}
|
class ____(test.TestCase):
  """Tests for the local response normalization (LRN) op and its gradient."""

  def _LRN(self, input_image, lrn_depth_radius=5, bias=1.0, alpha=1.0,
           beta=0.5):
    """Compute expected result."""
    # Reference implementation: for every position, normalize by a window of
    # `2 * lrn_depth_radius + 1` channels centered on the current one.
    output = copy.deepcopy(input_image)
    batch_size = input_image.shape[0]
    rows = input_image.shape[1]
    cols = input_image.shape[2]
    depth = input_image.shape[3]
    for b in range(batch_size):
      for r in range(rows):
        for c in range(cols):
          for d in range(depth):
            begin = max(0, d - lrn_depth_radius)
            end = min(depth, d + lrn_depth_radius + 1)
            patch = input_image[b, r, c, begin:end]
            output[b, r, c, d] /= (
                np.power(bias + alpha * np.sum(patch * patch), beta))
    return output

  def _RunAndVerify(self, dtype):
    """Run LRN with random parameters and compare to the reference."""
    with self.cached_session():
      # random shape
      shape = np.random.randint(1, 16, size=4)
      # Make depth at least 2 to make it meaningful
      shape[3] += 1
      p = array_ops.placeholder(dtype, shape=shape)
      # random depth_radius, bias, alpha, beta. cuDNN requires depth_radius to
      # be in [1, 7].
      lrn_depth_radius = np.random.randint(1, min(8, shape[3]))
      bias = 1.0 + np.random.rand()
      alpha = 2.0 * np.random.rand()
      # cuDNN requires beta >= 0.01.
      beta = 0.01 + 2.0 * np.random.rand()
      lrn_t = nn.local_response_normalization(
          p,
          name="lrn",
          depth_radius=lrn_depth_radius,
          bias=bias,
          alpha=alpha,
          beta=beta)
      params = {p: np.random.rand(*shape).astype("f")}
      result = lrn_t.eval(feed_dict=params)
    expected = self._LRN(
        params[p],
        lrn_depth_radius=lrn_depth_radius,
        bias=bias,
        alpha=alpha,
        beta=beta)
    err = np.amax(np.abs(result - expected))
    print("LRN error for bias ", bias, "alpha ", alpha, " beta ", beta, " is ",
          err)
    # Consistency fix: use assertLess (as _RunAndVerifyGradients already
    # does) instead of assertTrue(err < tol) for informative failures.
    if dtype == dtypes.float32:
      self.assertLess(err, 1e-4)
    else:
      self.assertLess(err, 1e-2)
    self.assertShapeEqual(expected, lrn_t)

  @test_util.run_deprecated_v1
  def testCompute(self):
    for _ in range(2):
      self._RunAndVerify(dtypes.float32)
      # Enable when LRN supports tf.float16 on GPU.
      if not test.is_gpu_available():
        self._RunAndVerify(dtypes.float16)

  @test_util.run_deprecated_v1
  def testGradientsZeroInput(self):
    # With alpha=0 and beta=1 the op divides by `bias`=1, so the gradient of
    # an all-zero input is all ones.
    with self.session():
      shape = [4, 4, 4, 4]
      p = array_ops.placeholder(dtypes.float32, shape=shape)
      inp_array = np.zeros(shape).astype("f")
      lrn_op = nn.local_response_normalization(p, 2, 1.0, 0.0, 1.0, name="lrn")
      grad = gradients_impl.gradients([lrn_op], [p])[0]
      params = {p: inp_array}
      r = grad.eval(feed_dict=params)
    expected = np.ones(shape).astype("f")
    self.assertAllClose(r, expected)
    self.assertShapeEqual(expected, grad)

  @test_util.run_in_graph_and_eager_modes
  def testIncompatibleInputAndOutputImageShapes(self):
    # lrn_grad must reject an output_image whose rank/shape does not match
    # the input.
    depth_radius = 1
    bias = 1.59018219
    alpha = 0.117728651
    beta = 0.404427052
    input_grads = random_ops.random_uniform(
        shape=[4, 4, 4, 4],
        minval=-10000,
        maxval=10000,
        dtype=dtypes.float32,
        seed=-2033)
    input_image = random_ops.random_uniform(
        shape=[4, 4, 4, 4],
        minval=-10000,
        maxval=10000,
        dtype=dtypes.float32,
        seed=-2033)
    invalid_output_image = random_ops.random_uniform(
        shape=[4, 4, 4, 4, 4, 4],
        minval=-10000,
        maxval=10000,
        dtype=dtypes.float32,
        seed=-2033)
    with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
      self.evaluate(
          nn.lrn_grad(
              input_grads=input_grads,
              input_image=input_image,
              output_image=invalid_output_image,
              depth_radius=depth_radius,
              bias=bias,
              alpha=alpha,
              beta=beta))

  def _RunAndVerifyGradients(self, dtype):
    """Numerically check LRN gradients for random parameters."""
    with self.cached_session():
      # random shape
      shape = np.random.randint(1, 5, size=4)
      # Make depth at least 2 to make it meaningful
      shape[3] += 1
      # random depth_radius, bias, alpha, beta. cuDNN requires depth_radius to
      # be in [1, 7].
      lrn_depth_radius = np.random.randint(1, min(8, shape[3]))
      bias = 1.0 + np.random.rand()
      alpha = 1.0 * np.random.rand()
      # cuDNN requires beta >= 0.01.
      beta = 0.01 + 1.0 * np.random.rand()
      if dtype == dtypes.float32:
        inp_array = np.random.rand(*shape).astype(np.float32)
      else:
        inp_array = np.random.rand(*shape).astype(np.float16)

      inp = constant_op.constant(
          list(inp_array.ravel(order="C")), shape=shape, dtype=dtype)
      lrn_op = nn.local_response_normalization(
          inp,
          name="lrn",
          depth_radius=lrn_depth_radius,
          bias=bias,
          alpha=alpha,
          beta=beta)
      err = gradient_checker.compute_gradient_error(inp, shape, lrn_op, shape)
    print("LRN Gradient error for bias ", bias, "alpha ", alpha, " beta ", beta,
          " is ", err)
    if dtype == dtypes.float32:
      self.assertLess(err, 1e-4)
    else:
      self.assertLess(err, 1.0)

  @test_util.run_deprecated_v1
  def testGradients(self):
    for _ in range(2):
      self._RunAndVerifyGradients(dtypes.float32)
      # Enable when LRN supports tf.float16 on GPU.
      if not test.is_gpu_available():
        self._RunAndVerifyGradients(dtypes.float16)
# Run this module's tests when executed as a script.
if __name__ == "__main__":
  test.main()
|
LRNOpTest
|
python
|
eriklindernoren__ML-From-Scratch
|
mlfromscratch/supervised_learning/random_forest.py
|
{
"start": 495,
"end": 3683
}
|
class ____():
    """Random Forest classifier. Uses a collection of classification trees that
    trains on random subsets of the data using a random subsets of the features.

    Parameters:
    -----------
    n_estimators: int
        The number of classification trees that are used.
    max_features: int
        The maximum number of features that the classification trees are allowed to
        use.
    min_samples_split: int
        The minimum number of samples needed to make a split when building a tree.
    min_gain: float
        The minimum impurity required to split the tree further.
    max_depth: int
        The maximum depth of a tree.
    """
    def __init__(self, n_estimators=100, max_features=None, min_samples_split=2,
                 min_gain=0, max_depth=float("inf")):
        self.n_estimators = n_estimators    # Number of trees
        self.max_features = max_features    # Maximum number of features per tree
        self.min_samples_split = min_samples_split
        self.min_gain = min_gain            # Minimum information gain req. to continue
        self.max_depth = max_depth          # Maximum depth for tree
        self.progressbar = progressbar.ProgressBar(widgets=bar_widgets)

        # Initialize decision trees
        self.trees = []
        for _ in range(n_estimators):
            self.trees.append(
                ClassificationTree(
                    min_samples_split=self.min_samples_split,
                    min_impurity=min_gain,
                    max_depth=self.max_depth))

    def fit(self, X, y):
        """Build the forest: one tree per bootstrapped (data, feature) subset."""
        n_features = np.shape(X)[1]
        # If max_features have not been defined => select it as
        # sqrt(n_features)
        if not self.max_features:
            self.max_features = int(math.sqrt(n_features))

        # Choose one random subset of the data for each tree
        subsets = get_random_subsets(X, y, self.n_estimators)

        for i in self.progressbar(range(self.n_estimators)):
            X_subset, y_subset = subsets[i]
            # Feature bagging (select random subsets of the features).
            # Fix: sample WITHOUT replacement so each tree sees max_features
            # distinct features; sampling with replacement duplicated columns
            # and shrank the effective feature subset.
            idx = np.random.choice(n_features, size=self.max_features, replace=False)
            # Save the indices of the features for prediction
            self.trees[i].feature_indices = idx
            # Choose the features corresponding to the indices
            X_subset = X_subset[:, idx]
            # Fit the tree to the data
            self.trees[i].fit(X_subset, y_subset)

    def predict(self, X):
        """Predict class labels for X by majority vote over all trees."""
        y_preds = np.empty((X.shape[0], len(self.trees)))
        # Let each tree make a prediction on the data
        for i, tree in enumerate(self.trees):
            # Indices of the features that the tree has trained on
            idx = tree.feature_indices
            # Make a prediction based on those features
            y_preds[:, i] = tree.predict(X[:, idx])
        # For each sample, select the most common class prediction
        return [np.bincount(row.astype('int')).argmax() for row in y_preds]
|
RandomForest
|
python
|
crytic__slither
|
slither/solc_parsing/declarations/event_contract.py
|
{
"start": 373,
"end": 2031
}
|
class ____:
    """
    EventContract class
    """

    def __init__(
        self, event: EventContract, event_data: Dict, contract_parser: "ContractSolc"
    ) -> None:
        """Read the event's name and stash its parameter nodes for `analyze`.

        Args:
            event: The EventContract object to populate.
            event_data: Raw AST node of the event (compact or legacy form).
            contract_parser: Parser of the enclosing contract.
        """
        self._event = event
        self._contract_parser = contract_parser
        # Robustness fix: default to "nothing to parse" so `analyze` is safe
        # even when a legacy AST carries no ParameterList child (previously
        # the attribute could be left unset in that case).
        self._elemsNotParsed = []

        if self.is_compact_ast:
            self._event.name = event_data["name"]
            elems = event_data["parameters"]
            assert elems["nodeType"] == "ParameterList"
            self._elemsNotParsed = elems["parameters"]
        else:
            self._event.name = event_data["attributes"]["name"]
            for elem in event_data["children"]:
                # From Solidity 0.6.3 to 0.6.10 (included)
                # Comment above a event might be added in the children
                # of an event for the legacy ast
                if elem["name"] == "ParameterList":
                    if "children" in elem:
                        self._elemsNotParsed = elem["children"]

    @property
    def is_compact_ast(self) -> bool:
        """Whether the enclosing contract was parsed from the compact AST."""
        return self._contract_parser.is_compact_ast

    def analyze(self) -> None:
        """Build an EventVariable for every stashed parameter node."""
        for elem_to_parse in self._elemsNotParsed:
            elem = EventVariable()

            # Todo: check if the source offset is always here
            if "src" in elem_to_parse:
                elem.set_offset(elem_to_parse["src"], self._contract_parser.compilation_unit)

            elem_parser = EventVariableSolc(elem, elem_to_parse)
            elem_parser.analyze(self._contract_parser)

            self._event.elems.append(elem)
        self._elemsNotParsed = []
|
EventContractSolc
|
python
|
pytorch__pytorch
|
torch/utils/weak.py
|
{
"start": 11545,
"end": 12267
}
|
class ____:
    """Weak reference to a Tensor.

    Unwrapping a Tensor weakref requires calling ``_fix_weakref()`` on the
    resurrected object; ``__call__`` takes care of that before handing the
    tensor back (or returns None if it has been collected).
    """

    ref: WeakRef[Tensor]

    def __init__(self, tensor: Tensor) -> None:
        if isinstance(tensor, Tensor):
            self.ref = weakref.ref(tensor)
        else:
            raise AssertionError(f"expected torch.Tensor, got {type(tensor)}.")

    def __call__(self):
        target = self.ref()
        if target is not None:
            if not isinstance(target, Tensor):
                raise AssertionError(f"expected torch.Tensor, got {type(target)}.")
            # TODO, add _fix_weakref type binding
            target._fix_weakref()  # type: ignore[attr-defined]
        return target
|
TensorWeakRef
|
python
|
django-extensions__django-extensions
|
django_extensions/management/jobs.py
|
{
"start": 205,
"end": 404
}
|
class ____:
    """Base class for jobs; concrete jobs must implement `execute`."""

    # Human-readable description shown for the job.
    help = "undefined job description."
    # Schedule bucket the job belongs to; None means unscheduled.
    when = None  # type: Optional[str]

    def execute(self):
        """Run the job. Subclasses must override this."""
        raise NotImplementedError("Job needs to implement the execute method")
|
BaseJob
|
python
|
wandb__wandb
|
wandb/wandb_agent.py
|
{
"start": 5274,
"end": 18821
}
|
class ____:
POLL_INTERVAL = 5
REPORT_INTERVAL = 0
KILL_DELAY = 30
FLAPPING_MAX_SECONDS = 60
FLAPPING_MAX_FAILURES = 3
MAX_INITIAL_FAILURES = 5
DEFAULT_SWEEP_COMMAND: List[str] = [
"${env}",
"${interpreter}",
"${program}",
"${args}",
]
SWEEP_COMMAND_ENV_VAR_REGEX = re.compile(r"\$\{envvar\:([A-Z0-9_]*)\}")
    def __init__(
        self, api, queue, sweep_id=None, function=None, in_jupyter=None, count=None
    ):
        """Initialize agent state and validate configured intervals.

        Args:
            api: Backend API client (used in `run` to register the agent and
                fetch the sweep config).
            queue: Queue of commands the agent processes in its run loop.
            sweep_id: Identifier of the sweep this agent serves.
            function: Optional callable to run instead of a sweep command.
            in_jupyter: Whether the agent runs inside a Jupyter session.
            count: Maximum number of runs to launch (None for unlimited).

        Raises:
            AgentError: If the report interval or kill delay resolves to None.
        """
        self._api = api
        self._queue = queue
        self._run_processes = {}  # keyed by run.id (GQL run name)
        self._server_responses = []
        self._sweep_id = sweep_id
        self._in_jupyter = in_jupyter
        self._log = []
        self._running = True
        self._last_report_time = None
        self._function = function
        # Class defaults may be overridden via wandb environment variables.
        self._report_interval = wandb.env.get_agent_report_interval(
            self.REPORT_INTERVAL
        )
        self._kill_delay = wandb.env.get_agent_kill_delay(self.KILL_DELAY)
        self._finished = 0
        self._failed = 0
        self._count = count
        self._sweep_command = []
        self._max_initial_failures = wandb.env.get_agent_max_initial_failures(
            self.MAX_INITIAL_FAILURES
        )
        if self._report_interval is None:
            raise AgentError("Invalid agent report interval")
        if self._kill_delay is None:
            raise AgentError("Invalid agent kill delay")
        # if the directory to log to is not set, set it
        if os.environ.get("WANDB_DIR") is None:
            os.environ["WANDB_DIR"] = os.path.abspath(os.getcwd())
def is_flapping(self):
"""Determine if the process is flapping.
Flapping occurs if the agents receives FLAPPING_MAX_FAILURES non-0 exit codes in
the first FLAPPING_MAX_SECONDS.
"""
if os.getenv(wandb.env.AGENT_DISABLE_FLAPPING) == "true":
return False
if time.time() < wandb.START_TIME + self.FLAPPING_MAX_SECONDS:
return self._failed >= self.FLAPPING_MAX_FAILURES
def is_failing(self):
return (
self._failed >= self._finished
and self._max_initial_failures <= self._failed
)
def run(self): # noqa: C901
# TODO: catch exceptions, handle errors, show validation warnings, and make more generic
import yaml
sweep_obj = self._api.sweep(self._sweep_id, "{}")
if sweep_obj:
sweep_yaml = sweep_obj.get("config")
if sweep_yaml:
sweep_config = yaml.safe_load(sweep_yaml)
if sweep_config:
sweep_command = sweep_config.get("command")
if sweep_command and isinstance(sweep_command, list):
self._sweep_command = sweep_command
# TODO: include sweep ID
agent = self._api.register_agent(socket.gethostname(), sweep_id=self._sweep_id)
agent_id = agent["id"]
try:
while self._running:
commands = util.read_many_from_queue(
self._queue, 100, self.POLL_INTERVAL
)
for command in commands:
command["resp_queue"].put(self._process_command(command))
now = util.stopwatch_now()
if self._last_report_time is None or (
self._report_interval != 0
and now > self._last_report_time + self._report_interval
):
logger.info("Running runs: %s", list(self._run_processes.keys()))
self._last_report_time = now
run_status = {}
for run_id, run_process in list(self._run_processes.items()):
poll_result = run_process.poll()
if poll_result is None:
run_status[run_id] = True
continue
elif (
not isinstance(poll_result, bool)
and isinstance(poll_result, int)
and poll_result > 0
):
self._failed += 1
if self.is_flapping():
logger.error(
"Detected %i failed runs in the first %i seconds, shutting down.",
self.FLAPPING_MAX_FAILURES,
self.FLAPPING_MAX_SECONDS,
)
logger.info(
"To disable this check set WANDB_AGENT_DISABLE_FLAPPING=true"
)
self._running = False
break
if self.is_failing():
logger.error(
"Detected %i failed runs in a row, shutting down.",
self._max_initial_failures,
)
logger.info(
"To change this value set WANDB_AGENT_MAX_INITIAL_FAILURES=val"
)
self._running = False
break
logger.info("Cleaning up finished run: %s", run_id)
# wandb.teardown() was added with wandb service and is a hammer to make
# sure that active runs are finished before moving on to another agent run
#
# In the future, a lighter weight way to implement this could be to keep a
# service process open for all the agent instances and inform_finish when
# the run should be marked complete. This however could require
# inform_finish on every run created by this process.
if hasattr(wandb, "teardown"):
exit_code = 0
if isinstance(poll_result, int):
exit_code = poll_result
elif isinstance(poll_result, bool):
exit_code = -1
wandb.teardown(exit_code)
del self._run_processes[run_id]
self._last_report_time = None
self._finished += 1
if self._count and self._finished >= self._count or not self._running:
self._running = False
continue
commands = self._api.agent_heartbeat(agent_id, {}, run_status)
# TODO: send _server_responses
self._server_responses = []
for command in commands:
self._server_responses.append(self._process_command(command))
except KeyboardInterrupt:
try:
wandb.termlog(
"Ctrl-c pressed. Waiting for runs to end. Press ctrl-c again to terminate them."
)
for _, run_process in self._run_processes.items():
run_process.wait()
except KeyboardInterrupt:
pass
finally:
try:
if not self._in_jupyter:
wandb.termlog("Terminating and syncing runs. Press ctrl-c to kill.")
for _, run_process in self._run_processes.items():
try:
run_process.terminate()
except OSError:
pass # if process is already dead
for _, run_process in self._run_processes.items():
run_process.wait()
except KeyboardInterrupt:
wandb.termlog("Killing runs and quitting.")
for _, run_process in self._run_processes.items():
try:
run_process.kill()
except OSError:
pass # if process is already dead
def _process_command(self, command):
logger.info(
"Agent received command: %s"
% (command["type"] if "type" in command else "Unknown")
)
response = {
"id": command.get("id"),
"result": None,
}
try:
command_type = command["type"]
if command_type == "run":
result = self._command_run(command)
elif command_type == "stop":
result = self._command_stop(command)
elif command_type == "exit":
result = self._command_exit(command)
elif command_type == "resume":
result = self._command_run(command)
else:
raise AgentError(f"No such command: {command_type}") # noqa: TRY301
response["result"] = result
except Exception:
logger.exception("Exception while processing command: %s", command)
ex_type, ex, tb = sys.exc_info()
response["exception"] = f"{ex_type.__name__}: {str(ex)}"
response["traceback"] = traceback.format_tb(tb)
del tb
self._log.append((command, response))
return response
def _command_run(self, command):
from wandb.sdk.launch.sweeps import utils as sweep_utils
logger.info(
"Agent starting run with config:\n"
+ "\n".join(
["\t{}: {}".format(k, v["value"]) for k, v in command["args"].items()]
)
)
if self._in_jupyter:
wandb.termlog(
f"Agent Starting Run: {command.get('run_id')} with config:\n"
+ "\n".join(
[f"\t{k}: {v['value']}" for k, v in command["args"].items()]
)
)
# Setup sweep command
sweep_command: List[str] = sweep_utils.create_sweep_command(self._sweep_command)
run_id = command.get("run_id")
sweep_id = os.environ.get(wandb.env.SWEEP_ID)
# TODO(jhr): move into settings
config_file = os.path.join(
"wandb", "sweep-" + sweep_id, "config-" + run_id + ".yaml"
)
json_file = os.path.join(
"wandb", "sweep-" + sweep_id, "config-" + run_id + ".json"
)
os.environ[wandb.env.RUN_ID] = run_id
base_dir = os.environ.get(wandb.env.DIR, "")
sweep_param_path = os.path.join(base_dir, config_file)
os.environ[wandb.env.SWEEP_PARAM_PATH] = sweep_param_path
config_util.save_config_file_from_dict(sweep_param_path, command["args"])
env = dict(os.environ)
sweep_vars: Dict[str, Any] = sweep_utils.create_sweep_command_args(command)
if "${args_json_file}" in sweep_command:
with open(json_file, "w") as fp:
fp.write(sweep_vars["args_json"][0])
if self._function:
# make sure that each run regenerates setup singleton
wandb.teardown()
proc = AgentProcess(
function=self._function,
env=env,
run_id=run_id,
in_jupyter=self._in_jupyter,
)
else:
sweep_vars["interpreter"] = ["python"]
sweep_vars["program"] = [command["program"]]
sweep_vars["args_json_file"] = [json_file]
if not platform.system() == "Windows":
sweep_vars["env"] = ["/usr/bin/env"]
command_list = []
for c in sweep_command:
c = str(c)
if c.startswith("${") and c.endswith("}"):
replace_list = sweep_vars.get(c[2:-1])
command_list += replace_list or []
else:
command_list += [c]
logger.info(
"About to run command: {}".format(
" ".join(f'"{c}"' if " " in c else c for c in command_list)
)
)
proc = AgentProcess(command=command_list, env=env)
self._run_processes[run_id] = proc
# we keep track of when we sent the sigterm to give processes a chance
# to handle the signal before sending sigkill every heartbeat
self._run_processes[run_id].last_sigterm_time = None
self._last_report_time = None
def _command_stop(self, command):
run_id = command["run_id"]
if run_id in self._run_processes:
proc = self._run_processes[run_id]
now = util.stopwatch_now()
if proc.last_sigterm_time is None:
proc.last_sigterm_time = now
logger.info("Stop: %s", run_id)
try:
proc.terminate()
except OSError: # if process is already dead
pass
elif now > proc.last_sigterm_time + self._kill_delay:
logger.info("Kill: %s", run_id)
try:
proc.kill()
except OSError: # if process is already dead
pass
else:
logger.error("Run %s not running", run_id)
def _command_exit(self, command):
logger.info("Received exit command. Killing runs and quitting.")
for _, proc in self._run_processes.items():
try:
proc.kill()
except OSError:
# process is already dead
pass
self._running = False
|
Agent
|
python
|
pytest-dev__pytest
|
src/_pytest/warning_types.py
|
{
"start": 2097,
"end": 2272
}
|
class ____(PytestWarning):
"""Warning emitted on use of unknown markers.
See :ref:`mark` for details.
"""
__module__ = "pytest"
@final
|
PytestUnknownMarkWarning
|
python
|
huggingface__transformers
|
src/transformers/models/auto/modeling_auto.py
|
{
"start": 86992,
"end": 87245
}
|
class ____(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING
AutoModelForSemanticSegmentation = auto_class_update(
AutoModelForSemanticSegmentation, head_doc="semantic segmentation"
)
|
AutoModelForSemanticSegmentation
|
python
|
pandas-dev__pandas
|
pandas/tests/window/test_numba.py
|
{
"start": 20618,
"end": 22246
}
|
class ____:
@pytest.mark.parametrize(
"is_max, has_nan, exp_list",
[
(True, False, [3.0, 5.0, 2.0, 5.0, 1.0, 5.0, 6.0, 7.0, 8.0, 9.0]),
(True, True, [3.0, 4.0, 2.0, 4.0, 1.0, 4.0, 6.0, 7.0, 7.0, 9.0]),
(False, False, [3.0, 2.0, 2.0, 1.0, 1.0, 0.0, 0.0, 0.0, 7.0, 0.0]),
(False, True, [3.0, 2.0, 2.0, 1.0, 1.0, 1.0, 6.0, 6.0, 7.0, 1.0]),
],
)
def test_minmax(self, is_max, has_nan, exp_list):
nan_idx = [0, 5, 8]
df = DataFrame(
{
"data": [5.0, 4.0, 3.0, 2.0, 1.0, 0.0, 6.0, 7.0, 8.0, 9.0],
"start": [2, 0, 3, 0, 4, 0, 5, 5, 7, 3],
"end": [3, 4, 4, 5, 5, 6, 7, 8, 9, 10],
}
)
if has_nan:
df.loc[nan_idx, "data"] = np.nan
expected = Series(exp_list, name="data")
r = df.data.rolling(
PrescribedWindowIndexer(df.start.to_numpy(), df.end.to_numpy())
)
if is_max:
result = r.max(engine="numba")
else:
result = r.min(engine="numba")
tm.assert_series_equal(result, expected)
def test_wrong_order(self):
start = np.array(range(5), dtype=np.int64)
end = start + 1
end[3] = end[2]
start[3] = start[2] - 1
df = DataFrame({"data": start * 1.0, "start": start, "end": end})
r = df.data.rolling(PrescribedWindowIndexer(start, end))
with pytest.raises(
ValueError, match="Start/End ordering requirement is violated at index 3"
):
r.max(engine="numba")
|
TestMinMaxNumba
|
python
|
walkccc__LeetCode
|
solutions/2261. K Divisible Elements Subarrays/2261.py
|
{
"start": 103,
"end": 599
}
|
class ____:
def countDistinct(self, nums: list[int], k: int, p: int) -> int:
ans = 0
root = TrieNode()
def insert(node: TrieNode, i: int, k: int):
nonlocal ans
if i == len(nums) or k - (nums[i] % p == 0) < 0:
return
if nums[i] not in node.children:
node.children[nums[i]] = TrieNode()
ans += 1
insert(node.children[nums[i]], i + 1, k - (nums[i] % p == 0))
for i in range(len(nums)):
insert(root, i, k)
return ans
|
Solution
|
python
|
lepture__authlib
|
authlib/integrations/flask_oauth1/resource_protector.py
|
{
"start": 324,
"end": 3842
}
|
class ____(_ResourceProtector):
"""A protecting method for resource servers. Initialize a resource
protector with the these method:
1. query_client
2. query_token,
3. exists_nonce
Usually, a ``query_client`` method would look like (if using SQLAlchemy)::
def query_client(client_id):
return Client.query.filter_by(client_id=client_id).first()
A ``query_token`` method accept two parameters, ``client_id`` and ``oauth_token``::
def query_token(client_id, oauth_token):
return Token.query.filter_by(
client_id=client_id, oauth_token=oauth_token
).first()
And for ``exists_nonce``, if using cache, we have a built-in hook to create this method::
from authlib.integrations.flask_oauth1 import create_exists_nonce_func
exists_nonce = create_exists_nonce_func(cache)
Then initialize the resource protector with those methods::
require_oauth = ResourceProtector(
app,
query_client=query_client,
query_token=query_token,
exists_nonce=exists_nonce,
)
"""
def __init__(
self, app=None, query_client=None, query_token=None, exists_nonce=None
):
self.query_client = query_client
self.query_token = query_token
self._exists_nonce = exists_nonce
self.app = app
if app:
self.init_app(app)
def init_app(self, app, query_client=None, query_token=None, exists_nonce=None):
if query_client is not None:
self.query_client = query_client
if query_token is not None:
self.query_token = query_token
if exists_nonce is not None:
self._exists_nonce = exists_nonce
methods = app.config.get("OAUTH1_SUPPORTED_SIGNATURE_METHODS")
if methods and isinstance(methods, (list, tuple)):
self.SUPPORTED_SIGNATURE_METHODS = methods
self.app = app
def get_client_by_id(self, client_id):
return self.query_client(client_id)
def get_token_credential(self, request):
return self.query_token(request.client_id, request.token)
def exists_nonce(self, nonce, request):
if not self._exists_nonce:
raise RuntimeError('"exists_nonce" function is required.')
timestamp = request.timestamp
client_id = request.client_id
token = request.token
return self._exists_nonce(nonce, timestamp, client_id, token)
def acquire_credential(self):
req = self.validate_request(
_req.method, _req.url, _req.form.to_dict(flat=True), _req.headers
)
g.authlib_server_oauth1_credential = req.credential
return req.credential
def __call__(self, scope=None):
def wrapper(f):
@functools.wraps(f)
def decorated(*args, **kwargs):
try:
self.acquire_credential()
except OAuth1Error as error:
body = dict(error.get_body())
return Response(
json.dumps(body),
status=error.status_code,
headers=default_json_headers,
)
return f(*args, **kwargs)
return decorated
return wrapper
def _get_current_credential():
return g.get("authlib_server_oauth1_credential")
current_credential = LocalProxy(_get_current_credential)
|
ResourceProtector
|
python
|
django-extensions__django-extensions
|
tests/test_find_template.py
|
{
"start": 140,
"end": 656
}
|
class ____(TestCase):
@patch("sys.stdout", new_callable=StringIO)
def test_finding_template(self, m_stdout):
call_command("find_template", "admin/change_form.html")
self.assertIn("admin/change_form.html", m_stdout.getvalue())
@patch("sys.stderr", new_callable=StringIO)
def test_should_print_error_when_template_not_found(self, m_stderr):
call_command("find_template", "not_found_template.html")
self.assertIn("No template found", m_stderr.getvalue())
|
FindTemplateTests
|
python
|
huggingface__transformers
|
src/transformers/models/albert/modeling_albert.py
|
{
"start": 25963,
"end": 29149
}
|
class ____(AlbertPreTrainedModel):
def __init__(self, config: AlbertConfig):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.albert = AlbertModel(config)
self.dropout = nn.Dropout(config.classifier_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[SequenceClassifierOutput, tuple]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
outputs = self.albert(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
return_dict=True,
**kwargs,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring
|
AlbertForSequenceClassification
|
python
|
Netflix__metaflow
|
test/test_config/config_parser.py
|
{
"start": 2137,
"end": 2735
}
|
class ____(FlowSpec):
trigger_param = Parameter(
"trigger_param",
default="",
external_trigger=True,
external_artifact=trigger_name_func,
)
cfg = Config("cfg", default_value=default_config)
req_config = Config(
"req_config", default="config_parser_requirements.txt", parser=req_parser
)
@step
def start(self):
import regex
self.lib_version = regex.__version__ # Should be '2.5.148'
self.next(self.end)
@step
def end(self):
pass
if __name__ == "__main__":
ConfigParser()
|
ConfigParser
|
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/templates.py
|
{
"start": 70928,
"end": 73077
}
|
class ____(RegexLexer):
"""
Generic
`angular2 <http://victorsavkin.com/post/119943127151/angular-2-template-syntax>`_
template lexer.
Highlights only the Angular template tags (stuff between `{{` and `}}` and
special attributes: '(event)=', '[property]=', '[(twoWayBinding)]=').
Everything else is left for a delegating lexer.
.. versionadded:: 2.1
"""
name = "Angular2"
aliases = ['ng2']
tokens = {
'root': [
(r'[^{([*#]+', Other),
# {{meal.name}}
(r'(\{\{)(\s*)', bygroups(Comment.Preproc, Text), 'ngExpression'),
# (click)="deleteOrder()"; [value]="test"; [(twoWayTest)]="foo.bar"
(r'([([]+)([\w:.-]+)([\])]+)(\s*)(=)(\s*)',
bygroups(Punctuation, Name.Attribute, Punctuation, Text, Operator, Text),
'attr'),
(r'([([]+)([\w:.-]+)([\])]+)(\s*)',
bygroups(Punctuation, Name.Attribute, Punctuation, Text)),
# *ngIf="..."; #f="ngForm"
(r'([*#])([\w:.-]+)(\s*)(=)(\s*)',
bygroups(Punctuation, Name.Attribute, Punctuation, Operator), 'attr'),
(r'([*#])([\w:.-]+)(\s*)',
bygroups(Punctuation, Name.Attribute, Punctuation)),
],
'ngExpression': [
(r'\s+(\|\s+)?', Text),
(r'\}\}', Comment.Preproc, '#pop'),
# Literals
(r':?(true|false)', String.Boolean),
(r':?"(\\\\|\\"|[^"])*"', String.Double),
(r":?'(\\\\|\\'|[^'])*'", String.Single),
(r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
r"0[xX][0-9a-fA-F]+[Ll]?", Number),
# Variabletext
(r'[a-zA-Z][\w-]*(\(.*\))?', Name.Variable),
(r'\.[\w-]+(\(.*\))?', Name.Variable),
# inline If
(r'(\?)(\s*)([^}\s]+)(\s*)(:)(\s*)([^}\s]+)(\s*)',
bygroups(Operator, Text, String, Text, Operator, Text, String, Text)),
],
'attr': [
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
|
Angular2Lexer
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/inputs.py
|
{
"start": 774,
"end": 995
}
|
class ____(graphene.InputObjectType):
assetKey = graphene.NonNull(GrapheneAssetKeyInput)
name = graphene.NonNull(graphene.String)
class Meta:
name = "AssetCheckHandleInput"
|
GrapheneAssetCheckHandleInput
|
python
|
pytorch__pytorch
|
torch/distributed/_tools/memory_tracker.py
|
{
"start": 1262,
"end": 11862
}
|
class ____:
"""
Collect and plot the memory stats at operator level.
Includes ``memories_allocated``, ``memories_active`` and ``memories_reserved``.
It also prints a summary for the top 20 operators that generate the most memories.
Example usage:
>>> # xdoctest: +SKIP(failing)
>>> net.cuda()
>>> input = input.cuda()
>>> mem_tracker = MemoryTracker()
>>> mem_tracker.start_monitor(net)
>>> net.zero_grad(True)
>>> loss = net(input)
>>> if isinstance(loss, dict):
>>> loss = loss['out']
>>> loss.sum().backward()
>>> net.zero_grad(set_to_none=True)
>>> mem_tracker.stop()
>>> mem_tracker.summary()
>>> mem_tracker.show_traces()
"""
def __init__(self) -> None:
torch._C._log_api_usage_once("torch.distributed.memory_tracker")
self._hooks: list[RemovableHandle] = []
self._operator_names: dict[str, int] = defaultdict(int)
self.memories_allocated: dict[int, dict[str, float]] = defaultdict()
self.memories_active: dict[int, dict[str, float]] = defaultdict()
self.memories_reserved: dict[int, dict[str, float]] = defaultdict()
self._markers: dict[str, int] = defaultdict(int)
self._cur_module_name: str = ""
self._op_index: int = 0
self._num_alloc_retries: int = 0
self._device_module = torch.get_device_module()
@no_type_check
def start_monitor(self, root_module: nn.Module) -> None:
"""
Register module hooks and entering ``MemoryProfileDispatchMode``.
This enables operator level memory stats can be tracked during module runtime.
"""
self._clear_state()
root_module.__setattr__("_memory_tracker_is_root", True)
for name, m in root_module.named_modules():
if m is not root_module:
m.__setattr__("_memory_tracker_is_root", False)
# fused_proxy_group does not support hooks
if ".fused_proxy_grouped_embedding_bag" in name:
continue
# hook ordering with other hooks added by users is not managed, so
# the memory stats tracked here may not completely accurate.
h1 = m.register_forward_pre_hook(self._create_pre_forward_hook(name))
h2 = m.register_forward_hook(self._create_post_forward_hook(name))
# it does not work well with jagged tensor somehow, the root cause is not
# clear and remove it for now as it does not really capture important info.
# h3 = m.register_backward_hook(self._create_backward_hook(name))
self._hooks.extend([h1, h2])
self._device_module.empty_cache()
assert getattr(self, "profile_mode", None) is None
self.profile_mode = MemoryProfileDispatchMode(self)
self.profile_mode.__enter__()
@no_type_check
def stop(self) -> None:
"""
Remove module hooks and exit ``MemoryProfileDispatchMode`` to stop tracking memory stats at operator level.
Get some aggregated stats when the memory_tracker() is enabled, like ``num_alloc_retries``.
"""
self._num_alloc_retries = self._device_module.memory_stats().get(
"num_alloc_retries", 0
)
for h in self._hooks:
h.remove()
self._hooks.clear()
assert getattr(self, "profile_mode", None) is not None
self.profile_mode.__exit__(None, None, None)
self.profile_mode = None
@no_type_check
def summary(self, top: int = 20) -> None:
"""
Print out the top operators that generate the most memories.
The number of the top operators can be configured.
"""
op_diff: dict[str, float] = defaultdict(float)
op_name, previous_allocated_memory = self.memories_allocated[0]
for i in range(1, self._op_index):
op_name, current_allocated_memory = self.memories_allocated[i]
op_diff[op_name] = current_allocated_memory - previous_allocated_memory
previous_allocated_memory = current_allocated_memory
print("------------------------------------------------")
print(f"The number of alloc retries are: {self._num_alloc_retries}")
print(f"Top {top} ops that generates memory are:")
for k, v in sorted(op_diff.items(), key=operator.itemgetter(1), reverse=True)[
:top
]:
print(f"{k}: {v}MB")
print("------------------------------------------------")
@no_type_check
def show_traces(self, path: str = "") -> None:
import matplotlib.pyplot as plt
def _plot_figure(x, y_values, labels):
min_val = min(chain.from_iterable(y_values)) * 0.999
max_val = max(chain.from_iterable(y_values)) * 1.001
plt.figure()
for y, label in zip(y_values, labels):
plt.plot(x, y, label=label)
plt.xlabel("# Operator Calls")
plt.ylabel("Memory (MB)")
plt.legend()
for marker_name, marker in self._markers.items():
if marker_name == "fw_bw_boundary":
plt.plot(
[marker, marker],
[min_val, max_val],
"r",
lw=2,
label=marker_name,
)
else:
plt.plot(
[marker, marker],
[min_val, max_val],
"k-",
lw=2,
label=marker_name,
)
if path != "":
self.load(path)
y_1 = [gb for (name, gb) in self.memories_allocated.values()]
y_2 = [gb for (name, gb) in self.memories_active.values()]
y_3 = [gb for (name, gb) in self.memories_reserved.values()]
x = list(range(len(y_1)))
# Split figures when there is big difference between
# "reserved_memory" and "allocated_memory" or "active_memory".
_plot_figure(
x,
[list(y_1), list(y_2), list(y_3)],
["allocated_memory", "active_memory", "reserved_memory"],
)
_plot_figure(x, [list(y_1)], ["allocated_memory"])
_plot_figure(x, [list(y_2)], ["active_memory"])
_plot_figure(x, [list(y_3)], ["reserved_memory"])
def save_stats(self, path: str) -> None:
"""Save the stats using pickle during runtime if users want to plot the traces in other places like notebook."""
stats = {
"memories_allocated": self.memories_allocated,
"memories_active": self.memories_active,
"memories_reserved": self.memories_reserved,
"markers": self._markers,
"num_alloc_retries": self._num_alloc_retries,
}
with open(path, "wb") as f:
pickle.dump(stats, f, pickle.HIGHEST_PROTOCOL)
def load(self, path: str) -> None:
"""Load the pickled memory stats to plot the traces or print the summary."""
with open(path, "rb") as f:
stats = pickle.load(f)
self.memories_allocated = stats["memories_allocated"]
self.memories_active = stats["memories_active"]
self.memories_reserved = stats["memories_reserved"]
self._markers = stats["markers"]
self._num_alloc_retries = stats["num_alloc_retries"]
def _create_pre_forward_hook(self, name: str) -> Callable:
"""Prefix operator name with current module and 'forward', and insert 'fw_start' marker at forward pass start."""
def _pre_forward_hook(module: nn.Module, inputs: Any) -> None:
self._cur_module_name = f"{name}.forward"
if (
# pyrefly: ignore [invalid-argument]
hasattr(module, "_memory_tracker_is_root")
# pyrefly: ignore [not-callable]
and module._memory_tracker_is_root
):
self._add_marker("fw_start")
return _pre_forward_hook
def _create_post_forward_hook(self, name: str) -> Callable:
"""Insert the marker 'fw_bw_boundary' at the boundary of forward and backward pass."""
def _post_forward_hook(
module: nn.Module,
inputs: Sequence[torch.Tensor],
outputs: Sequence[torch.Tensor],
) -> None:
if (
# pyrefly: ignore [invalid-argument]
hasattr(module, "_memory_tracker_is_root")
# pyrefly: ignore [not-callable]
and module._memory_tracker_is_root
):
self._add_marker("fw_bw_boundary")
return _post_forward_hook
def _create_backward_hook(self, name: str) -> Callable:
"""Insert the current module name with backward prefix for the operator name."""
def _backward_hook(
module: nn.Module, grad_input: torch.Tensor, grad_output: torch.Tensor
) -> None:
self._cur_module_name = f"{name}.backward"
return _backward_hook
@no_type_check
def _record_memory_stats(self, fn_name: str) -> None:
"""
Record current memory allocated, current memory active and current memory reserved.
The memory stats dict is indexed with ``self._op_index``.
"""
memory_allocated: float = self._device_module.memory_allocated() / BYTES_PER_MB
memory_reserved: float = self._device_module.memory_reserved() / BYTES_PER_MB
memory_active: float = (
self._device_module.memory_stats().get("active_bytes.all.current", 0)
/ BYTES_PER_MB
)
self.memories_allocated[self._op_index] = (fn_name, memory_allocated)
self.memories_reserved[self._op_index] = (fn_name, memory_reserved)
self.memories_active[self._op_index] = (fn_name, memory_active)
self._op_index += 1
def _add_marker(self, marker_name: str) -> None:
"""Set the marker's x-axis value."""
marker_val = len(self.memories_allocated.values())
self._markers[marker_name] = marker_val
def _clear_state(self) -> None:
"""Clear states when start_monitor() is called."""
self._operator_names.clear()
self.memories_allocated.clear()
self.memories_active.clear()
self.memories_reserved.clear()
self._markers.clear()
self._cur_module_name = ""
self._op_index = 0
self._num_alloc_retries = 0
|
MemoryTracker
|
python
|
walkccc__LeetCode
|
solutions/2178. Maximum Split of Positive Even Integers/2178.py
|
{
"start": 0,
"end": 294
}
|
class ____:
def maximumEvenSplit(self, finalSum: int) -> list[int]:
if finalSum % 2 == 1:
return []
ans = []
needSum = finalSum
even = 2
while needSum - even >= even + 2:
ans.append(even)
needSum -= even
even += 2
return ans + [needSum]
|
Solution
|
python
|
getsentry__sentry
|
src/sentry/workflow_engine/endpoints/utils/sortby.py
|
{
"start": 161,
"end": 1817
}
|
class ____:
"""
SortByParam assists in parsing a 'sortBy' parameter from the request,
validating it against an endpoint-specific config, and providing the
values that should be passed along to QuerySet.order_by and the Paginator.
To guarantee stable results with potentially duplicated sort keys, 'id' is
used as a fallback sort key.
The parameter is expected to be in the format of "[-]<field_name>", where
the optional "-" prefix indicates descending order and the field_name
must be a key in the provided mapping.
"""
"The sort keys that should be passed to the QuerySet order_by method."
db_order_by: Sequence[str]
"The name of the database field we should use to sort the queryset."
db_field_name: str
@staticmethod
def parse(sort_by: str, api_to_db_map: Mapping[str, str]) -> "SortByParam":
"""
Parse the 'sortBy' parameter from the request, raising a ValidationError if the
field is invalid.
api_to_db_map is a mapping from the API field name to the database field name
to be used for sorting.
"""
order_prefix = "-" if sort_by.startswith("-") else ""
sort_field = sort_by[len(order_prefix) :]
if sort_field not in api_to_db_map:
raise ValidationError({"sortBy": ["Invalid sort field"]})
db_field_name = api_to_db_map[sort_field]
field_order_by = order_prefix + db_field_name
if db_field_name == "id":
return SortByParam((field_order_by,), db_field_name)
else:
return SortByParam((field_order_by, order_prefix + "id"), db_field_name)
|
SortByParam
|
python
|
doocs__leetcode
|
solution/0200-0299/0244.Shortest Word Distance II/Solution.py
|
{
"start": 0,
"end": 649
}
|
class ____:
def __init__(self, wordsDict: List[str]):
self.d = defaultdict(list)
for i, w in enumerate(wordsDict):
self.d[w].append(i)
def shortest(self, word1: str, word2: str) -> int:
a, b = self.d[word1], self.d[word2]
ans = inf
i = j = 0
while i < len(a) and j < len(b):
ans = min(ans, abs(a[i] - b[j]))
if a[i] <= b[j]:
i += 1
else:
j += 1
return ans
# Your WordDistance object will be instantiated and called as such:
# obj = WordDistance(wordsDict)
# param_1 = obj.shortest(word1,word2)
|
WordDistance
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/super1.py
|
{
"start": 260,
"end": 358
}
|
class ____(ClassA):
def __init__(self):
pass
def method2(self):
pass
|
ClassB
|
python
|
faif__python-patterns
|
patterns/structural/composite.py
|
{
"start": 1240,
"end": 1376
}
|
class ____(ABC):
@abstractmethod
def render(self) -> None:
raise NotImplementedError("You should implement this!")
|
Graphic
|
python
|
numba__numba
|
numba/tests/test_sort.py
|
{
"start": 16618,
"end": 17370
}
|
class ____(object):
timsort = jit_array_timsort
test_merge_at = None
test_merge_force_collapse = None
def wrap_with_mergestate(self, timsort, func, _cache=None):
"""
Wrap *func* into another compiled function inserting a runtime-created
mergestate as the first function argument.
"""
if _cache is None:
_cache = {}
key = timsort, func
if key in _cache:
return _cache[key]
merge_init = timsort.merge_init
@timsort.compile
def wrapper(keys, values, *args):
ms = merge_init(keys)
res = func(ms, keys, values, *args)
return res
_cache[key] = wrapper
return wrapper
|
JITTimsortMixin
|
python
|
pytorch__pytorch
|
torch/fx/_symbolic_trace.py
|
{
"start": 42037,
"end": 42225
}
|
class ____(_PatchedFn):
def revert(self):
self.frame_dict[self.fn_name] = self.orig_fn
def patch(self):
self.frame_dict[self.fn_name] = self.new_fn
|
_PatchedFnSetItem
|
python
|
PyCQA__pylint
|
tests/functional/d/dataclass/dataclass_typecheck.py
|
{
"start": 1831,
"end": 1960
}
|
class ____:
def __enter__(self):
pass
def __exit__(self, type_, value, traceback):
pass
@dataclass
|
Manager
|
python
|
bokeh__bokeh
|
tests/unit/bokeh/core/property/test_data_class.py
|
{
"start": 1144,
"end": 1373
}
|
class ____:
a: list[int]
b: list[int]
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
|
DataclassData
|
python
|
kamyu104__LeetCode-Solutions
|
Python/array-transformation.py
|
{
"start": 31,
"end": 641
}
|
class ____(object):
def transformArray(self, arr):
"""
:type arr: List[int]
:rtype: List[int]
"""
def is_changable(arr):
return any(arr[i-1] > arr[i] < arr[i+1] or
arr[i-1] < arr[i] > arr[i+1]
for i in xrange(1, len(arr)-1))
while is_changable(arr):
new_arr = arr[:]
for i in xrange(1, len(arr)-1):
new_arr[i] += arr[i-1] > arr[i] < arr[i+1]
new_arr[i] -= arr[i-1] < arr[i] > arr[i+1]
arr = new_arr
return arr
|
Solution
|
python
|
django__django
|
tests/admin_checks/models.py
|
{
"start": 950,
"end": 1171
}
|
class ____(models.Model):
name = models.CharField(max_length=100)
subtitle = models.CharField(max_length=100)
price = models.FloatField()
authors = models.ManyToManyField(Author, through="AuthorsBooks")
|
Book
|
python
|
great-expectations__great_expectations
|
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_private_ip_v4.py
|
{
"start": 876,
"end": 1875
}
|
class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_ipv4_private"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_ipv4_private(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
|
ColumnValuesToBePrivateIpV4
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/asset_selection.py
|
{
"start": 37932,
"end": 38955
}
|
class ____(AssetSelection):
include_sources: bool
kind_str: Optional[str]
def resolve_inner(
self,
asset_graph: BaseAssetGraph,
allow_missing: bool,
) -> AbstractSet[AssetKey]:
base_nodes = {
node.key: node
for node in asset_graph.asset_nodes
if self.include_sources or node.is_materializable
}
if self.kind_str is None:
return {
node.key
for key, node in base_nodes.items()
if (not any(tag_key.startswith(KIND_PREFIX) for tag_key in (node.tags or {})))
}
else:
return {
key
for key, node in base_nodes.items()
if node.tags.get(f"{KIND_PREFIX}{self.kind_str}") is not None
}
def to_selection_str(self) -> str:
if self.kind_str is None:
return "kind:<null>"
return f'kind:"{self.kind_str}"'
@whitelist_for_serdes
@record
|
KindAssetSelection
|
python
|
facebook__pyre-check
|
source/interprocedural_analyses/taint/test/integration/overrides.py
|
{
"start": 2638,
"end": 2710
}
|
class ____:
def method(self, arg):
return arg
|
SkippedOverrides
|
python
|
kamyu104__LeetCode-Solutions
|
Python/sentence-screen-fitting.py
|
{
"start": 37,
"end": 941
}
|
class ____(object):
def wordsTyping(self, sentence, rows, cols):
"""
:type sentence: List[str]
:type rows: int
:type cols: int
:rtype: int
"""
def words_fit(sentence, start, cols):
if len(sentence[start]) > cols:
return 0
s, count = len(sentence[start]), 1
i = (start + 1) % len(sentence)
while s + 1 + len(sentence[i]) <= cols:
s += 1 + len(sentence[i])
count += 1
i = (i + 1) % len(sentence)
return count
wc = [0] * len(sentence)
for i in xrange(len(sentence)):
wc[i] = words_fit(sentence, i, cols)
words, start = 0, 0
for i in xrange(rows):
words += wc[start]
start = (start + wc[start]) % len(sentence)
return words / len(sentence)
|
Solution
|
python
|
PrefectHQ__prefect
|
src/prefect/logging/clients.py
|
{
"start": 10920,
"end": 11825
}
|
class ____(PrefectLogsSubscriber):
"""Logs subscriber for Prefect Cloud"""
def __init__(
self,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
filter: Optional["LogFilter"] = None,
reconnection_attempts: int = 10,
):
"""
Args:
api_url: The base URL for a Prefect Cloud workspace
api_key: The API key of an actor with the see_flows scope
filter: Log filter to apply
reconnection_attempts: When the client is disconnected, how many times
the client should attempt to reconnect
"""
api_url, api_key = _get_api_url_and_key(api_url, api_key)
super().__init__(
api_url=api_url,
filter=filter,
reconnection_attempts=reconnection_attempts,
)
self._api_key = api_key
|
PrefectCloudLogsSubscriber
|
python
|
PrefectHQ__prefect
|
tests/_internal/pydantic/test_validated_func.py
|
{
"start": 4267,
"end": 5171
}
|
class ____:
"""Test positional-only parameters (Python 3.8+)."""
def test_positional_only_valid(self):
def func(a, b, /, c):
return a + b + c
vf = ValidatedFunction(func)
result = vf.validate_call_args((1, 2, 3), {})
assert result == {"a": 1, "b": 2, "c": 3}
def test_positional_only_with_keyword_for_c(self):
def func(a, b, /, c):
return a + b + c
vf = ValidatedFunction(func)
result = vf.validate_call_args((1, 2), {"c": 3})
assert result == {"a": 1, "b": 2, "c": 3}
def test_positional_only_error(self):
def func(a, b, /, c):
return a + b + c
vf = ValidatedFunction(func)
with pytest.raises(
TypeError, match="positional-only argument.*passed as keyword"
):
vf.validate_call_args((1,), {"b": 2, "c": 3})
|
TestPositionalOnly
|
python
|
vyperlang__vyper
|
tests/evm_backends/base_env.py
|
{
"start": 577,
"end": 752
}
|
class ____:
address: str
topics: list[str]
data: tuple[list[bytes], bytes] # (topic list, non-topic)
# object returned by `last_result` property
@dataclass
|
LogEntry
|
python
|
doocs__leetcode
|
solution/1700-1799/1768.Merge Strings Alternately/Solution.py
|
{
"start": 0,
"end": 161
}
|
class ____:
def mergeAlternately(self, word1: str, word2: str) -> str:
return ''.join(a + b for a, b in zip_longest(word1, word2, fillvalue=''))
|
Solution
|
python
|
PrefectHQ__prefect
|
src/prefect/server/schemas/actions.py
|
{
"start": 37539,
"end": 37879
}
|
class ____(ActionBaseModel):
"""Data used by the Prefect REST API to update an artifact."""
data: Optional[Union[Dict[str, Any], Any]] = Field(None)
description: Optional[str] = Field(None)
metadata_: Optional[
Annotated[dict[str, str], AfterValidator(validate_max_metadata_length)]
] = Field(None)
|
ArtifactUpdate
|
python
|
milvus-io__pymilvus
|
pymilvus/client/asynch.py
|
{
"start": 812,
"end": 1451
}
|
class ____:
@abc.abstractmethod
def result(self, **kwargs):
"""Return deserialized result.
It's a synchronous interface. It will wait executing until
server respond or timeout occur(if specified).
This API is thread-safe.
"""
raise NotImplementedError
@abc.abstractmethod
def cancel(self):
"""Cancle gRPC future.
This API is thread-safe.
"""
raise NotImplementedError
@abc.abstractmethod
def done(self):
"""Wait for request done.
This API is thread-safe.
"""
raise NotImplementedError
|
AbstractFuture
|
python
|
Textualize__textual
|
tests/test_path.py
|
{
"start": 361,
"end": 432
}
|
class ____(App[None]):
CSS_PATH = "/tmp/test.tcss"
|
AbsolutePathStrApp
|
python
|
PrefectHQ__prefect
|
src/prefect/server/events/actions.py
|
{
"start": 38356,
"end": 39930
}
|
class ____(FlowRunAction):
"""Changes the state of a flow run associated with the trigger"""
@abc.abstractmethod
async def new_state(self, triggered_action: "TriggeredAction") -> StateCreate:
"""Return the new state for the flow run"""
async def act(self, triggered_action: "TriggeredAction") -> None:
flow_run_id = await self.flow_run(triggered_action)
self._resulting_related_resources.append(
RelatedResource.model_validate(
{
"prefect.resource.id": f"prefect.flow-run.{flow_run_id}",
"prefect.resource.role": "target",
}
)
)
logger.info(
"Changing flow run state",
extra={
"flow_run_id": str(flow_run_id),
**self.logging_context(triggered_action),
},
)
async with await self.orchestration_client(triggered_action) as orchestration:
response = await orchestration.set_flow_run_state(
flow_run_id, await self.new_state(triggered_action=triggered_action)
)
self._result_details["status_code"] = response.status_code
if response.status_code >= 300:
raise ActionFailed(self.reason_from_response(response))
result = OrchestrationResult.model_validate(response.json())
if not isinstance(result.details, StateAcceptDetails):
raise ActionFailed(f"Failed to set state: {result.details.reason}")
|
FlowRunStateChangeAction
|
python
|
tensorflow__tensorflow
|
tensorflow/dtensor/python/tpu_util.py
|
{
"start": 2030,
"end": 32826
}
|
class ____:
"""Represents a TPU core's location in the mesh."""
def __init__(self, x: int = 0, y: int = 0, z: int = 0, core: int = 0):
self.x = x
self.y = y
self.z = z
self.core = core
def __eq__(self, other):
if not isinstance(other, _CoreLocation):
return False
return self.x == other.x and self.y == other.y and self.z == other.z and self.core == other.core
def __ne__(self, other):
if not isinstance(other, _CoreLocation):
return True
return not self == other
def __hash__(self):
return hash((self.x, self.y, self.z, self.core))
def __repr__(self):
return f"{type(self).__name__}(x={self.x}, y={self.y}, z={self.z}, core={self.core})"
def to_list(self):
return [self.x, self.y, self.z, self.core]
def _create_device_array(shape, device_type, host_id, local_device_ids=None):
"""Returns ID and device lists that can be used to create a mesh."""
num_global_devices = config.num_global_devices(device_type)
global_device_ids = np.arange(num_global_devices).reshape(shape)
local_device_list = config.local_devices(device_type)
# User can specify local_device_ids or use default list for multi host.
num_local_devices = len(local_device_list)
local_device_ids = [
x + host_id * num_local_devices for x in range(num_local_devices) # pytype: disable=unsupported-operands
] if not local_device_ids else local_device_ids
return global_device_ids, local_device_ids, local_device_list
def _create_tpu_topology(core_locations: List[_CoreLocation], num_tasks: int,
num_devices_per_task: int) -> topology.Topology:
"""Returns a Topology object build from a _CoreLocation list.
Args:
core_locations: A list of _CoreLocation objects sorted first by TF task ID
and then by per-task device ordinals.
num_tasks: The number of TF tasks in the cluster.
num_devices_per_task: The number of TPU devices local to each task.
"""
assert min([l.x for l in core_locations]) == 0
assert min([l.y for l in core_locations]) == 0
assert min([l.z for l in core_locations]) == 0
assert min([l.core for l in core_locations]) == 0
x_max = max([l.x for l in core_locations])
y_max = max([l.y for l in core_locations])
z_max = max([l.z for l in core_locations])
core_max = max([l.core for l in core_locations])
mesh_shape = [x_max + 1, y_max + 1, z_max + 1, core_max + 1]
device_coordinates = [[l.x, l.y, l.z, l.core] for l in core_locations]
device_coordinates = numpy_compat.np_asarray(device_coordinates).reshape(
num_tasks, num_devices_per_task, 4)
return topology.Topology(
mesh_shape=mesh_shape, device_coordinates=device_coordinates)
def shutdown_tpu_system():
"""Shuts down the TPU system."""
@def_function.function
def _shutdown_tpu_system():
return gen_dtensor_ops.shutdown_tpu_system()
success = _shutdown_tpu_system() if context.is_tfrt_enabled() else True
if success:
logging.info("TPU system shut down.")
else:
logging.warning("TPU system fails to shut down.")
def tpu_system_init_helper(task_id,
num_tasks,
num_devices,
use_tfrt_host_runtime=True,
use_megacore=False):
"""A helper function to initialize multi-client tpu system."""
@def_function.function
def _tpu_init_fn():
return gen_dtensor_ops.configure_and_initialize_global_tpu(
use_tfrt_host_runtime=use_tfrt_host_runtime)
@def_function.function
def _set_global_tpu_array_fn(topology_proto):
gen_dtensor_ops.d_tensor_set_global_tpu_array(topology_proto)
with ops.device("/job:" + config.full_job_name() + "/device:TPU_SYSTEM:0"): # pylint: disable=protected-access
my_core_ids = _tpu_init_fn()
if use_megacore:
logging.info("Using TPU megacore")
my_core_ids = my_core_ids * 2
logging.info("TPU core IDs: %s", my_core_ids)
# `my_core_ids` contains the IDs of TPU cores attached to this host.
#
# To generate correct and efficient XLA AllReduce group assignment, we must
# merge these arrays from all hosts and broadcast the result back to all
# hosts, so all hosts can use these mappings in their MLIR passes.
#
# This is essentially doing what WaitForDistributedTpuOp and
# SetGlobalTPUArrayOp do, in our multi-client environment.
num_devices_per_task = int(num_devices / num_tasks)
# Create a one-time use mesh and layout just for merging core IDs.
mesh = layout_lib.Mesh([_MESH_DIM_X],
*_create_device_array((num_devices,), _TPU_DEVICE_TYPE,
config.client_id()))
layout = layout_lib.Layout([_MESH_DIM_X, layout_lib.UNSHARDED], mesh)
device = dtensor_device.DTensorDevice(meshes=[mesh])
logging.info("TPU core locations: %s",
device.tpu_core_ids_to_locations(my_core_ids))
# At this point, we don't know which cores are attached to other hosts.
# The core ID mappings in the runtime haven't been set yet.
#
# The core ID merging AllReduce below is carefully written so it works
# without needing correct core mappings to be set in the runtime. We will
# use this AllReduce's result to set the core ID mappings, and all future
# user-initiated AllReduces will use the mappings.
#
# The runtime is hard-coded to ignore core ID mappings on this AllReduce.
all_core_ids = np.zeros([num_devices], dtype=np.int32)
for i in range(len(my_core_ids)):
all_core_ids[task_id * num_devices_per_task + i] = my_core_ids[i]
# Only one local device gets a valid input. To give an example, assume we have
# 2 tasks and each of them has 8 local devices, then `all_core_ids` in task 0
# will have 8 tensors, where 1 of them may have its value as
# [0,1,2,3,4,5,6,7,0,0,0,0,0,0,0,0] and the other tensors are all zeros. For
# task 1, the case may be one with [0,0,0,0,0,0,0,0,8,9,10,11,12,13,14,15]
# and other 7 are all zeros.
all_core_ids = constant_op.constant([all_core_ids])
zeros = array_ops.zeros_like(all_core_ids)
all_core_ids = [all_core_ids] + [zeros] * (num_devices_per_task - 1)
# All devices on all hosts participate in one AllReduce, whose result will be
# core IDs arranged by task-device ordinals. For the above example, the result
# will be [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15].
with ops.device(device.name):
all_core_ids = device.pack(all_core_ids, layout)
all_core_ids = math_ops.reduce_sum(all_core_ids, axis=[0])
unpacked_all_tpu_ids = device.unpack(all_core_ids)
all_core_ids = list(unpacked_all_tpu_ids[0].numpy())
logging.info("All TPU core IDs: %s", all_core_ids)
# Set the default core ID mappings in the runtime for legacy code and tests.
#
# Legacy code and tests create TPU meshes directly without using the
# `create_tpu_mesh` function below. Those meshes have global device IDs
# equal to TF task-device ordinals. The `all_core_ids` array happens to
# arrange core IDs by TF task-device ordinals. Using this array on those
# meshes guarantee correct although inefficient results.
device.set_tpu_core_ids("", all_core_ids)
# Remember enough global, immutable information to be able to build any ring
# we want prescribed by `create_tpu_mesh` in the future.
global _all_core_ids
_all_core_ids = all_core_ids
all_core_locations = device.tpu_core_ids_to_locations(all_core_ids)
all_core_locations = [
_CoreLocation(l[0], l[1], l[2], l[3]) for l in all_core_locations
]
global _all_core_locations
_all_core_locations = all_core_locations
logging.info("All TPU core locations: %s", all_core_locations)
tpu_topology = _create_tpu_topology(all_core_locations, num_tasks,
num_devices_per_task)
_set_global_tpu_array_fn(tpu_topology.serialized())
return tpu_topology, device
def initialize_tpu_system(use_megacore=False):
"""Initializes the TPU system."""
# Make sure the server change is fully propagated before attempting to run
# the core ID merging logic below.
context.ensure_initialized()
context.async_wait()
context.context()._clear_caches() # pylint: disable=protected-access
use_tfrt_host_runtime = context.context().use_tfrt
logging.info("Using TFRT host runtime is set to %s", use_tfrt_host_runtime)
try:
task_id = config.client_id()
num_tasks = config.num_clients()
num_devices = config.num_global_devices(_TPU_DEVICE_TYPE)
tpu_topology, device = tpu_system_init_helper(
task_id,
num_tasks,
num_devices,
use_tfrt_host_runtime=use_tfrt_host_runtime,
use_megacore=use_megacore)
global _tpu_topology
_tpu_topology = tpu_topology
logging.vlog(1, "TPU Topology: %s, %s", tpu_topology.mesh_shape,
tpu_topology.device_coordinates)
global _dtensor_device
_dtensor_device = device
context.async_wait()
except errors.InvalidArgumentError as e:
raise errors.NotFoundError(
None, None,
"Initialization failed, no valid TPUs found. " + str(e)) from e
except errors.InternalError as e:
logging.error("Hit internal error during TPU system initialization. "
+ "It is likely hardware failure. \nPlease check the error "
+ "messages above to see whether that's the case. \nIf so, "
+ "consider to restart the job or try another machine.")
raise e
# Clear out the eager context caches since the memory is invalid now.
logging.info("Clearing out eager caches")
context.context()._clear_caches() # pylint: disable=protected-access
def _enumerate_cores(bounds: List[int], ring_bounds: List[int],
ring_sizes: List[int], host_bounds: List[int],
host_sizes: List[int]) -> List[List[int]]:
"""Enumerates cores within `bounds` from fatest to slowest varying axes.
Args:
bounds: Upper bounds of axes, from fastest to slowest varying.
ring_bounds: Upper bounds of ring size per axis in the same axis order.
ring_sizes: Number consecutive cores in the ring built so far, cumulatively.
host_bounds: Number of axis values per host in the same axis order.
host_sizes: Number consecutive cores on one host, cumulatively.
Returns:
Cores represented as a list of 4 integers in the same axis order.
"""
if not bounds:
return [[]]
# Recursively enumerate cores under all but the slowest varying axis.
partials = _enumerate_cores(bounds[:-1], ring_bounds[:-1], ring_sizes[:-1],
host_bounds[:-1], host_sizes[:-1])
# Append the slowest varying axis to the end of all partial results.
# From ring_i|j to host_i|j to core_i|j, use progressively smaller or equal
# iteration groupings until every one of the bounds[-1] * len(partials)
# combinations is iterated on.
# Despite the six levels of nested loops below, the total time complexity for
# this invocation is O(N), where N is the number of cores in the topology.
results = []
for ring_i in range(0, bounds[-1], ring_bounds[-1]):
for ring_j in range(0, len(partials), ring_sizes[-1]):
for host_i in range(ring_i, ring_i + ring_bounds[-1], host_bounds[-1]):
for host_j in range(ring_j, ring_j + ring_sizes[-1], host_sizes[-1]):
for i in range(host_i, host_i + host_bounds[-1]):
for j in range(host_j, host_j + host_sizes[-1]):
results.append(partials[j] + [i])
return results
def _enumerate_core_locations(bounds: List[int], ring_bounds: List[int],
axes: List[str],
can_split_host_across_rings: bool,
ring_size: int) -> List[_CoreLocation]:
"""Enumerates all possible core locations under the axis iteration order.
Args:
bounds: A list of 4 positive integers, upper bound values for x, y, z, core.
ring_bounds: A list of 4 positive integers, upper bound values for ring size
in x, y, z, core axes.
axes: A permutation of ["x", "y", "z", "core"], the axis iteration order.
can_split_host_across_rings: If true, devices attached to the same host may
get assigned to different rings.
ring_size: Number of devices in a ring, only for argument validation.
Returns:
A list of all CoreLocation objects defined in a TPU slice of shape `bounds`,
sorted by axis iteration order specified by `axes`.
For example, given bounds=[2, 2, 1, 2] and axes=["core", "z", "y", "x"],
return 8 core locations expressed in (x, y, z, core) format but iterated in
core -> z -> y -> x order (fatest to slowest varying):
[_CoreLocation(0, 0, 0, 0),
_CoreLocation(0, 0, 0, 1),
_CoreLocation(0, 1, 0, 0),
_CoreLocation(0, 1, 0, 1),
_CoreLocation(1, 0, 0, 0),
_CoreLocation(1, 0, 0, 1),
_CoreLocation(1, 1, 0, 0),
_CoreLocation(1, 1, 0, 1)]
Raises:
ValueError: If ring_size cannot be fulfilled without splitting hosts.
"""
num_cores_per_chip = bounds[3]
if num_cores_per_chip != 1 and num_cores_per_chip != 2:
raise ValueError("Unsupported TPU slice size: %s" % bounds)
# Translate `axes` from string to integer format.
axes = [{"x": 0, "y": 1, "z": 2, "core": 3}[axis] for axis in axes]
# Reorder bounds from fastest to slowest varying axes.
bounds = [bounds[i] for i in axes]
# Set and validate host_bounds.
if can_split_host_across_rings:
# If we can split hosts, shrink every host to effectively contain 1 device.
host_bounds = [1, 1, 1, 1]
elif np.prod(bounds) <= 2:
# We must be running on 1x1 or 1x1x1 Forge.
host_bounds = [[1, 1, 1, num_cores_per_chip][i] for i in axes]
else:
# Other cases including 2x2 Forge and Borg must use a full donut.
host_bounds = [[2, 2, 1, num_cores_per_chip][i] for i in axes]
# host_sizes is the cumulative products of host_bounts.
host_sizes = [1]
for host_bound in host_bounds:
host_sizes.append(host_sizes[-1] * host_bound)
host_size = host_sizes.pop()
# When can_split_host_across_rings is false, a ring must contain at least as
# many devices as a host has.
if ring_size < host_size:
assert not can_split_host_across_rings
raise ValueError(
"Rings too small for can_split_host_across_rings = False: %d" %
ring_size)
# Reorder ring_bounds and validate it's element-wise >= host_bounds.
ring_bounds = [ring_bounds[i] for i in axes]
if ring_bounds < host_bounds:
raise ValueError("ring_bounds %s should be >= host_bounds %s" %
(ring_bounds, host_bounds))
ring_sizes = [1]
# ring_sizes is the cumulative products of ring_bounds.
for ring_bound in ring_bounds:
ring_sizes.append(ring_sizes[-1] * ring_bound)
ring_sizes.pop()
# Enumerate cores in the given iteration order. Each core is represented as a
# list of int, which are offsets from fatest to slowest varying axes.
cores = _enumerate_cores(bounds, ring_bounds, ring_sizes, host_bounds,
host_sizes)
# Reorder offsets of each core back to the x, y, z, core order.
core_locations = []
for core in cores:
core = [core[axes.index(i)] for i in range(4)]
core_locations.append(_CoreLocation(core[0], core[1], core[2], core[3]))
return core_locations
def _build_all_reduce_ring(core_locations: List[_CoreLocation],
rotate: bool = False) -> List[int]:
"""Reorders a list of TPU cores to optimize for AllReduce performance.
This is ported from the C++ tensorflow::BuildAllReduceRing function,
mixed with some logic from TF TPU's device_assignment._ring_3d.
Args:
core_locations: A list of core locations expressed as [x, y, z, core].
rotate: If true, scan the cores in a column-major order. False by default.
Returns:
A permutation of the input list such that neighbors in the sequence are
nearby in the TPU topology.
"""
permutation = list(range(len(core_locations)))
if not permutation:
return permutation
logging.vlog(2, "Core locations in: %s", core_locations)
first_column = min([l.x for l in core_locations])
first_row = min([l.y for l in core_locations])
same_z = (len(set([l.z for l in core_locations])) == 1)
logging.vlog(2, "first_column: %d", first_column)
logging.vlog(2, "first_row: %d", first_row)
logging.vlog(2, "same_z: %s", same_z)
def _cmp_2d(ia: int, ib: int) -> int:
if not rotate:
a = core_locations[ia]
b = core_locations[ib]
# Order the first column last in the sequence, except for the first row.
a_first = (a.x == first_column and a.y != first_row)
b_first = (b.x == first_column and b.y != first_row)
if a_first != b_first:
return -1 if b_first else 1
# Order rows in increasing order, unless in the first column.
if a.y != b.y:
return b.y - a.y if a_first else a.y - b.y
# Order even rows left to right, odd rows right to left.
if a.x != b.x:
return a.x - b.x if a.y % 2 == 0 else b.x - a.x
# Order cores in increasing order.
return a.core - b.core
else:
a = core_locations[ia]
b = core_locations[ib]
# Order the first row last in the sequence, except for the first column.
a_first = (a.y == first_row and a.x != first_column)
b_first = (b.y == first_row and b.x != first_column)
if a_first != b_first:
return -1 if b_first else 1
# Order columns in increasing order, unless in the first row.
if a.x != b.x:
return b.x - a.x if a_first else a.x - b.x
# Order even columns top down, odd columns bottom up.
if a.y != b.y:
return a.y - b.y if a.x % 2 == 0 else b.y - a.y
# Order cores in increasing order.
return a.core - b.core
def _cmp_3d(ia: int, ib: int) -> int:
a = core_locations[ia]
b = core_locations[ib]
a_corner = (a.x == first_column and a.y == first_row)
b_corner = (b.x == first_column and b.y == first_row)
# If both are in the corner, order in reverse z then core order.
if a_corner and b_corner:
return b.z - a.z if a.z != b.z else a.core - b.core
# Corner cores always go after non-corner cores.
if a_corner != b_corner:
return -1 if b_corner else 1
# Both non-corner cores are on the same z-plane. Reverse odd z-planes.
if a.z == b.z:
return _cmp_2d(ia, ib) if a.z % 2 == 0 else -_cmp_2d(ia, ib)
# Both non-corner cores are on different z-planes. Smaller z goes first.
return a.z - b.z
# If all cores are on the same z-plane, order as usual. Otherwise, order
# neighbor z-planes in opposite orders. Stack all z-planes along the z axis
# and connect them in one corner.
if same_z:
permutation.sort(key=functools.cmp_to_key(_cmp_2d))
else:
permutation.sort(key=functools.cmp_to_key(_cmp_3d))
logging.vlog(2, "Permutation out: %s", permutation)
return permutation
def _build_orthogonal_rings(
core_locations: List[_CoreLocation], ring_size: int,
rotate_ring_across_rings: bool) -> List[_CoreLocation]:
"""Build two all-reduce rings orthogonal to each other.
One ring includes every `ring_size` consecutive core locations. It is usually
applied to the model-parallel dimension of a mesh to achieve best 1D
all-reduce performance. The other ring includes core locations separated by
a stride of `ring_size`. It is usually applied to the data-parallel dimension
of a mesh to get predictable strided all-reduce performance.
Args:
core_locations: A list of core locations expressed as [x, y, z, core].
ring_size: The number of core locations in the consecutive ring.
rotate_ring_across_rings: Build column-major secondary rings.
Returns:
A permutation of the input list forming the described rings.
"""
# Build a ring for the first `ring_size` cores, and apply that permutation to
# every group of `ring_size` cores.
num_cores = len(core_locations)
permutation = _build_all_reduce_ring(core_locations[:ring_size])
for r in range(0, num_cores, ring_size):
core_locations[r:r + ring_size] = [
core_locations[r + permutation[i]] for i in range(ring_size)
]
logging.vlog(1, "Permutated core locations: %s", core_locations)
# Build a "ring" for the collection of devices consisting of the 0th device
# from every group, and apply that permutation to every i-th device group.
# This is achieved by transposing the list and back.
transposed = []
for i in range(ring_size):
transposed += [
core_locations[g + i] for g in range(0, num_cores, ring_size)
]
num_rings = int(num_cores / ring_size)
permutation = _build_all_reduce_ring(
transposed[:num_rings], rotate=rotate_ring_across_rings)
for r in range(0, num_cores, num_rings):
transposed[r:r + num_rings] = [
transposed[r + permutation[i]] for i in range(num_rings)
]
untransposed = []
for i in range(num_rings):
untransposed += [transposed[g + i] for g in range(0, num_cores, num_rings)]
logging.vlog(1, "Stride-permutated core locations: %s", untransposed)
return untransposed
@tf_export("experimental.dtensor.create_tpu_mesh", v1=[])
def create_tpu_mesh(
mesh_dim_names: List[str],
mesh_shape: List[int],
mesh_name: str,
ring_dims: Optional[int] = None,
ring_axes: Optional[List[str]] = None,
ring_bounds: Optional[List[int]] = None,
can_split_host_across_rings: bool = True,
build_ring_across_rings: bool = False,
rotate_ring_across_rings: bool = False,
use_xla_spmd: bool = layout_lib.USE_XLA_SPMD) -> layout_lib.Mesh:
"""Returns a distributed TPU mesh optimized for AllReduce ring reductions.
Only as many as leading axes specified by `ring_axes` as necessary will be
used to build rings, as long as the subslice formed by these axes have enough
cores to contain a ring of the required size. The leftover axes in `ring_axes`
won't affect results.
This function always uses all TPU devices, and offers more customization than
`tf.experimental.dtensor.create_distributed_mesh`.
Args:
mesh_dim_names: List of mesh dimension names.
mesh_shape: Shape of the mesh.
mesh_name: A unique name for the mesh. If empty, internally generate one.
ring_dims: Optional; The number of leading (ring_dims > 0) or trailing
(ring_dims < 0) mesh dimensions to build rings for. If unspecified, build
rings for all but the first dimension.
ring_axes: Optional; A permutation of ["x", "y", "z", "core"], specifying
the order of TPU topology axes to build rings in. If unspecified, default
to ["core", "x", "y", "z"].
ring_bounds: Optional; The maximum number of devices on each axis, in the x,
y, z, core order. If unspecified, default to physical topology limits.
can_split_host_across_rings: Optional; If true, devices attached to the same
host (i.e., DTensor client) may get assigned to different rings. Setting
it to false may cause some combinations of arguments to be infeasible; see
DeviceAssignmentTest.testCreateMesh[No]SplittingHosts* for examples.
build_ring_across_rings: Optional; If true, also build a data-parallel ring
across model-parallel rings. This ring could be strided.
rotate_ring_across_rings: Optional; If true, build the data-parallel ring in
column-major instead of row-major order.
use_xla_spmd: Boolean when True, will use XLA SPMD instead of
DTensor SPMD.
"""
logging.info("Building a TPU mesh %s of shape %s", mesh_name, mesh_shape)
logging.info("Requested ring_dims: %s", ring_dims)
logging.info("Requested ring_axes: %s", ring_axes)
logging.info("Requested ring_bounds: %s", ring_bounds)
logging.info("Requested can_split_host_across_rings: %s",
can_split_host_across_rings)
if not mesh_name:
mesh_name = "mesh_%f" % time.time()
logging.info("Requested mesh_name: %s", mesh_name)
# By default, build rings for all but the first (usually batch) dimension.
if ring_dims is None:
ring_dims = 1 - len(mesh_shape)
elif ring_dims < -len(mesh_shape) or ring_dims > len(mesh_shape):
raise ValueError("Invalid ring_dims value: %d" % ring_dims)
logging.info("Actual ring_dims: %s", ring_dims)
# By default, vary axes in the core -> x -> y -> z order.
if ring_axes is None:
ring_axes = ["core", "x", "y", "z"]
elif len(ring_axes) != 4:
raise ValueError("Expected 4 elements in ring_axes, got %s" % ring_axes)
elif sorted(ring_axes) != ["core", "x", "y", "z"]:
raise ValueError("Invalid ring_axes value: %s" % ring_axes)
logging.info("Actual ring_axes: %s", ring_axes)
# Validate ring_bounds values.
if _tpu_topology is None:
raise ValueError(
"Invalid TPU topology, run dtensor.initialize_tpu_system() first")
topology_shape = list(_tpu_topology.mesh_shape)
if ring_bounds is None:
ring_bounds = topology_shape
elif len(ring_bounds) != 4:
raise ValueError("Expected 4 elements in ring_bounds, got %s" % ring_bounds)
elif ring_bounds > topology_shape:
raise ValueError("ring_bounds %s should be <= topology sizes %s" %
(ring_bounds, topology_shape))
logging.info("Actual ring_bounds: %s", ring_bounds)
# Compute ring_size, the number of cores in a ring.
if ring_dims > 0:
ring_size = np.prod(mesh_shape[:ring_dims])
elif ring_dims < 0:
ring_size = np.prod(mesh_shape[ring_dims:])
else:
ring_size = 1 # single-core rings
logging.info("Actual ring_size: %d", ring_size)
# Rearrange all cores according to the axis iteration order.
global_core_locations = _enumerate_core_locations(
topology_shape, ring_bounds, ring_axes, can_split_host_across_rings,
ring_size)
logging.vlog(1, "Enumerated core locations: %s", global_core_locations)
num_cores = len(global_core_locations)
# The mesh to be created must use all TPU cores in the system.
mesh_size = np.prod(mesh_shape)
if mesh_size != num_cores:
raise ValueError(
"Invalid mesh size: mesh shape %s cannot 1:1 map to %d TPU cores" %
(mesh_shape, num_cores))
# Build a ring for the `ring_size` dimension and, if required, a strided ring
# for the orthogonal dimension.
if build_ring_across_rings:
global_core_locations = _build_orthogonal_rings(global_core_locations,
ring_size,
rotate_ring_across_rings)
else:
permutation = _build_all_reduce_ring(global_core_locations[:ring_size])
for r in range(0, num_cores, ring_size):
global_core_locations[r:r + ring_size] = [
global_core_locations[r + permutation[i]] for i in range(ring_size)
]
logging.vlog(1, "Permutated core locations: %s", global_core_locations)
# For this point on, change from List[CoreLocation] to List[List[int]] for
# easier interaction with the C++ API.
global_core_locations = [l.to_list() for l in global_core_locations]
if _dtensor_device is None:
raise ValueError("Invalid system device, "
"run dtensor.initialize_accelerator_system() first")
global_core_ids = _dtensor_device.tpu_core_locations_to_ids(
global_core_locations)
# Store a per-mesh mapping in the runtime.
_dtensor_device.set_tpu_core_ids(mesh_name, global_core_ids)
# Create the mesh by manually specifying local_device_ids.
local_core_locations = _tpu_topology.device_coordinates[config.client_id()]
indexes = [
global_core_locations.index(list(local_core_location))
for local_core_location in local_core_locations
]
global_device_ids, local_device_ids, local_device_list = _create_device_array(
mesh_shape, _TPU_DEVICE_TYPE, None, local_device_ids=indexes)
return layout_lib.Mesh(
mesh_dim_names,
global_device_ids,
local_device_ids,
local_device_list,
mesh_name,
use_xla_spmd=use_xla_spmd,
)
def get_device_ids(mesh: layout_lib.Mesh,
client_id: Optional[int] = None) -> List[int]:
"""Returns the device IDs of all TPU cores local to the given client.
A device ID is a non-negative integer that uniquely identifies a device in the
mesh. For example, for a 2x2 mesh ('x', 'y'), this function returns a
permutation of [0, 1, 2, 3].
Note that device IDs and device locations are equivalent. The former is a
linearization of the latter along mesh dimensions.
Args:
mesh: A TPU mesh.
client_id: Optional; A DTensor client ID. If empty, query this client.
"""
if mesh.device_type() != _TPU_DEVICE_TYPE:
raise ValueError("The mesh must be a TPU mesh")
if client_id is None or client_id == config.client_id():
return mesh.local_device_ids()
# It's not clear we should ever allow a client to query other clients for
# their device IDs.
raise NotImplementedError(
"Looking up other clients' device IDs is not supported")
def get_device_locations(
mesh: layout_lib.Mesh,
client_id: Optional[int] = None) -> List[Dict[str, int]]:
"""Returns the device locations of all TPU cores local to the given client.
A device location is a dictionary from dimension names to indices on those
dimensions. For example, for a 2x2 mesh ('x', 'y'), this function returns a
permutation of this list:
[{'x': 0, 'y': 0},
{'x': 0, 'y': 1},
{'x': 1, 'y': 0},
{'x': 1, 'y': 1}].
Note that device IDs and device locations are equivalent. The former is a
linearization of the latter along mesh dimensions.
Args:
mesh: A TPU mesh.
client_id: Optional; A DTensor client ID. If empty, query this client.
"""
if mesh.device_type() != _TPU_DEVICE_TYPE:
raise ValueError("The mesh must be a TPU mesh")
if client_id is None or client_id == config.client_id():
return mesh.local_device_locations()
# It's not clear we should ever allow a client to query other clients for
# their device locations.
raise NotImplementedError(
"Looking up other clients' device locations is not supported")
# TODO(b/245589661): Remove dtensor_initialize_tpu_system() and
# dtensor_shutdown_tpu_system() after users stopped using them.
def dtensor_initialize_tpu_system(enable_coordination_service=False):
"""Deprecated way to initialize the TPU system."""
from . import accelerator_util # pylint: disable=g-import-not-at-top
accelerator_util.initialize_accelerator_system(
"TPU", enable_coordination_service=enable_coordination_service)
def dtensor_shutdown_tpu_system():
"""Deprecated way to shutodwn the TPU system."""
from . import accelerator_util # pylint: disable=g-import-not-at-top
accelerator_util.shutdown_accelerator_system()
|
_CoreLocation
|
python
|
TheAlgorithms__Python
|
other/greedy.py
|
{
"start": 0,
"end": 1970
}
|
class ____:
def __init__(self, name, value, weight):
self.name = name
self.value = value
self.weight = weight
def __repr__(self):
return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"
def get_value(self):
return self.value
def get_name(self):
return self.name
def get_weight(self):
return self.weight
def value_weight(self):
return self.value / self.weight
def build_menu(name, value, weight):
menu = []
for i in range(len(value)):
menu.append(Things(name[i], value[i], weight[i]))
return menu
def greedy(item, max_cost, key_func):
items_copy = sorted(item, key=key_func, reverse=True)
result = []
total_value, total_cost = 0.0, 0.0
for i in range(len(items_copy)):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i])
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def test_greedy():
"""
>>> food = ["Burger", "Pizza", "Coca Cola", "Rice",
... "Sambhar", "Chicken", "Fries", "Milk"]
>>> value = [80, 100, 60, 70, 50, 110, 90, 60]
>>> weight = [40, 60, 40, 70, 100, 85, 55, 70]
>>> foods = build_menu(food, value, weight)
>>> foods # doctest: +NORMALIZE_WHITESPACE
[Things(Burger, 80, 40), Things(Pizza, 100, 60), Things(Coca Cola, 60, 40),
Things(Rice, 70, 70), Things(Sambhar, 50, 100), Things(Chicken, 110, 85),
Things(Fries, 90, 55), Things(Milk, 60, 70)]
>>> greedy(foods, 500, Things.get_value) # doctest: +NORMALIZE_WHITESPACE
([Things(Chicken, 110, 85), Things(Pizza, 100, 60), Things(Fries, 90, 55),
Things(Burger, 80, 40), Things(Rice, 70, 70), Things(Coca Cola, 60, 40),
Things(Milk, 60, 70)], 570.0)
"""
if __name__ == "__main__":
import doctest
doctest.testmod()
|
Things
|
python
|
getsentry__sentry
|
src/sentry/integrations/utils/metrics.py
|
{
"start": 16359,
"end": 16552
}
|
class ____(StrEnum):
"""An instance to be recorded of a integration proxy event."""
SHOULD_PROXY = "should_proxy"
PROXY_REQUEST = "proxy_request"
@dataclass
|
IntegrationProxyEventType
|
python
|
ray-project__ray
|
python/ray/_private/ray_logging/__init__.py
|
{
"start": 6080,
"end": 7148
}
|
class ____:
def __init__(self):
self.handlers = []
self._lock = threading.Lock()
def add_handler(self, name: str, handler: Callable) -> None:
with self._lock:
self.handlers.append((name, handler))
def remove_handler(self, name: str) -> None:
with self._lock:
new_handlers = [pair for pair in self.handlers if pair[0] != name]
self.handlers = new_handlers
def emit(self, data):
with self._lock:
for pair in self.handlers:
_, handle = pair
handle(data)
global_worker_stdstream_dispatcher = WorkerStandardStreamDispatcher()
# Regex for canonicalizing log lines.
NUMBERS = re.compile(r"(\d+|0x[0-9a-fA-F]+)")
# Batch of log lines including ip, pid, lines, etc.
LogBatch = Dict[str, Any]
def _canonicalise_log_line(line):
# Remove words containing numbers or hex, since those tend to differ between
# workers.
return " ".join(x for x in line.split() if not NUMBERS.search(x))
@dataclass
|
WorkerStandardStreamDispatcher
|
python
|
nedbat__coveragepy
|
tests/test_report.py
|
{
"start": 40137,
"end": 45515
}
|
class ____(CoverageTest):
"""Tests of SummaryReporter."""
def make_rigged_file(self, filename: str, stmts: int, miss: int) -> None:
"""Create a file that will have specific results.
`stmts` and `miss` are ints, the number of statements, and
missed statements that should result.
"""
run = stmts - miss - 1
dont_run = miss
source = ""
source += "a = 1\n" * run
source += "if a == 99:\n"
source += " a = 2\n" * dont_run
self.make_file(filename, source)
def get_summary_text(self, *options: tuple[str, TConfigValueIn]) -> str:
"""Get text output from the SummaryReporter.
The arguments are tuples: (name, value) for Coverage.set_option.
"""
self.make_rigged_file("file1.py", 339, 155)
self.make_rigged_file("file2.py", 13, 3)
self.make_rigged_file("file10.py", 234, 228)
self.make_file("doit.py", "import file1, file2, file10")
cov = Coverage(source=["."], omit=["doit.py"])
self.start_import_stop(cov, "doit")
for name, value in options:
cov.set_option(name, value)
printer = SummaryReporter(cov)
destination = io.StringIO()
printer.report([], destination)
return destination.getvalue()
def test_test_data(self) -> None:
# We use our own test files as test data. Check that our assumptions
# about them are still valid. We want the three columns of numbers to
# sort in three different orders.
report = self.get_summary_text()
# Name Stmts Miss Cover
# ------------------------------
# file1.py 339 155 54%
# file2.py 13 3 77%
# file10.py 234 228 3%
# ------------------------------
# TOTAL 586 386 34%
lines = report.splitlines()[2:-2]
assert len(lines) == 3
nums = [list(map(int, l.replace("%", "").split()[1:])) for l in lines]
# [
# [339, 155, 54],
# [ 13, 3, 77],
# [234, 228, 3]
# ]
assert nums[1][0] < nums[2][0] < nums[0][0]
assert nums[1][1] < nums[0][1] < nums[2][1]
assert nums[2][2] < nums[0][2] < nums[1][2]
def test_defaults(self) -> None:
"""Run the report with no configuration options."""
report = self.get_summary_text()
assert "Missing" not in report
assert "Branch" not in report
def test_print_missing(self) -> None:
"""Run the report printing the missing lines."""
report = self.get_summary_text(("report:show_missing", True))
assert "Missing" in report
assert "Branch" not in report
def assert_ordering(self, text: str, *words: str) -> None:
"""Assert that the `words` appear in order in `text`."""
indexes = list(map(text.find, words))
assert -1 not in indexes
msg = f"The words {words!r} don't appear in order in {text!r}"
assert indexes == sorted(indexes), msg
def test_default_sort_report(self) -> None:
# Sort the text report by the default (Name) column.
report = self.get_summary_text()
self.assert_ordering(report, "file1.py", "file2.py", "file10.py")
def test_sort_report_by_name(self) -> None:
# Sort the text report explicitly by the Name column.
report = self.get_summary_text(("report:sort", "Name"))
self.assert_ordering(report, "file1.py", "file2.py", "file10.py")
def test_sort_report_by_stmts(self) -> None:
# Sort the text report by the Stmts column.
report = self.get_summary_text(("report:sort", "Stmts"))
self.assert_ordering(report, "file2.py", "file10.py", "file1.py")
def test_sort_report_by_missing(self) -> None:
# Sort the text report by the Missing column.
report = self.get_summary_text(("report:sort", "Miss"))
self.assert_ordering(report, "file2.py", "file1.py", "file10.py")
def test_sort_report_by_cover(self) -> None:
# Sort the text report by the Cover column.
report = self.get_summary_text(("report:sort", "Cover"))
self.assert_ordering(report, "file10.py", "file1.py", "file2.py")
def test_sort_report_by_cover_plus(self) -> None:
# Sort the text report by the Cover column, including the explicit + sign.
report = self.get_summary_text(("report:sort", "+Cover"))
self.assert_ordering(report, "file10.py", "file1.py", "file2.py")
def test_sort_report_by_cover_reversed(self) -> None:
# Sort the text report by the Cover column reversed.
report = self.get_summary_text(("report:sort", "-Cover"))
self.assert_ordering(report, "file2.py", "file1.py", "file10.py")
def test_sort_report_by_invalid_option(self) -> None:
# Sort the text report by a nonsense column.
msg = "Invalid sorting option: 'Xyzzy'"
with pytest.raises(ConfigError, match=msg):
self.get_summary_text(("report:sort", "Xyzzy"))
def test_report_with_invalid_format(self) -> None:
# Ask for an invalid format.
msg = "Unknown report format choice: 'xyzzy'"
with pytest.raises(ConfigError, match=msg):
self.get_summary_text(("report:format", "xyzzy"))
|
SummaryReporterConfigurationTest
|
python
|
cython__cython
|
tests/run/pep3135_class_cell.py
|
{
"start": 2597,
"end": 2971
}
|
class ____:
"""
>>> obj = CE()
>>> obj.method()().__name__
'CE'
>>> obj.method2()()().__name__
'CE'
"""
def method(self):
def inner(): return __class__
return inner
def method2(self):
def inner():
def inner_inner():
return __class__
return inner_inner
return inner
|
CE
|
python
|
astropy__astropy
|
astropy/units/tests/test_quantity.py
|
{
"start": 66375,
"end": 66606
}
|
class ____:
def __init__(self, value, unit):
self.value = value
self.unit = unit
def __array__(self, dtype=None, copy=COPY_IF_NEEDED):
return np.array(self.value, dtype=dtype, copy=copy)
|
QuantityMimic
|
python
|
openai__openai-python
|
src/openai/types/responses/response_function_web_search_param.py
|
{
"start": 383,
"end": 571
}
|
class ____(TypedDict, total=False):
type: Required[Literal["url"]]
"""The type of source. Always `url`."""
url: Required[str]
"""The URL of the source."""
|
ActionSearchSource
|
python
|
doocs__leetcode
|
solution/2600-2699/2611.Mice and Cheese/Solution2.py
|
{
"start": 0,
"end": 250
}
|
class ____:
def miceAndCheese(self, reward1: List[int], reward2: List[int], k: int) -> int:
for i, x in enumerate(reward2):
reward1[i] -= x
reward1.sort(reverse=True)
return sum(reward2) + sum(reward1[:k])
|
Solution
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/tags/tag_set.py
|
{
"start": 437,
"end": 1946
}
|
class ____(NamespacedKVSet):
"""Extend this class to define a set of tags in the same namespace.
Supports splatting to a dictionary that can be placed inside a tags argument along with
other tags.
.. code-block:: python
my_tags: NamespacedTagsSet = ...
@asset(
tags={**my_tags}
)
def my_asset():
pass
"""
def __init__(self, *args, **kwargs) -> None:
for field_name, field in model_fields(self.__class__).items():
annotation_type = field.annotation
is_optional = is_closed_python_optional_type(annotation_type)
is_optional_str = is_optional and str in get_args(annotation_type)
is_optional_literal = (
is_optional and get_origin(get_args(annotation_type)[0]) == Literal
)
if not (
is_optional_str
or annotation_type is str
or is_optional_literal
or annotation_type is Literal
):
check.failed(
f"Type annotation for field '{field_name}' is not str, Optional[str], Literal, "
f"or Optional[Literal]. Is {annotation_type}."
)
super().__init__(*args, **kwargs)
@classmethod
def _extract_value(cls, field_name: str, value: Any) -> str:
"""Since all tag values are strings, we don't need to do any type coercion."""
return cast("str", value)
|
NamespacedTagSet
|
python
|
sphinx-doc__sphinx
|
tests/roots/test-ext-autodoc/target/preserve_defaults.py
|
{
"start": 366,
"end": 1050
}
|
class ____:
"""docstring"""
def meth(
self,
name: str = CONSTANT,
sentinel: Any = SENTINEL,
now: datetime = datetime.now(), # NoQA: B008,DTZ005
color: int = 0xFFFFFF,
*,
kwarg1,
kwarg2=0xFFFFFF,
) -> None:
"""docstring"""
@classmethod
def clsmeth(
cls,
name: str = CONSTANT,
sentinel: Any = SENTINEL,
now: datetime = datetime.now(), # NoQA: B008,DTZ005
color: int = 0xFFFFFF,
*,
kwarg1,
kwarg2=0xFFFFFF,
) -> None:
"""docstring"""
get_sentinel = lambda custom=SENTINEL: custom # NoQA: E731
"""docstring"""
|
Class
|
python
|
tensorflow__tensorflow
|
tensorflow/python/debug/cli/debugger_cli_common.py
|
{
"start": 27447,
"end": 32831
}
|
class ____:
"""Registry for tab completion responses."""
def __init__(self):
self._comp_dict = {}
# TODO(cais): Rename method names with "comp" to "*completion*" to avoid
# confusion.
def register_tab_comp_context(self, context_words, comp_items):
"""Register a tab-completion context.
Register that, for each word in context_words, the potential tab-completions
are the words in comp_items.
A context word is a pre-existing, completed word in the command line that
determines how tab-completion works for another, incomplete word in the same
command line.
Completion items consist of potential candidates for the incomplete word.
To give a general example, a context word can be "drink", and the completion
items can be ["coffee", "tea", "water"]
Note: A context word can be empty, in which case the context is for the
top-level commands.
Args:
context_words: A list of context words belonging to the context being
registered. It is a list of str, instead of a single string, to support
synonym words triggering the same tab-completion context, e.g.,
both "drink" and the short-hand "dr" can trigger the same context.
comp_items: A list of completion items, as a list of str.
Raises:
TypeError: if the input arguments are not all of the correct types.
"""
if not isinstance(context_words, list):
raise TypeError("Incorrect type in context_list: Expected list, got %s" %
type(context_words))
if not isinstance(comp_items, list):
raise TypeError("Incorrect type in comp_items: Expected list, got %s" %
type(comp_items))
# Sort the completion items on registration, so that later during
# get_completions calls, no sorting will be necessary.
sorted_comp_items = sorted(comp_items)
for context_word in context_words:
self._comp_dict[context_word] = sorted_comp_items
def deregister_context(self, context_words):
"""Deregister a list of context words.
Args:
context_words: A list of context words to deregister, as a list of str.
Raises:
KeyError: if there are word(s) in context_words that do not correspond
to any registered contexts.
"""
for context_word in context_words:
if context_word not in self._comp_dict:
raise KeyError("Cannot deregister unregistered context word \"%s\"" %
context_word)
for context_word in context_words:
del self._comp_dict[context_word]
def extend_comp_items(self, context_word, new_comp_items):
"""Add a list of completion items to a completion context.
Args:
context_word: A single completion word as a string. The extension will
also apply to all other context words of the same context.
new_comp_items: (list of str) New completion items to add.
Raises:
KeyError: if the context word has not been registered.
"""
if context_word not in self._comp_dict:
raise KeyError("Context word \"%s\" has not been registered" %
context_word)
self._comp_dict[context_word].extend(new_comp_items)
self._comp_dict[context_word] = sorted(self._comp_dict[context_word])
def remove_comp_items(self, context_word, comp_items):
"""Remove a list of completion items from a completion context.
Args:
context_word: A single completion word as a string. The removal will
also apply to all other context words of the same context.
comp_items: Completion items to remove.
Raises:
KeyError: if the context word has not been registered.
"""
if context_word not in self._comp_dict:
raise KeyError("Context word \"%s\" has not been registered" %
context_word)
for item in comp_items:
self._comp_dict[context_word].remove(item)
def get_completions(self, context_word, prefix):
"""Get the tab completions given a context word and a prefix.
Args:
context_word: The context word.
prefix: The prefix of the incomplete word.
Returns:
(1) None if no registered context matches the context_word.
A list of str for the matching completion items. Can be an empty list
of a matching context exists, but no completion item matches the
prefix.
(2) Common prefix of all the words in the first return value. If the
first return value is None, this return value will be None, too. If
the first return value is not None, i.e., a list, this return value
will be a str, which can be an empty str if there is no common
prefix among the items of the list.
"""
if context_word not in self._comp_dict:
return None, None
comp_items = self._comp_dict[context_word]
comp_items = sorted(
[item for item in comp_items if item.startswith(prefix)])
return comp_items, self._common_prefix(comp_items)
def _common_prefix(self, m):
"""Given a list of str, returns the longest common prefix.
Args:
m: (list of str) A list of strings.
Returns:
(str) The longest common prefix.
"""
if not m:
return ""
s1 = min(m)
s2 = max(m)
for i, c in enumerate(s1):
if c != s2[i]:
return s1[:i]
return s1
|
TabCompletionRegistry
|
python
|
pennersr__django-allauth
|
allauth/socialaccount/providers/stackexchange/provider.py
|
{
"start": 463,
"end": 1156
}
|
class ____(OAuth2Provider):
id = "stackexchange"
name = "Stack Exchange"
account_class = StackExchangeAccount
oauth2_adapter_class = StackExchangeOAuth2Adapter
def get_site(self):
settings = self.get_settings()
return settings.get("SITE", "stackoverflow")
def extract_uid(self, data):
# `user_id` varies if you use the same account for
# e.g. StackOverflow and ServerFault. Therefore, we pick
# `account_id`.
uid = str(data["account_id"])
return uid
def extract_common_fields(self, data):
return dict(username=data.get("display_name"))
provider_classes = [StackExchangeProvider]
|
StackExchangeProvider
|
python
|
pydata__xarray
|
xarray/backends/file_manager.py
|
{
"start": 854,
"end": 1943
}
|
class ____(Generic[T_File]):
"""Manager for acquiring and closing a file object.
Use FileManager subclasses (CachingFileManager in particular) on backend
storage classes to automatically handle issues related to keeping track of
many open files and transferring them between multiple processes.
"""
def acquire(self, needs_lock: bool = True) -> T_File:
"""Acquire the file object from this manager."""
raise NotImplementedError()
def acquire_context(
self, needs_lock: bool = True
) -> AbstractContextManager[T_File]:
"""Context manager for acquiring a file. Yields a file object.
The context manager unwinds any actions taken as part of acquisition
(i.e., removes it from any cache) if an exception is raised from the
context. It *does not* automatically close the file.
"""
raise NotImplementedError()
def close(self, needs_lock: bool = True) -> None:
"""Close the file object associated with this manager, if needed."""
raise NotImplementedError()
|
FileManager
|
python
|
PrefectHQ__prefect
|
tests/cli/transfer/test_work_queues.py
|
{
"start": 416,
"end": 15588
}
|
class ____:
async def test_construct_creates_new_instance(self, transfer_work_queue: WorkQueue):
"""Test that construct creates a new MigratableWorkQueue instance."""
# Clear any existing instances
MigratableWorkQueue._instances.clear()
migratable = await MigratableWorkQueue.construct(transfer_work_queue)
assert isinstance(migratable, MigratableWorkQueue)
assert migratable.source_work_queue == transfer_work_queue
assert migratable.source_id == transfer_work_queue.id
assert migratable.destination_work_queue is None
assert migratable.destination_id is None
assert migratable._dependencies == []
async def test_construct_returns_cached_instance(
self, transfer_work_queue: WorkQueue
):
"""Test that construct returns cached instance for same ID."""
# Clear any existing instances
MigratableWorkQueue._instances.clear()
# Create first instance
migratable1 = await MigratableWorkQueue.construct(transfer_work_queue)
# Create second instance with same work queue
migratable2 = await MigratableWorkQueue.construct(transfer_work_queue)
# Should be the same instance
assert migratable1 is migratable2
assert len(MigratableWorkQueue._instances) == 1
async def test_get_instance_returns_cached_instance(
self, transfer_work_queue: WorkQueue
):
"""Test that get_instance returns cached instance."""
# Clear any existing instances
MigratableWorkQueue._instances.clear()
# Create instance
migratable = await MigratableWorkQueue.construct(transfer_work_queue)
# Retrieve instance
retrieved = await MigratableWorkQueue.get_instance(transfer_work_queue.id)
assert retrieved is migratable
async def test_get_instance_returns_none_for_unknown_id(self):
"""Test that get_instance returns None for unknown ID."""
# Clear any existing instances
MigratableWorkQueue._instances.clear()
unknown_id = uuid.uuid4()
retrieved = await MigratableWorkQueue.get_instance(unknown_id)
assert retrieved is None
@patch(
"prefect.cli.transfer._migratable_resources.work_queues.construct_migratable_resource"
)
@patch("prefect.cli.transfer._migratable_resources.work_queues.get_client")
async def test_get_dependencies_with_work_pool_name(
self, mock_get_client: MagicMock, mock_construct_resource: AsyncMock
):
"""Test get_dependencies with work pool name dependency."""
# Create work queue with work pool name
work_queue = WorkQueue(
id=uuid.uuid4(),
name="test-queue",
description="Test queue",
priority=1,
concurrency_limit=None,
is_paused=False,
last_polled=None,
status=None,
work_pool_id=uuid.uuid4(),
work_pool_name="test-work-pool",
)
# Mock the client
mock_client = AsyncMock()
mock_get_client.return_value.__aenter__.return_value = mock_client
# Mock work pool read
mock_work_pool = MagicMock()
mock_work_pool.name = "test-work-pool"
mock_client.read_work_pool.return_value = mock_work_pool
mock_migratable_work_pool = MagicMock()
mock_construct_resource.return_value = mock_migratable_work_pool
# Clear instances
MigratableWorkQueue._instances.clear()
migratable = await MigratableWorkQueue.construct(work_queue)
dependencies = await migratable.get_dependencies()
assert len(dependencies) == 1
assert dependencies[0] == mock_migratable_work_pool
mock_client.read_work_pool.assert_called_once_with("test-work-pool")
mock_construct_resource.assert_called_once_with(mock_work_pool)
@patch(
"prefect.cli.transfer._migratable_resources.work_pools.MigratableWorkPool.get_instance_by_name"
)
async def test_get_dependencies_with_cached_work_pool_dependency(
self, mock_get_instance_by_name: AsyncMock
):
"""Test get_dependencies with cached work pool dependency."""
# Create work queue with work pool name
work_queue = WorkQueue(
id=uuid.uuid4(),
name="test-queue",
description="Test queue",
priority=1,
concurrency_limit=None,
is_paused=False,
last_polled=None,
status=None,
work_pool_id=uuid.uuid4(),
work_pool_name="test-work-pool",
)
# Mock cached work pool dependency
mock_migratable_work_pool = MagicMock()
mock_get_instance_by_name.return_value = mock_migratable_work_pool
# Clear instances
MigratableWorkQueue._instances.clear()
migratable = await MigratableWorkQueue.construct(work_queue)
dependencies = await migratable.get_dependencies()
assert len(dependencies) == 1
assert dependencies[0] == mock_migratable_work_pool
mock_get_instance_by_name.assert_called_once_with(name="test-work-pool")
async def test_get_dependencies_with_no_work_pool_name(
self, transfer_work_queue: WorkQueue
):
"""Test get_dependencies with no work pool name (standalone queue)."""
# Clear instances
MigratableWorkQueue._instances.clear()
migratable = await MigratableWorkQueue.construct(transfer_work_queue)
dependencies = await migratable.get_dependencies()
assert dependencies == []
async def test_get_dependencies_cached(self):
"""Test that dependencies are cached after first call."""
# Create work queue with work pool name
work_queue = WorkQueue(
id=uuid.uuid4(),
name="test-queue",
description="Test queue",
priority=1,
concurrency_limit=None,
is_paused=False,
last_polled=None,
status=None,
work_pool_id=uuid.uuid4(),
work_pool_name="test-work-pool",
)
# Clear instances
MigratableWorkQueue._instances.clear()
migratable = await MigratableWorkQueue.construct(work_queue)
# Set up some mock dependencies
mock_dependency = MagicMock()
migratable._dependencies = [mock_dependency]
dependencies1 = await migratable.get_dependencies()
dependencies2 = await migratable.get_dependencies()
# Should return the same cached result
assert dependencies1 == dependencies2
assert dependencies1 == [mock_dependency]
@patch("prefect.cli.transfer._migratable_resources.work_queues.get_client")
async def test_migrate_success_standalone_queue(
self, mock_get_client: MagicMock, transfer_work_queue: WorkQueue
):
"""Test successful migration of standalone work queue."""
# Mock the client
mock_client = AsyncMock()
mock_get_client.return_value.__aenter__.return_value = mock_client
# Mock successful work queue creation
destination_work_queue = WorkQueue(
id=uuid.uuid4(),
name=transfer_work_queue.name,
description=transfer_work_queue.description,
priority=transfer_work_queue.priority,
concurrency_limit=transfer_work_queue.concurrency_limit,
is_paused=False,
last_polled=None,
status=None,
work_pool_id=None,
work_pool_name=None,
)
mock_client.create_work_queue.return_value = destination_work_queue
# Clear instances
MigratableWorkQueue._instances.clear()
migratable = await MigratableWorkQueue.construct(transfer_work_queue)
await migratable.migrate()
# Verify client calls
mock_client.create_work_queue.assert_called_once_with(
name=transfer_work_queue.name,
description=transfer_work_queue.description,
priority=transfer_work_queue.priority,
concurrency_limit=transfer_work_queue.concurrency_limit,
work_pool_name=transfer_work_queue.work_pool_name,
)
# Verify destination_work_queue is set
assert migratable.destination_work_queue == destination_work_queue
assert migratable.destination_id == destination_work_queue.id
@patch("prefect.cli.transfer._migratable_resources.work_queues.get_client")
async def test_migrate_success_with_work_pool(
self, mock_get_client: MagicMock, transfer_work_queue_with_pool: WorkQueue
):
"""Test successful migration of work queue with work pool."""
# Mock the client
mock_client = AsyncMock()
mock_get_client.return_value.__aenter__.return_value = mock_client
# Mock successful work queue creation
destination_work_queue = WorkQueue(
id=uuid.uuid4(),
name=transfer_work_queue_with_pool.name,
description=transfer_work_queue_with_pool.description,
priority=transfer_work_queue_with_pool.priority,
concurrency_limit=transfer_work_queue_with_pool.concurrency_limit,
is_paused=False,
last_polled=None,
status=None,
work_pool_id=uuid.uuid4(),
work_pool_name="test-pool",
)
mock_client.create_work_queue.return_value = destination_work_queue
# Clear instances
MigratableWorkQueue._instances.clear()
migratable = await MigratableWorkQueue.construct(transfer_work_queue_with_pool)
await migratable.migrate()
# Verify client calls
mock_client.create_work_queue.assert_called_once_with(
name=transfer_work_queue_with_pool.name,
description=transfer_work_queue_with_pool.description,
priority=transfer_work_queue_with_pool.priority,
concurrency_limit=transfer_work_queue_with_pool.concurrency_limit,
work_pool_name=transfer_work_queue_with_pool.work_pool_name,
)
# Verify destination_work_queue is set
assert migratable.destination_work_queue == destination_work_queue
assert migratable.destination_id == destination_work_queue.id
@patch("prefect.cli.transfer._migratable_resources.work_queues.get_client")
async def test_migrate_already_exists(
self, mock_get_client: MagicMock, transfer_work_queue_with_pool: WorkQueue
):
"""Test migration when work queue already exists."""
# Mock the client
mock_client = AsyncMock()
mock_get_client.return_value.__aenter__.return_value = mock_client
# Mock ObjectAlreadyExists exception on create
mock_http_exc = Exception("Conflict")
mock_client.create_work_queue.side_effect = ObjectAlreadyExists(mock_http_exc)
# Mock existing work queue in read_work_queues response
existing_work_queue = WorkQueue(
id=uuid.uuid4(),
name=transfer_work_queue_with_pool.name,
description="existing description", # Different to show it reads existing
priority=2,
concurrency_limit=10,
is_paused=True,
last_polled=None,
status=None,
work_pool_id=uuid.uuid4(),
work_pool_name="test-pool",
)
mock_client.read_work_queues.return_value = [existing_work_queue]
# Clear instances
MigratableWorkQueue._instances.clear()
migratable = await MigratableWorkQueue.construct(transfer_work_queue_with_pool)
# Should raise TransferSkipped
with pytest.raises(TransferSkipped, match="Already exists"):
await migratable.migrate()
# Verify client calls
mock_client.create_work_queue.assert_called_once()
mock_client.read_work_queues.assert_called_once_with(
work_pool_name=transfer_work_queue_with_pool.work_pool_name,
work_queue_filter=WorkQueueFilter(
name=WorkQueueFilterName(any_=[transfer_work_queue_with_pool.name]),
),
)
# Verify destination_work_queue is set to existing
assert migratable.destination_work_queue == existing_work_queue
assert migratable.destination_id == existing_work_queue.id
@patch("prefect.cli.transfer._migratable_resources.work_queues.get_client")
async def test_migrate_already_exists_queue_not_found_in_list(
self, mock_get_client: MagicMock, transfer_work_queue_with_pool: WorkQueue
):
"""Test migration when work queue already exists but is not found in list."""
# Mock the client
mock_client = AsyncMock()
mock_get_client.return_value.__aenter__.return_value = mock_client
# Mock ObjectAlreadyExists exception on create
mock_http_exc = Exception("Conflict")
mock_client.create_work_queue.side_effect = ObjectAlreadyExists(mock_http_exc)
# Mock empty work queues list (queue not found)
mock_client.read_work_queues.return_value = []
# Clear instances
MigratableWorkQueue._instances.clear()
migratable = await MigratableWorkQueue.construct(transfer_work_queue_with_pool)
with pytest.raises(RuntimeError):
await migratable.migrate()
# Verify calls
mock_client.create_work_queue.assert_called_once()
mock_client.read_work_queues.assert_called_once()
# destination_work_queue should remain None since we couldn't find it
assert migratable.destination_work_queue is None
@patch("prefect.cli.transfer._migratable_resources.work_queues.get_client")
async def test_migrate_skips_default_work_queue(self, mock_get_client: MagicMock):
"""Test that migration skips work queues named 'default'."""
# Create a work queue with name 'default'
default_work_queue = WorkQueue(
id=uuid.uuid4(),
name="default",
description="Default work queue",
priority=1,
concurrency_limit=None,
is_paused=False,
last_polled=None,
status=None,
work_pool_id=uuid.uuid4(),
work_pool_name="test-pool",
)
# Mock the client
mock_client = AsyncMock()
mock_get_client.return_value.__aenter__.return_value = mock_client
# Mock empty work queues list (queue not found)
mock_client.read_work_queues.return_value = [default_work_queue]
# Clear instances
MigratableWorkQueue._instances.clear()
migratable = await MigratableWorkQueue.construct(default_work_queue)
# Should raise TransferSkipped for default work queue
with pytest.raises(
TransferSkipped,
match="Default work queues are created with work pools",
):
await migratable.migrate()
# Verify no client calls were made since it's skipped early
assert migratable.destination_work_queue is None
|
TestMigratableWorkQueue
|
python
|
sympy__sympy
|
sympy/series/sequences.py
|
{
"start": 27276,
"end": 28586
}
|
class ____(SeqBase):
"""
Base class for operations on sequences.
Examples
========
>>> from sympy.series.sequences import SeqExprOp, sequence
>>> from sympy.abc import n
>>> s1 = sequence(n**2, (n, 0, 10))
>>> s2 = sequence((1, 2, 3), (n, 5, 10))
>>> s = SeqExprOp(s1, s2)
>>> s.gen
(n**2, (1, 2, 3))
>>> s.interval
Interval(5, 10)
>>> s.length
6
See Also
========
sympy.series.sequences.SeqAdd
sympy.series.sequences.SeqMul
"""
@property
def gen(self):
"""Generator for the sequence.
returns a tuple of generators of all the argument sequences.
"""
return tuple(a.gen for a in self.args)
@property
def interval(self):
"""Sequence is defined on the intersection
of all the intervals of respective sequences
"""
return Intersection(*(a.interval for a in self.args))
@property
def start(self):
return self.interval.inf
@property
def stop(self):
return self.interval.sup
@property
def variables(self):
"""Cumulative of all the bound variables"""
return tuple(flatten([a.variables for a in self.args]))
@property
def length(self):
return self.stop - self.start + 1
|
SeqExprOp
|
python
|
pandas-dev__pandas
|
scripts/check_test_naming.py
|
{
"start": 301,
"end": 5234
}
|
class ____ function definition. Though hopefully that shouldn't be necessary.
"""
from __future__ import annotations
import argparse
import ast
import os
from pathlib import Path
import sys
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from collections.abc import (
Iterator,
Sequence,
)
PRAGMA = "# not a test"
def _find_names(node: ast.Module) -> Iterator[str]:
for _node in ast.walk(node):
if isinstance(_node, ast.Name):
yield _node.id
elif isinstance(_node, ast.Attribute):
yield _node.attr
def _is_fixture(node: ast.expr) -> bool:
if isinstance(node, ast.Call):
node = node.func
return (
isinstance(node, ast.Attribute)
and node.attr == "fixture"
and isinstance(node.value, ast.Name)
and node.value.id == "pytest"
)
def _is_register_dtype(node):
return isinstance(node, ast.Name) and node.id == "register_extension_dtype"
def is_misnamed_test_func(
node: ast.expr | ast.stmt, names: Sequence[str], line: str
) -> bool:
return (
isinstance(node, ast.FunctionDef)
and not node.name.startswith("test")
and names.count(node.name) == 0
and not any(_is_fixture(decorator) for decorator in node.decorator_list)
and PRAGMA not in line
and node.name
not in ("teardown_method", "setup_method", "teardown_class", "setup_class")
)
def is_misnamed_test_class(
node: ast.expr | ast.stmt, names: Sequence[str], line: str
) -> bool:
return (
isinstance(node, ast.ClassDef)
and not node.name.startswith("Test")
and names.count(node.name) == 0
and not any(_is_register_dtype(decorator) for decorator in node.decorator_list)
and PRAGMA not in line
)
def main(content: str, file: str) -> int:
lines = content.splitlines()
tree = ast.parse(content)
names = list(_find_names(tree))
ret = 0
for node in tree.body:
if is_misnamed_test_func(node, names, lines[node.lineno - 1]):
print(
f"{file}:{node.lineno}:{node.col_offset} "
"found test function which does not start with 'test'"
)
ret = 1
elif is_misnamed_test_class(node, names, lines[node.lineno - 1]):
print(
f"{file}:{node.lineno}:{node.col_offset} "
"found test class which does not start with 'Test'"
)
ret = 1
if (
isinstance(node, ast.ClassDef)
and names.count(node.name) == 0
and not any(
_is_register_dtype(decorator) for decorator in node.decorator_list
)
and PRAGMA not in lines[node.lineno - 1]
):
for _node in node.body:
if is_misnamed_test_func(_node, names, lines[_node.lineno - 1]):
# It could be that this function is used somewhere by the
# parent class. For example, there might be a base class
# with
#
# class Foo:
# def foo(self):
# assert 1+1==2
# def test_foo(self):
# self.foo()
#
# and then some subclass overwrites `foo`. So, we check that
# `self.foo` doesn't appear in any of the test classes.
# Note some false negatives might get through, but that's OK.
# This is good enough that has helped identify several examples
# of tests not being run.
assert isinstance(_node, ast.FunctionDef) # help mypy
should_continue = False
for _file in (Path("pandas") / "tests").rglob("*.py"):
with open(os.path.join(_file), encoding="utf-8") as fd:
_content = fd.read()
if f"self.{_node.name}" in _content:
should_continue = True
break
if should_continue:
continue
print(
f"{file}:{_node.lineno}:{_node.col_offset} "
"found test function which does not start with 'test'"
)
ret = 1
return ret
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("paths", nargs="*")
args = parser.parse_args()
ret = 0
for file in args.paths:
filename = os.path.basename(file)
if not (filename.startswith("test") and filename.endswith(".py")):
continue
with open(file, encoding="utf-8") as fd:
content = fd.read()
ret |= main(content, file)
sys.exit(ret)
|
or
|
python
|
huggingface__transformers
|
tests/models/vivit/test_modeling_vivit.py
|
{
"start": 5736,
"end": 12561
}
|
class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as Vivit does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (VivitModel, VivitForVideoClassification) if is_torch_available() else ()
pipeline_model_mapping = (
{"feature-extraction": VivitModel, "video-classification": VivitForVideoClassification}
if is_torch_available()
else {}
)
test_resize_embeddings = False
test_torch_exportable = True
def setUp(self):
self.model_tester = VivitModelTester(self)
self.config_tester = ConfigTester(self, config_class=VivitConfig, has_text_modality=False, hidden_size=37)
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = copy.deepcopy(inputs_dict)
if return_labels:
if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
inputs_dict["labels"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
return inputs_dict
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="Vivit does not use inputs_embeds")
def test_inputs_embeds(self):
pass
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
self.assertEqual(arg_names[0], "pixel_values")
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_video_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
model_name = "google/vivit-b-16x2-kinetics400"
model = VivitModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
seq_len = self.model_tester.seq_length
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_len, seq_len],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(out_len + 1, len(outputs))
self_attentions = outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_len, seq_len],
)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
expected_num_layers = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(hidden_states), expected_num_layers)
seq_length = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
# We will verify our results on a video of eating spaghetti
# Frame indices used: [164 168 172 176 181 185 189 193 198 202 206 210 215 219 223 227]
def prepare_video():
file = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti_32_frames.npy", repo_type="dataset"
)
video = np.load(file)
return list(video)
@require_torch
@require_vision
|
VivitModelTest
|
python
|
Unity-Technologies__ml-agents
|
ml-agents/mlagents/trainers/model_saver/torch_model_saver.py
|
{
"start": 667,
"end": 6620
}
|
class ____(BaseModelSaver):
"""
ModelSaver class for PyTorch
"""
def __init__(
self, trainer_settings: TrainerSettings, model_path: str, load: bool = False
):
super().__init__()
self.model_path = model_path
self.initialize_path = trainer_settings.init_path
self._keep_checkpoints = trainer_settings.keep_checkpoints
self.load = load
self.policy: Optional[TorchPolicy] = None
self.exporter: Optional[ModelSerializer] = None
self.modules: Dict[str, torch.nn.Modules] = {}
def register(self, module: Union[TorchPolicy, TorchOptimizer]) -> None:
if isinstance(module, TorchPolicy) or isinstance(module, TorchOptimizer):
self.modules.update(module.get_modules()) # type: ignore
else:
raise UnityPolicyException(
"Registering Object of unsupported type {} to ModelSaver ".format(
type(module)
)
)
if self.policy is None and isinstance(module, TorchPolicy):
self.policy = module
self.exporter = ModelSerializer(self.policy)
def save_checkpoint(self, behavior_name: str, step: int) -> Tuple[str, List[str]]:
if not os.path.exists(self.model_path):
os.makedirs(self.model_path)
checkpoint_path = os.path.join(self.model_path, f"{behavior_name}-{step}")
state_dict = {
name: module.state_dict() for name, module in self.modules.items()
}
pytorch_ckpt_path = f"{checkpoint_path}.pt"
export_ckpt_path = f"{checkpoint_path}.onnx"
torch.save(state_dict, f"{checkpoint_path}.pt")
torch.save(state_dict, os.path.join(self.model_path, DEFAULT_CHECKPOINT_NAME))
self.export(checkpoint_path, behavior_name)
return export_ckpt_path, [pytorch_ckpt_path]
def export(self, output_filepath: str, behavior_name: str) -> None:
if self.exporter is not None:
self.exporter.export_policy_model(output_filepath)
def initialize_or_load(self, policy: Optional[TorchPolicy] = None) -> None:
# Initialize/Load registered self.policy by default.
# If given input argument policy, use the input policy instead.
# This argument is mainly for initialization of the ghost trainer's fixed policy.
reset_steps = not self.load
if self.initialize_path is not None:
logger.info(f"Initializing from {self.initialize_path}.")
self._load_model(
self.initialize_path, policy, reset_global_steps=reset_steps
)
elif self.load:
logger.info(f"Resuming from {self.model_path}.")
self._load_model(
os.path.join(self.model_path, DEFAULT_CHECKPOINT_NAME),
policy,
reset_global_steps=reset_steps,
)
def _load_model(
self,
load_path: str,
policy: Optional[TorchPolicy] = None,
reset_global_steps: bool = False,
) -> None:
saved_state_dict = torch.load(load_path)
if policy is None:
modules = self.modules
policy = self.policy
else:
modules = policy.get_modules()
policy = cast(TorchPolicy, policy)
for name, mod in modules.items():
try:
if isinstance(mod, torch.nn.Module):
missing_keys, unexpected_keys = mod.load_state_dict(
saved_state_dict[name], strict=False
)
if missing_keys:
logger.warning(
f"Did not find these keys {missing_keys} in checkpoint. Initializing."
)
if unexpected_keys:
logger.warning(
f"Did not expect these keys {unexpected_keys} in checkpoint. Ignoring."
)
else:
# If module is not an nn.Module, try to load as one piece
mod.load_state_dict(saved_state_dict[name])
# KeyError is raised if the module was not present in the last run but is being
# accessed in the saved_state_dict.
# ValueError is raised by the optimizer's load_state_dict if the parameters have
# have changed. Note, the optimizer uses a completely different load_state_dict
# function because it is not an nn.Module.
# RuntimeError is raised by PyTorch if there is a size mismatch between modules
# of the same name. This will still partially assign values to those layers that
# have not changed shape.
except (KeyError, ValueError, RuntimeError) as err:
logger.warning(f"Failed to load for module {name}. Initializing")
logger.debug(f"Module loading error : {err}")
if reset_global_steps:
policy.set_step(0)
logger.info(
"Starting training from step 0 and saving to {}.".format(
self.model_path
)
)
else:
logger.info(f"Resuming training from step {policy.get_current_step()}.")
def copy_final_model(self, source_nn_path: str) -> None:
"""
Copy the .nn file at the given source to the destination.
Also copies the corresponding .onnx file if it exists.
"""
final_model_name = os.path.splitext(source_nn_path)[0]
if SerializationSettings.convert_to_onnx:
try:
source_path = f"{final_model_name}.onnx"
destination_path = f"{self.model_path}.onnx"
shutil.copyfile(source_path, destination_path)
logger.info(f"Copied {source_path} to {destination_path}.")
except OSError:
pass
|
TorchModelSaver
|
python
|
python-pillow__Pillow
|
Tests/test_image_resample.py
|
{
"start": 8436,
"end": 10004
}
|
class ____:
def make_case(
self, mode: str, fill: tuple[int, int, int] | float
) -> tuple[Image.Image, float | tuple[int, ...]]:
im = Image.new(mode, (512, 9), fill)
px = im.load()
assert px is not None
return im.resize((9, 512), Image.Resampling.LANCZOS), px[0, 0]
def run_case(self, case: tuple[Image.Image, float | tuple[int, ...]]) -> None:
channel, color = case
px = channel.load()
assert px is not None
for x in range(channel.size[0]):
for y in range(channel.size[1]):
if px[x, y] != color:
message = f"{px[x, y]} != {color} for pixel {(x, y)}"
assert px[x, y] == color, message
def test_8u(self) -> None:
im, color = self.make_case("RGB", (0, 64, 255))
r, g, b = im.split()
assert isinstance(color, tuple)
self.run_case((r, color[0]))
self.run_case((g, color[1]))
self.run_case((b, color[2]))
self.run_case(self.make_case("L", 12))
def test_32i(self) -> None:
self.run_case(self.make_case("I", 12))
self.run_case(self.make_case("I", 0x7FFFFFFF))
self.run_case(self.make_case("I", -12))
self.run_case(self.make_case("I", -1 << 31))
def test_32f(self) -> None:
self.run_case(self.make_case("F", 1))
self.run_case(self.make_case("F", 3.40282306074e38))
self.run_case(self.make_case("F", 1.175494e-38))
self.run_case(self.make_case("F", 1.192093e-07))
|
TestCoreResampleConsistency
|
python
|
PrefectHQ__prefect
|
tests/cli/test_server_services.py
|
{
"start": 945,
"end": 3749
}
|
class ____:
def test_start_and_stop_services(self, pid_file: Path):
invoke_and_assert(
command=[
"server",
"services",
"start",
"--background",
],
expected_output_contains="Services are running in the background.",
expected_code=0,
)
assert pid_file.exists(), "Services PID file does not exist"
invoke_and_assert(
command=[
"server",
"services",
"stop",
],
expected_output_contains="All services stopped.",
expected_code=0,
)
assert not pid_file.exists(), "Services PID file still exists"
def test_start_duplicate_services(self, pid_file: Path):
invoke_and_assert(
command=[
"server",
"services",
"start",
"--background",
],
expected_output_contains="Services are running in the background.",
expected_code=0,
)
assert pid_file.exists(), "PID file should exist before duplicate test"
invoke_and_assert(
command=[
"server",
"services",
"start",
"--background",
],
expected_output_contains="Services are already running in the background.",
expected_code=1,
)
invoke_and_assert(
command=[
"server",
"services",
"stop",
],
expected_output_contains="All services stopped.",
expected_code=0,
)
def test_stop_stale_pid_file(self, pid_file: Path):
pid_file.parent.mkdir(parents=True, exist_ok=True)
pid_file.write_text("99999") # Use a likely unused PID
invoke_and_assert(
command=[
"server",
"services",
"stop",
],
expected_output_contains="Services were not running",
expected_output_does_not_contain="All services stopped.",
expected_code=0,
)
assert not pid_file.exists(), "Services PID file still exists"
def test_list_services(self):
invoke_and_assert(
command=[
"server",
"services",
"ls",
],
expected_output_contains=[
"Available Services",
"MarkLateRuns",
"PREFECT_SERVER_SERVICES_LATE_RUNS_ENABLED",
"Telemetry",
"PREFECT_SERVER_ANALYTICS_ENABLED",
],
expected_code=0,
)
|
TestBackgroundServices
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-cloud-cli/dagster_cloud_cli/commands/ci/state.py
|
{
"start": 1092,
"end": 2007
}
|
class ____(BaseModel, extra=Extra.forbid):
# we intentionally don't save api_token here for security reasons
url: str
deployment_name: str
location_file: str
location_name: str
is_branch_deployment: bool
selected: bool = True
build: BuildMetadata
build_output: Optional[Union[DockerBuildOutput, PexBuildOutput]] = Field(
None, discriminator="strategy"
)
defs_state_info: Optional[DefsStateInfo] = None
status_url: Optional[str] # link to cicd run url when building and dagster cloud url when done
history: list[StatusChange] = []
project_dir: Optional[str] = None
def add_status_change(self, status: LocationStatus, log: str):
self.history.append(
StatusChange(
timestamp=datetime.datetime.now(datetime.timezone.utc),
status=status,
log=log,
)
)
|
LocationState
|
python
|
langchain-ai__langchain
|
libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_model_retry.py
|
{
"start": 1844,
"end": 20743
}
|
class ____(FakeToolCallingModel):
"""Model that always fails with a specific exception."""
error_message: str = Field(default="Model error")
error_type: type[Exception] = Field(default=ValueError)
def _generate(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> ChatResult:
"""Execute the model and raise exception.
Args:
messages: Input messages.
stop: Optional stop sequences.
run_manager: Optional callback manager.
**kwargs: Additional keyword arguments.
Raises:
Exception: Always raises the configured exception.
"""
raise self.error_type(self.error_message)
def test_model_retry_initialization_defaults() -> None:
"""Test ModelRetryMiddleware initialization with default values."""
retry = ModelRetryMiddleware()
assert retry.max_retries == 2
assert retry.tools == []
assert retry.on_failure == "continue"
assert retry.backoff_factor == 2.0
assert retry.initial_delay == 1.0
assert retry.max_delay == 60.0
assert retry.jitter is True
def test_model_retry_initialization_custom() -> None:
"""Test ModelRetryMiddleware initialization with custom values."""
retry = ModelRetryMiddleware(
max_retries=5,
retry_on=(ValueError, RuntimeError),
on_failure="error",
backoff_factor=1.5,
initial_delay=0.5,
max_delay=30.0,
jitter=False,
)
assert retry.max_retries == 5
assert retry.tools == []
assert retry.retry_on == (ValueError, RuntimeError)
assert retry.on_failure == "error"
assert retry.backoff_factor == 1.5
assert retry.initial_delay == 0.5
assert retry.max_delay == 30.0
assert retry.jitter is False
def test_model_retry_invalid_max_retries() -> None:
"""Test ModelRetryMiddleware raises error for invalid max_retries."""
with pytest.raises(ValueError, match="max_retries must be >= 0"):
ModelRetryMiddleware(max_retries=-1)
def test_model_retry_invalid_initial_delay() -> None:
"""Test ModelRetryMiddleware raises error for invalid initial_delay."""
with pytest.raises(ValueError, match="initial_delay must be >= 0"):
ModelRetryMiddleware(initial_delay=-1.0)
def test_model_retry_invalid_max_delay() -> None:
"""Test ModelRetryMiddleware raises error for invalid max_delay."""
with pytest.raises(ValueError, match="max_delay must be >= 0"):
ModelRetryMiddleware(max_delay=-1.0)
def test_model_retry_invalid_backoff_factor() -> None:
"""Test ModelRetryMiddleware raises error for invalid backoff_factor."""
with pytest.raises(ValueError, match="backoff_factor must be >= 0"):
ModelRetryMiddleware(backoff_factor=-1.0)
def test_model_retry_working_model_no_retry_needed() -> None:
"""Test ModelRetryMiddleware with a working model (no retry needed)."""
model = FakeToolCallingModel()
retry = ModelRetryMiddleware(max_retries=2, initial_delay=0.01, jitter=False)
agent = create_agent(
model=model,
tools=[],
middleware=[retry],
checkpointer=InMemorySaver(),
)
result = agent.invoke(
{"messages": [HumanMessage("Hello")]},
{"configurable": {"thread_id": "test"}},
)
ai_messages = [m for m in result["messages"] if isinstance(m, AIMessage)]
assert len(ai_messages) >= 1
assert "Hello" in ai_messages[-1].content
def test_model_retry_failing_model_returns_message() -> None:
"""Test ModelRetryMiddleware with failing model returns error message."""
model = AlwaysFailingModel(error_message="Model error", error_type=ValueError)
retry = ModelRetryMiddleware(
max_retries=2,
initial_delay=0.01,
jitter=False,
on_failure="continue",
)
agent = create_agent(
model=model,
tools=[],
middleware=[retry],
checkpointer=InMemorySaver(),
)
result = agent.invoke(
{"messages": [HumanMessage("Hello")]},
{"configurable": {"thread_id": "test"}},
)
ai_messages = [m for m in result["messages"] if isinstance(m, AIMessage)]
assert len(ai_messages) >= 1
# Should contain error message with attempts
last_msg = ai_messages[-1].content
assert "failed after 3 attempts" in last_msg
assert "ValueError" in last_msg
def test_model_retry_failing_model_raises() -> None:
"""Test ModelRetryMiddleware with on_failure='error' re-raises exception."""
model = AlwaysFailingModel(error_message="Model error", error_type=ValueError)
retry = ModelRetryMiddleware(
max_retries=2,
initial_delay=0.01,
jitter=False,
on_failure="error",
)
agent = create_agent(
model=model,
tools=[],
middleware=[retry],
checkpointer=InMemorySaver(),
)
# Should raise the ValueError from the model
with pytest.raises(ValueError, match="Model error"):
agent.invoke(
{"messages": [HumanMessage("Hello")]},
{"configurable": {"thread_id": "test"}},
)
def test_model_retry_custom_failure_formatter() -> None:
"""Test ModelRetryMiddleware with custom failure message formatter."""
def custom_formatter(exc: Exception) -> str:
return f"Custom error: {type(exc).__name__}"
model = AlwaysFailingModel(error_message="Model error", error_type=ValueError)
retry = ModelRetryMiddleware(
max_retries=1,
initial_delay=0.01,
jitter=False,
on_failure=custom_formatter,
)
agent = create_agent(
model=model,
tools=[],
middleware=[retry],
checkpointer=InMemorySaver(),
)
result = agent.invoke(
{"messages": [HumanMessage("Hello")]},
{"configurable": {"thread_id": "test"}},
)
ai_messages = [m for m in result["messages"] if isinstance(m, AIMessage)]
assert len(ai_messages) >= 1
assert "Custom error: ValueError" in ai_messages[-1].content
def test_model_retry_succeeds_after_retries() -> None:
"""Test ModelRetryMiddleware succeeds after temporary failures."""
model = TemporaryFailureModel(fail_count=2)
retry = ModelRetryMiddleware(
max_retries=3,
initial_delay=0.01,
jitter=False,
)
agent = create_agent(
model=model,
tools=[],
middleware=[retry],
checkpointer=InMemorySaver(),
)
result = agent.invoke(
{"messages": [HumanMessage("Hello")]},
{"configurable": {"thread_id": "test"}},
)
ai_messages = [m for m in result["messages"] if isinstance(m, AIMessage)]
assert len(ai_messages) >= 1
# Should succeed on 3rd attempt
assert "Success after 3 attempts" in ai_messages[-1].content
assert model.attempt == 3
def test_model_retry_specific_exceptions() -> None:
"""Test ModelRetryMiddleware only retries specific exception types."""
# This model will fail with RuntimeError, which we won't retry
model = AlwaysFailingModel(error_message="Runtime error", error_type=RuntimeError)
# Only retry ValueError
retry = ModelRetryMiddleware(
max_retries=2,
retry_on=(ValueError,),
initial_delay=0.01,
jitter=False,
on_failure="continue",
)
agent = create_agent(
model=model,
tools=[],
middleware=[retry],
checkpointer=InMemorySaver(),
)
result = agent.invoke(
{"messages": [HumanMessage("Hello")]},
{"configurable": {"thread_id": "test"}},
)
ai_messages = [m for m in result["messages"] if isinstance(m, AIMessage)]
assert len(ai_messages) >= 1
# RuntimeError should fail immediately (1 attempt only)
assert "1 attempt" in ai_messages[-1].content
def test_model_retry_custom_exception_filter() -> None:
"""Test ModelRetryMiddleware with custom exception filter function."""
class CustomError(Exception):
"""Custom exception with retry_me attribute."""
def __init__(self, message: str, retry_me: bool):
"""Initialize custom error.
Args:
message: Error message.
retry_me: Whether this error should be retried.
"""
super().__init__(message)
self.retry_me = retry_me
attempt_count = {"value": 0}
class CustomErrorModel(FakeToolCallingModel):
"""Model that raises CustomError."""
def _generate(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> ChatResult:
"""Execute the model and raise CustomError.
Args:
messages: Input messages.
stop: Optional stop sequences.
run_manager: Optional callback manager.
**kwargs: Additional keyword arguments.
Raises:
CustomError: Always raises CustomError.
"""
attempt_count["value"] += 1
if attempt_count["value"] == 1:
raise CustomError("Retryable error", retry_me=True)
raise CustomError("Non-retryable error", retry_me=False)
def should_retry(exc: Exception) -> bool:
return isinstance(exc, CustomError) and exc.retry_me
model = CustomErrorModel()
retry = ModelRetryMiddleware(
max_retries=3,
retry_on=should_retry,
initial_delay=0.01,
jitter=False,
on_failure="continue",
)
agent = create_agent(
model=model,
tools=[],
middleware=[retry],
checkpointer=InMemorySaver(),
)
result = agent.invoke(
{"messages": [HumanMessage("Hello")]},
{"configurable": {"thread_id": "test"}},
)
ai_messages = [m for m in result["messages"] if isinstance(m, AIMessage)]
assert len(ai_messages) >= 1
# Should retry once (attempt 1 with retry_me=True), then fail on attempt 2 (retry_me=False)
assert attempt_count["value"] == 2
assert "2 attempts" in ai_messages[-1].content
def test_model_retry_backoff_timing() -> None:
"""Test ModelRetryMiddleware applies correct backoff delays."""
model = TemporaryFailureModel(fail_count=3)
retry = ModelRetryMiddleware(
max_retries=3,
initial_delay=0.1,
backoff_factor=2.0,
jitter=False,
)
agent = create_agent(
model=model,
tools=[],
middleware=[retry],
checkpointer=InMemorySaver(),
)
start_time = time.time()
result = agent.invoke(
{"messages": [HumanMessage("Hello")]},
{"configurable": {"thread_id": "test"}},
)
elapsed = time.time() - start_time
ai_messages = [m for m in result["messages"] if isinstance(m, AIMessage)]
assert len(ai_messages) >= 1
# Expected delays: 0.1 + 0.2 + 0.4 = 0.7 seconds
# Allow some margin for execution time
assert elapsed >= 0.6, f"Expected at least 0.6s, got {elapsed}s"
def test_model_retry_constant_backoff() -> None:
"""Test ModelRetryMiddleware with constant backoff (backoff_factor=0)."""
model = TemporaryFailureModel(fail_count=2)
retry = ModelRetryMiddleware(
max_retries=2,
initial_delay=0.1,
backoff_factor=0.0, # Constant backoff
jitter=False,
)
agent = create_agent(
model=model,
tools=[],
middleware=[retry],
checkpointer=InMemorySaver(),
)
start_time = time.time()
result = agent.invoke(
{"messages": [HumanMessage("Hello")]},
{"configurable": {"thread_id": "test"}},
)
elapsed = time.time() - start_time
ai_messages = [m for m in result["messages"] if isinstance(m, AIMessage)]
assert len(ai_messages) >= 1
# Expected delays: 0.1 + 0.1 = 0.2 seconds (constant)
assert elapsed >= 0.15, f"Expected at least 0.15s, got {elapsed}s"
assert elapsed < 0.5, f"Expected less than 0.5s (exponential would be longer), got {elapsed}s"
def test_model_retry_max_delay_cap() -> None:
"""Test calculate_delay caps delay at max_delay."""
# Test delay calculation with aggressive backoff and max_delay cap
delay_0 = calculate_delay(
0,
backoff_factor=10.0, # Very aggressive backoff
initial_delay=1.0,
max_delay=2.0, # Cap at 2 seconds
jitter=False,
) # 1.0
delay_1 = calculate_delay(
1,
backoff_factor=10.0,
initial_delay=1.0,
max_delay=2.0,
jitter=False,
) # 10.0 -> capped to 2.0
delay_2 = calculate_delay(
2,
backoff_factor=10.0,
initial_delay=1.0,
max_delay=2.0,
jitter=False,
) # 100.0 -> capped to 2.0
assert delay_0 == 1.0
assert delay_1 == 2.0
assert delay_2 == 2.0
def test_model_retry_jitter_variation() -> None:
"""Test calculate_delay adds jitter to delays."""
# Generate multiple delays and ensure they vary
delays = [
calculate_delay(
0,
backoff_factor=1.0,
initial_delay=1.0,
max_delay=60.0,
jitter=True,
)
for _ in range(10)
]
# All delays should be within ±25% of 1.0 (i.e., between 0.75 and 1.25)
for delay in delays:
assert 0.75 <= delay <= 1.25
# Delays should vary (not all the same)
assert len(set(delays)) > 1
@pytest.mark.asyncio
async def test_model_retry_async_working_model() -> None:
"""Test ModelRetryMiddleware with async execution and working model."""
model = FakeToolCallingModel()
retry = ModelRetryMiddleware(max_retries=2, initial_delay=0.01, jitter=False)
agent = create_agent(
model=model,
tools=[],
middleware=[retry],
checkpointer=InMemorySaver(),
)
result = await agent.ainvoke(
{"messages": [HumanMessage("Hello")]},
{"configurable": {"thread_id": "test"}},
)
ai_messages = [m for m in result["messages"] if isinstance(m, AIMessage)]
assert len(ai_messages) >= 1
assert "Hello" in ai_messages[-1].content
@pytest.mark.asyncio
async def test_model_retry_async_failing_model() -> None:
"""Test ModelRetryMiddleware with async execution and failing model."""
model = AlwaysFailingModel(error_message="Model error", error_type=ValueError)
retry = ModelRetryMiddleware(
max_retries=2,
initial_delay=0.01,
jitter=False,
on_failure="continue",
)
agent = create_agent(
model=model,
tools=[],
middleware=[retry],
checkpointer=InMemorySaver(),
)
result = await agent.ainvoke(
{"messages": [HumanMessage("Hello")]},
{"configurable": {"thread_id": "test"}},
)
ai_messages = [m for m in result["messages"] if isinstance(m, AIMessage)]
assert len(ai_messages) >= 1
last_msg = ai_messages[-1].content
assert "failed after 3 attempts" in last_msg
assert "ValueError" in last_msg
@pytest.mark.asyncio
async def test_model_retry_async_succeeds_after_retries() -> None:
"""Test ModelRetryMiddleware async execution succeeds after temporary failures."""
model = TemporaryFailureModel(fail_count=2)
retry = ModelRetryMiddleware(
max_retries=3,
initial_delay=0.01,
jitter=False,
)
agent = create_agent(
model=model,
tools=[],
middleware=[retry],
checkpointer=InMemorySaver(),
)
result = await agent.ainvoke(
{"messages": [HumanMessage("Hello")]},
{"configurable": {"thread_id": "test"}},
)
ai_messages = [m for m in result["messages"] if isinstance(m, AIMessage)]
assert len(ai_messages) >= 1
assert "Success after 3 attempts" in ai_messages[-1].content
@pytest.mark.asyncio
async def test_model_retry_async_backoff_timing() -> None:
"""Test ModelRetryMiddleware async applies correct backoff delays."""
model = TemporaryFailureModel(fail_count=3)
retry = ModelRetryMiddleware(
max_retries=3,
initial_delay=0.1,
backoff_factor=2.0,
jitter=False,
)
agent = create_agent(
model=model,
tools=[],
middleware=[retry],
checkpointer=InMemorySaver(),
)
start_time = time.time()
result = await agent.ainvoke(
{"messages": [HumanMessage("Hello")]},
{"configurable": {"thread_id": "test"}},
)
elapsed = time.time() - start_time
ai_messages = [m for m in result["messages"] if isinstance(m, AIMessage)]
assert len(ai_messages) >= 1
# Expected delays: 0.1 + 0.2 + 0.4 = 0.7 seconds
assert elapsed >= 0.6, f"Expected at least 0.6s, got {elapsed}s"
def test_model_retry_zero_retries() -> None:
"""Test ModelRetryMiddleware with max_retries=0 (no retries)."""
model = AlwaysFailingModel(error_message="Model error", error_type=ValueError)
retry = ModelRetryMiddleware(
max_retries=0, # No retries
on_failure="continue",
)
agent = create_agent(
model=model,
tools=[],
middleware=[retry],
checkpointer=InMemorySaver(),
)
result = agent.invoke(
{"messages": [HumanMessage("Hello")]},
{"configurable": {"thread_id": "test"}},
)
ai_messages = [m for m in result["messages"] if isinstance(m, AIMessage)]
assert len(ai_messages) >= 1
# Should fail after 1 attempt (no retries)
assert "1 attempt" in ai_messages[-1].content
def test_model_retry_multiple_middleware_composition() -> None:
"""Test ModelRetryMiddleware composes correctly with other middleware."""
call_log = []
# Custom middleware that logs calls
from langchain.agents.middleware.types import wrap_model_call
@wrap_model_call
def logging_middleware(request, handler):
call_log.append("before_model")
response = handler(request)
call_log.append("after_model")
return response
model = FakeToolCallingModel()
retry = ModelRetryMiddleware(max_retries=2, initial_delay=0.01, jitter=False)
agent = create_agent(
model=model,
tools=[],
middleware=[logging_middleware, retry],
checkpointer=InMemorySaver(),
)
result = agent.invoke(
{"messages": [HumanMessage("Hello")]},
{"configurable": {"thread_id": "test"}},
)
# Both middleware should be called
assert call_log == ["before_model", "after_model"]
ai_messages = [m for m in result["messages"] if isinstance(m, AIMessage)]
assert len(ai_messages) >= 1
assert "Hello" in ai_messages[-1].content
|
AlwaysFailingModel
|
python
|
pydata__xarray
|
xarray/util/generate_aggregations.py
|
{
"start": 13340,
"end": 15439
}
|
class ____(AggregationGenerator):
_dim_docstring = _DIM_DOCSTRING_GROUPBY
_template_signature = TEMPLATE_REDUCTION_SIGNATURE_GROUPBY
def generate_code(self, method, has_keep_attrs):
extra_kwargs = [kwarg.call for kwarg in method.extra_kwargs if kwarg.call]
if self.datastructure.numeric_only:
extra_kwargs.append(f"numeric_only={method.numeric_only},")
# median isn't enabled yet, because it would break if a single group was present in multiple
# chunks. The non-flox code path will just rechunk every group to a single chunk and execute the median
method_is_not_flox_supported = method.name in ("median", "cumsum", "cumprod")
if method_is_not_flox_supported:
indent = 12
else:
indent = 16
if extra_kwargs:
extra_kwargs = textwrap.indent("\n" + "\n".join(extra_kwargs), indent * " ")
else:
extra_kwargs = ""
if method_is_not_flox_supported:
return f"""\
return self.reduce(
duck_array_ops.{method.array_method},
dim=dim,{extra_kwargs}
keep_attrs=keep_attrs,
**kwargs,
)"""
min_version_check = f"""
and module_available("flox", minversion="{method.min_flox_version}")"""
return (
"""\
if (
flox_available
and OPTIONS["use_flox"]"""
+ (min_version_check if method.min_flox_version is not None else "")
+ f"""
and contains_only_chunked_or_numpy(self._obj)
):
return self._flox_reduce(
func="{method.name}",
dim=dim,{extra_kwargs}
# fill_value=fill_value,
keep_attrs=keep_attrs,
**kwargs,
)
else:
return self.reduce(
duck_array_ops.{method.array_method},
dim=dim,{extra_kwargs}
keep_attrs=keep_attrs,
**kwargs,
)"""
)
|
GroupByAggregationGenerator
|
python
|
google__pytype
|
pytype/tools/merge_pyi/test_data/scope.py
|
{
"start": 0,
"end": 186
}
|
class ____:
def f(self, x):
pass
def g(self):
def f(x): #gets ignored by pytype but fixer sees it, generates warning (FIXME?)
return 1
return f
|
C
|
python
|
huggingface__transformers
|
tests/models/focalnet/test_modeling_focalnet.py
|
{
"start": 16164,
"end": 16446
}
|
class ____(BackboneTesterMixin, unittest.TestCase):
all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
config_class = FocalNetConfig
has_attentions = False
def setUp(self):
self.model_tester = FocalNetModelTester(self)
|
FocalNetBackboneTest
|
python
|
hyperopt__hyperopt
|
hyperopt/exceptions.py
|
{
"start": 399,
"end": 603
}
|
class ____(ValueError):
"""Status of fmin evaluation was not in base.STATUS_STRINGS"""
def __init__(self, result):
ValueError.__init__(self)
self.result = result
|
InvalidResultStatus
|
python
|
pypa__warehouse
|
tests/unit/organizations/test_models.py
|
{
"start": 24802,
"end": 26454
}
|
class ____:
def test_is_in_good_standing_company_with_manual_activation(self, db_session):
organization = DBOrganizationFactory.create(orgtype="Company")
DBOrganizationManualActivationFactory.create(
organization=organization,
expires=datetime.date.today() + datetime.timedelta(days=365),
)
assert organization.is_in_good_standing()
def test_is_in_good_standing_company_without_billing(self, db_session):
organization = DBOrganizationFactory.create(orgtype="Company")
assert not organization.is_in_good_standing()
def test_is_in_good_standing_ignores_seat_limits(self, db_session):
"""Test that seat limits don't affect good standing - informational only."""
organization = DBOrganizationFactory.create(orgtype="Company")
activation = DBOrganizationManualActivationFactory.create(
organization=organization,
seat_limit=1, # Very low limit
expires=datetime.date.today() + datetime.timedelta(days=365),
)
# Create more members than seat limit allows
for _ in range(3):
user = DBUserFactory.create()
DBOrganizationRoleFactory.create(
organization=organization,
user=user,
role_name=OrganizationRoleType.Member,
)
# Organization should still be in good standing despite being over seat limit
assert organization.is_in_good_standing()
assert activation.current_member_count > activation.seat_limit
assert not activation.has_available_seats
|
TestOrganizationBillingMethods
|
python
|
apache__airflow
|
providers/papermill/src/airflow/providers/papermill/hooks/kernel.py
|
{
"start": 1582,
"end": 3436
}
|
class ____(BaseHook):
"""
The KernelHook can be used to interact with remote jupyter kernel.
Takes kernel host/ip from connection and refers to jupyter kernel ports and session_key
from ``extra`` field.
:param kernel_conn_id: connection that has kernel host/ip
"""
conn_name_attr = "kernel_conn_id"
default_conn_name = "jupyter_kernel_default"
conn_type = "jupyter_kernel"
hook_name = "Jupyter Kernel"
def __init__(self, kernel_conn_id: str = default_conn_name, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.kernel_conn = self.get_connection(kernel_conn_id)
register_remote_kernel_engine()
def get_conn(self) -> KernelConnection:
kernel_connection = KernelConnection()
kernel_connection.ip = cast("str", self.kernel_conn.host)
kernel_connection.shell_port = self.kernel_conn.extra_dejson.get(
"shell_port", JUPYTER_KERNEL_SHELL_PORT
)
kernel_connection.iopub_port = self.kernel_conn.extra_dejson.get(
"iopub_port", JUPYTER_KERNEL_IOPUB_PORT
)
kernel_connection.stdin_port = self.kernel_conn.extra_dejson.get(
"stdin_port", JUPYTER_KERNEL_STDIN_PORT
)
kernel_connection.control_port = self.kernel_conn.extra_dejson.get(
"control_port", JUPYTER_KERNEL_CONTROL_PORT
)
kernel_connection.hb_port = self.kernel_conn.extra_dejson.get("hb_port", JUPYTER_KERNEL_HB_PORT)
kernel_connection.session_key = self.kernel_conn.extra_dejson.get("session_key", "")
return kernel_connection
def register_remote_kernel_engine():
"""Register ``RemoteKernelEngine`` papermill engine."""
from papermill.engines import papermill_engines
papermill_engines.register(REMOTE_KERNEL_ENGINE, RemoteKernelEngine)
|
KernelHook
|
python
|
walkccc__LeetCode
|
solutions/1944. Number of Visible People in a Queue/1944.py
|
{
"start": 0,
"end": 331
}
|
class ____:
def canSeePersonsCount(self, heights: list[int]) -> list[int]:
ans = [0] * len(heights)
stack = []
for i, height in enumerate(heights):
while stack and heights[stack[-1]] <= height:
ans[stack.pop()] += 1
if stack:
ans[stack[-1]] += 1
stack.append(i)
return ans
|
Solution
|
python
|
kamyu104__LeetCode-Solutions
|
Python/binary-prefix-divisible-by-5.py
|
{
"start": 29,
"end": 274
}
|
class ____(object):
def prefixesDivBy5(self, A):
"""
:type A: List[int]
:rtype: List[bool]
"""
for i in xrange(1, len(A)):
A[i] += A[i-1] * 2 % 5
return [x % 5 == 0 for x in A]
|
Solution
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_20/tasks.py
|
{
"start": 347397,
"end": 351516
}
|
class ____(Request):
"""
Mark a task status as published. If a model was created, it should be set to ready.
:param force: If not true, call fails if the task status is not 'stopped'
:type force: bool
:param publish_model: Indicates that the task output model (if exists) should
be published. Optional, the default value is True.
:type publish_model: bool
:param task: Task ID
:type task: str
:param status_reason: Reason for status change
:type status_reason: str
:param status_message: Extra information regarding status change
:type status_message: str
"""
_service = "tasks"
_action = "publish"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"force": {
"default": False,
"description": "If not true, call fails if the task status is not 'stopped'",
"type": ["boolean", "null"],
},
"publish_model": {
"description": "Indicates that the task output model (if exists) should be published. Optional, the default value is True.",
"type": ["boolean", "null"],
},
"status_message": {
"description": "Extra information regarding status change",
"type": "string",
},
"status_reason": {
"description": "Reason for status change",
"type": "string",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task"],
"type": "object",
}
def __init__(
self,
task: str,
force: Optional[bool] = False,
publish_model: Optional[bool] = None,
status_reason: Optional[str] = None,
status_message: Optional[str] = None,
**kwargs: Any
) -> None:
super(PublishRequest, self).__init__(**kwargs)
self.force = force
self.publish_model = publish_model
self.task = task
self.status_reason = status_reason
self.status_message = status_message
@schema_property("force")
def force(self) -> Optional[bool]:
return self._property_force
@force.setter
def force(self, value: Optional[bool]) -> None:
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
@schema_property("publish_model")
def publish_model(self) -> Optional[bool]:
return self._property_publish_model
@publish_model.setter
def publish_model(self, value: Optional[bool]) -> None:
if value is None:
self._property_publish_model = None
return
self.assert_isinstance(value, "publish_model", (bool,))
self._property_publish_model = value
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("status_reason")
def status_reason(self) -> Optional[str]:
return self._property_status_reason
@status_reason.setter
def status_reason(self, value: Optional[str]) -> None:
if value is None:
self._property_status_reason = None
return
self.assert_isinstance(value, "status_reason", six.string_types)
self._property_status_reason = value
@schema_property("status_message")
def status_message(self) -> Optional[str]:
return self._property_status_message
@status_message.setter
def status_message(self, value: Optional[str]) -> None:
if value is None:
self._property_status_message = None
return
self.assert_isinstance(value, "status_message", six.string_types)
self._property_status_message = value
|
PublishRequest
|
python
|
astropy__astropy
|
astropy/io/votable/converters.py
|
{
"start": 35801,
"end": 44626
}
|
class ____(Converter):
"""
Handles the boolean datatype.
"""
format = "b1"
array_type = BooleanArray
vararray_type = ScalarVarArray
default = False
binary_question_mark = b"?"
binary_true = b"T"
binary_false = b"F"
def parse(self, value, config=None, pos=None):
if value == "":
return False, True
if value is False:
return False, True
mapping = {
"TRUE": (True, False),
"FALSE": (False, False),
"1": (True, False),
"0": (False, False),
"T": (True, False),
"F": (False, False),
"\0": (False, True),
" ": (False, True),
"?": (False, True),
"": (False, True),
}
try:
return mapping[value.upper()]
except KeyError:
vo_raise(E05, (value,), config, pos)
def output(self, value, mask):
if mask:
return "?"
if value:
return "T"
return "F"
def binparse(self, read):
value = ord(read(1))
return self.binparse_value(value)
_binparse_mapping = {
ord("T"): (True, False),
ord("t"): (True, False),
ord("1"): (True, False),
ord("F"): (False, False),
ord("f"): (False, False),
ord("0"): (False, False),
ord("\0"): (False, True),
ord(" "): (False, True),
ord("?"): (False, True),
}
def binparse_value(self, value):
try:
return self._binparse_mapping[value]
except KeyError:
vo_raise(E05, (value,))
def binoutput(self, value, mask):
if mask:
return self.binary_question_mark
if value:
return self.binary_true
return self.binary_false
converter_mapping = {
"double": Double,
"float": Float,
"bit": Bit,
"boolean": Boolean,
"unsignedByte": UnsignedByte,
"short": Short,
"int": Int,
"long": Long,
"floatComplex": FloatComplex,
"doubleComplex": DoubleComplex,
"char": Char,
"unicodeChar": UnicodeChar,
}
def get_converter(field, config=None, pos=None):
"""
Get an appropriate converter instance for a given field.
Parameters
----------
field : astropy.io.votable.tree.Field
config : dict, optional
Parser configuration dictionary
pos : tuple
Position in the input XML file. Used for error messages.
Returns
-------
converter : astropy.io.votable.converters.Converter
"""
if config is None:
config = {}
if field.datatype not in converter_mapping:
vo_raise(E06, (field.datatype, field.ID), config)
cls = converter_mapping[field.datatype]
converter = cls(field, config, pos)
arraysize = field.arraysize
# With numeric datatypes, special things need to happen for
# arrays.
if field.datatype not in ("char", "unicodeChar") and arraysize is not None:
if arraysize[-1] == "*":
arraysize = arraysize[:-1]
last_x = arraysize.rfind("x")
if last_x == -1:
arraysize = ""
else:
arraysize = arraysize[:last_x]
fixed = False
else:
fixed = True
if arraysize != "":
arraysize = [int(x) for x in arraysize.split("x")]
arraysize.reverse()
else:
arraysize = []
if arraysize != []:
converter = converter.array_type(field, converter, arraysize, config)
if not fixed:
converter = converter.vararray_type(field, converter, arraysize, config)
return converter
numpy_dtype_to_field_mapping = {
np.float64().dtype.num: "double",
np.float32().dtype.num: "float",
np.bool_().dtype.num: "bit",
np.uint8().dtype.num: "unsignedByte",
np.int16().dtype.num: "short",
np.int32().dtype.num: "int",
np.int64().dtype.num: "long",
np.complex64().dtype.num: "floatComplex",
np.complex128().dtype.num: "doubleComplex",
np.str_().dtype.num: "unicodeChar",
np.bytes_().dtype.num: "char",
}
def _all_matching_dtype(column):
first_dtype = False
first_shape = ()
for x in column:
if not isinstance(x, np.ndarray) or len(x) == 0:
continue
if first_dtype is False:
first_dtype = x.dtype
first_shape = x.shape[1:]
elif first_dtype != x.dtype:
return False, ()
elif first_shape != x.shape[1:]:
first_shape = ()
return first_dtype, first_shape
def numpy_to_votable_dtype(dtype, shape):
"""
Converts a numpy dtype and shape to a dictionary of attributes for
a VOTable FIELD element and correspond to that type.
Parameters
----------
dtype : Numpy dtype instance
shape : tuple
Returns
-------
attributes : dict
A dict containing 'datatype' and 'arraysize' keys that can be
set on a VOTable FIELD element.
"""
if dtype.num not in numpy_dtype_to_field_mapping:
raise TypeError(f"{dtype!r} can not be represented in VOTable")
if dtype.char == "S":
return {"datatype": "char", "arraysize": str(dtype.itemsize)}
elif dtype.char == "U":
return {"datatype": "unicodeChar", "arraysize": str(dtype.itemsize // 4)}
else:
result = {"datatype": numpy_dtype_to_field_mapping[dtype.num]}
if len(shape):
result["arraysize"] = "x".join(str(x) for x in shape)
return result
def table_column_to_votable_datatype(column):
"""
Given a `astropy.table.Column` instance, returns the attributes
necessary to create a VOTable FIELD element that corresponds to
the type of the column.
This necessarily must perform some heuristics to determine the
type of variable length arrays fields, since they are not directly
supported by Numpy.
If the column has dtype of "object", it performs the following
tests:
- If all elements are byte or unicode strings, it creates a
variable-length byte or unicode field, respectively.
- If all elements are numpy arrays of the same dtype and with a
consistent shape in all but the first dimension, it creates a
variable length array of fixed sized arrays. If the dtypes
match, but the shapes do not, a variable length array is
created.
If the dtype of the input is not understood, it sets the data type
to the most inclusive: a variable length unicodeChar array.
Parameters
----------
column : `astropy.table.Column` instance
Returns
-------
attributes : dict
A dict containing 'datatype' and 'arraysize' keys that can be
set on a VOTable FIELD element.
"""
votable_string_dtype = None
max_length = None
original_arraysize = None
if column.info.meta is not None:
votable_string_dtype = column.info.meta.get("_votable_string_dtype")
# Check if we have stored the original arraysize with bounds
# If so, extract the max length
original_arraysize = column.info.meta.get("_votable_arraysize")
if (
original_arraysize is not None
and original_arraysize.endswith("*")
and not original_arraysize == "*"
):
max_length = original_arraysize[:-1]
if column.dtype.char == "O":
arraysize = "*"
# If max length is stored use it to create a bounded var-length array
if max_length is not None:
arraysize = f"{max_length}*"
if votable_string_dtype is not None:
return {"datatype": votable_string_dtype, "arraysize": arraysize}
elif isinstance(column[0], np.ndarray):
dtype, shape = _all_matching_dtype(column)
if dtype is not False:
result = numpy_to_votable_dtype(dtype, shape)
if "arraysize" not in result:
result["arraysize"] = arraysize
else:
result["arraysize"] += "*"
return result
# All bets are off, do the most generic thing
return {"datatype": "unicodeChar", "arraysize": arraysize}
# For fixed size string columns, datatype here will be unicodeChar,
# but honor the original FIELD datatype if present.
result = numpy_to_votable_dtype(column.dtype, column.shape[1:])
if result["datatype"] == "unicodeChar" and votable_string_dtype == "char":
result["datatype"] = "char"
# If we stored the original arraysize, use it instead of what
# numpy_to_votable_dtype derives
if original_arraysize is not None:
result["arraysize"] = original_arraysize
return result
|
Boolean
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 914273,
"end": 914993
}
|
class ____(sgqlc.types.relay.Connection):
"""The connection type for Reactor."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("ReactorEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("Reactor"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
|
ReactorConnection
|
python
|
doocs__leetcode
|
solution/1600-1699/1675.Minimize Deviation in Array/Solution.py
|
{
"start": 0,
"end": 445
}
|
class ____:
def minimumDeviation(self, nums: List[int]) -> int:
h = []
mi = inf
for v in nums:
if v & 1:
v <<= 1
h.append(-v)
mi = min(mi, v)
heapify(h)
ans = -h[0] - mi
while h[0] % 2 == 0:
x = heappop(h) // 2
heappush(h, x)
mi = min(mi, -x)
ans = min(ans, -h[0] - mi)
return ans
|
Solution
|
python
|
getsentry__sentry
|
src/sentry_plugins/bitbucket/client.py
|
{
"start": 238,
"end": 3641
}
|
class ____(AuthApiClient):
base_url = "https://api.bitbucket.org"
plugin_name = "bitbucket"
def has_auth(self):
return (
self.auth
and "oauth_token" in self.auth.tokens
and "oauth_token_secret" in self.auth.tokens
)
def bind_auth(self, **kwargs):
kwargs["auth"] = OAuth1(
str(settings.BITBUCKET_CONSUMER_KEY),
str(settings.BITBUCKET_CONSUMER_SECRET),
self.auth.tokens["oauth_token"],
self.auth.tokens["oauth_token_secret"],
signature_type="auth_header",
decoding=None,
)
return kwargs
def get_issue(self, repo, issue_id):
return self.get(f"/1.0/repositories/{repo}/issues/{issue_id}")
def create_issue(self, repo, data):
data = {
"title": data["title"],
"content": data["description"],
"kind": data["issue_type"],
"priority": data["priority"],
}
return self.post(f"/1.0/repositories/{repo}/issues", data=data, json=False)
def search_issues(self, repo, query):
return self.get(f"/1.0/repositories/{repo}/issues", params={"search": query})
def create_comment(self, repo, issue_id, data):
return self.post(
f"/1.0/repositories/{repo}/issues/{issue_id}/comments", data=data, json=False
)
def get_repo(self, repo):
return self.get(f"/2.0/repositories/{repo}")
def create_hook(self, repo, data):
return self.post(f"/2.0/repositories/{repo}/hooks", data=data)
def delete_hook(self, repo, id):
return self.delete(f"/2.0/repositories/{repo}/hooks/{id}")
def get_commit_filechanges(self, repo, sha):
# returns unidiff file
resp = self.get(f"/2.0/repositories/{repo}/diff/{sha}", allow_text=True)
return patch_to_file_changes(resp.text)
def zip_commit_data(self, repo, commit_list):
for commit in commit_list:
commit.update({"patch_set": self.get_commit_filechanges(repo, commit["hash"])})
return commit_list
def get_last_commits(self, repo, end_sha):
# return api request that fetches last ~30 commits
# see https://developer.atlassian.com/bitbucket/api/2/reference/resource/repositories/%7Busername%7D/%7Brepo_slug%7D/commits/%7Brevision%7D
# using end_sha as parameter
data = self.get(f"/2.0/repositories/{repo}/commits/{end_sha}")
return self.zip_commit_data(repo, data["values"])
def compare_commits(self, repo, start_sha, end_sha):
# where start_sha is oldest and end_sha is most recent
# see
# https://developer.atlassian.com/bitbucket/api/2/reference/resource/repositories/%7Busername%7D/%7Brepo_slug%7D/commits/%7Brevision%7D
commits: list[dict[str, Any]] = []
done = False
url = f"/2.0/repositories/{repo}/commits/{end_sha}"
while not done and len(commits) < 90:
data = self.get(url)
for commit in data["values"]:
if commit["hash"].startswith(start_sha):
done = True
break
commits.append(commit)
# move page forward
try:
url = data["next"]
except KeyError:
break
return self.zip_commit_data(repo, commits)
|
BitbucketClient
|
python
|
sqlalchemy__sqlalchemy
|
test/typing/plain_files/sql/dml.py
|
{
"start": 611,
"end": 1249
}
|
class ____(Base):
__tablename__ = "user"
id: Mapped[int] = mapped_column(primary_key=True)
name: Mapped[str]
data: Mapped[str]
# test #9376
d1: dict[str, Any] = {}
stmt1 = insert(User).values(d1)
d2: Dict[str, Any] = {}
stmt2 = insert(User).values(d2)
d3: Dict[Column[str], Any] = {}
stmt3 = insert(User).values(d3)
stmt4 = insert(User).from_select(
[User.id, "name", User.__table__.c.data],
select(User.id, User.name, User.data),
)
# test #10353
stmt5 = update(User).values({User.id: 123, User.data: "value"})
stmt6 = user_table.update().values(
{user_table.c.d: 123, user_table.c.data: "value"}
)
|
User
|
python
|
spyder-ide__spyder
|
spyder/utils/syntaxhighlighters.py
|
{
"start": 57588,
"end": 66578
}
|
class ____(BaseSH):
"""Simple Diff/Patch Syntax Highlighter Class"""
def highlight_block(self, text):
"""Implement highlight specific Diff/Patch files."""
text = str(text)
if text.startswith("+++"):
self.setFormat(0, qstring_length(text), self.formats["keyword"])
elif text.startswith("---"):
self.setFormat(0, qstring_length(text), self.formats["keyword"])
elif text.startswith("+"):
self.setFormat(0, qstring_length(text), self.formats["string"])
elif text.startswith("-"):
self.setFormat(0, qstring_length(text), self.formats["number"])
elif text.startswith("@"):
self.setFormat(0, qstring_length(text), self.formats["builtin"])
self.highlight_extras(text)
#==============================================================================
# NSIS highlighter
#==============================================================================
def make_nsis_patterns():
"Strongly inspired from idlelib.ColorDelegator.make_pat"
kwstr1 = 'Abort AddBrandingImage AddSize AllowRootDirInstall AllowSkipFiles AutoCloseWindow BGFont BGGradient BrandingText BringToFront Call CallInstDLL Caption ClearErrors CompletedText ComponentText CopyFiles CRCCheck CreateDirectory CreateFont CreateShortCut Delete DeleteINISec DeleteINIStr DeleteRegKey DeleteRegValue DetailPrint DetailsButtonText DirText DirVar DirVerify EnableWindow EnumRegKey EnumRegValue Exec ExecShell ExecWait Exch ExpandEnvStrings File FileBufSize FileClose FileErrorText FileOpen FileRead FileReadByte FileSeek FileWrite FileWriteByte FindClose FindFirst FindNext FindWindow FlushINI Function FunctionEnd GetCurInstType GetCurrentAddress GetDlgItem GetDLLVersion GetDLLVersionLocal GetErrorLevel GetFileTime GetFileTimeLocal GetFullPathName GetFunctionAddress GetInstDirError GetLabelAddress GetTempFileName Goto HideWindow ChangeUI CheckBitmap Icon IfAbort IfErrors IfFileExists IfRebootFlag IfSilent InitPluginsDir InstallButtonText InstallColors InstallDir InstallDirRegKey InstProgressFlags InstType InstTypeGetText InstTypeSetText IntCmp IntCmpU IntFmt IntOp IsWindow LangString LicenseBkColor LicenseData LicenseForceSelection LicenseLangString LicenseText LoadLanguageFile LogSet LogText MessageBox MiscButtonText Name OutFile Page PageCallbacks PageEx PageExEnd Pop Push Quit ReadEnvStr ReadINIStr ReadRegDWORD ReadRegStr Reboot RegDLL Rename ReserveFile Return RMDir SearchPath Section SectionEnd SectionGetFlags SectionGetInstTypes SectionGetSize SectionGetText SectionIn SectionSetFlags SectionSetInstTypes SectionSetSize SectionSetText SendMessage SetAutoClose SetBrandingImage SetCompress SetCompressor SetCompressorDictSize SetCtlColors SetCurInstType SetDatablockOptimize SetDateSave SetDetailsPrint SetDetailsView SetErrorLevel SetErrors SetFileAttributes SetFont SetOutPath SetOverwrite SetPluginUnload SetRebootFlag SetShellVarContext SetSilent ShowInstDetails ShowUninstDetails ShowWindow SilentInstall SilentUnInstall Sleep SpaceTexts StrCmp StrCpy 
StrLen SubCaption SubSection SubSectionEnd UninstallButtonText UninstallCaption UninstallIcon UninstallSubCaption UninstallText UninstPage UnRegDLL Var VIAddVersionKey VIProductVersion WindowIcon WriteINIStr WriteRegBin WriteRegDWORD WriteRegExpandStr WriteRegStr WriteUninstaller XPStyle'
kwstr2 = 'all alwaysoff ARCHIVE auto both bzip2 components current custom details directory false FILE_ATTRIBUTE_ARCHIVE FILE_ATTRIBUTE_HIDDEN FILE_ATTRIBUTE_NORMAL FILE_ATTRIBUTE_OFFLINE FILE_ATTRIBUTE_READONLY FILE_ATTRIBUTE_SYSTEM FILE_ATTRIBUTE_TEMPORARY force grey HIDDEN hide IDABORT IDCANCEL IDIGNORE IDNO IDOK IDRETRY IDYES ifdiff ifnewer instfiles instfiles lastused leave left level license listonly lzma manual MB_ABORTRETRYIGNORE MB_DEFBUTTON1 MB_DEFBUTTON2 MB_DEFBUTTON3 MB_DEFBUTTON4 MB_ICONEXCLAMATION MB_ICONINFORMATION MB_ICONQUESTION MB_ICONSTOP MB_OK MB_OKCANCEL MB_RETRYCANCEL MB_RIGHT MB_SETFOREGROUND MB_TOPMOST MB_YESNO MB_YESNOCANCEL nevershow none NORMAL off OFFLINE on READONLY right RO show silent silentlog SYSTEM TEMPORARY text textonly true try uninstConfirm windows zlib'
kwstr3 = 'MUI_ABORTWARNING MUI_ABORTWARNING_CANCEL_DEFAULT MUI_ABORTWARNING_TEXT MUI_BGCOLOR MUI_COMPONENTSPAGE_CHECKBITMAP MUI_COMPONENTSPAGE_NODESC MUI_COMPONENTSPAGE_SMALLDESC MUI_COMPONENTSPAGE_TEXT_COMPLIST MUI_COMPONENTSPAGE_TEXT_DESCRIPTION_INFO MUI_COMPONENTSPAGE_TEXT_DESCRIPTION_TITLE MUI_COMPONENTSPAGE_TEXT_INSTTYPE MUI_COMPONENTSPAGE_TEXT_TOP MUI_CUSTOMFUNCTION_ABORT MUI_CUSTOMFUNCTION_GUIINIT MUI_CUSTOMFUNCTION_UNABORT MUI_CUSTOMFUNCTION_UNGUIINIT MUI_DESCRIPTION_TEXT MUI_DIRECTORYPAGE_BGCOLOR MUI_DIRECTORYPAGE_TEXT_DESTINATION MUI_DIRECTORYPAGE_TEXT_TOP MUI_DIRECTORYPAGE_VARIABLE MUI_DIRECTORYPAGE_VERIFYONLEAVE MUI_FINISHPAGE_BUTTON MUI_FINISHPAGE_CANCEL_ENABLED MUI_FINISHPAGE_LINK MUI_FINISHPAGE_LINK_COLOR MUI_FINISHPAGE_LINK_LOCATION MUI_FINISHPAGE_NOAUTOCLOSE MUI_FINISHPAGE_NOREBOOTSUPPORT MUI_FINISHPAGE_REBOOTLATER_DEFAULT MUI_FINISHPAGE_RUN MUI_FINISHPAGE_RUN_FUNCTION MUI_FINISHPAGE_RUN_NOTCHECKED MUI_FINISHPAGE_RUN_PARAMETERS MUI_FINISHPAGE_RUN_TEXT MUI_FINISHPAGE_SHOWREADME MUI_FINISHPAGE_SHOWREADME_FUNCTION MUI_FINISHPAGE_SHOWREADME_NOTCHECKED MUI_FINISHPAGE_SHOWREADME_TEXT MUI_FINISHPAGE_TEXT MUI_FINISHPAGE_TEXT_LARGE MUI_FINISHPAGE_TEXT_REBOOT MUI_FINISHPAGE_TEXT_REBOOTLATER MUI_FINISHPAGE_TEXT_REBOOTNOW MUI_FINISHPAGE_TITLE MUI_FINISHPAGE_TITLE_3LINES MUI_FUNCTION_DESCRIPTION_BEGIN MUI_FUNCTION_DESCRIPTION_END MUI_HEADER_TEXT MUI_HEADER_TRANSPARENT_TEXT MUI_HEADERIMAGE MUI_HEADERIMAGE_BITMAP MUI_HEADERIMAGE_BITMAP_NOSTRETCH MUI_HEADERIMAGE_BITMAP_RTL MUI_HEADERIMAGE_BITMAP_RTL_NOSTRETCH MUI_HEADERIMAGE_RIGHT MUI_HEADERIMAGE_UNBITMAP MUI_HEADERIMAGE_UNBITMAP_NOSTRETCH MUI_HEADERIMAGE_UNBITMAP_RTL MUI_HEADERIMAGE_UNBITMAP_RTL_NOSTRETCH MUI_HWND MUI_ICON MUI_INSTALLCOLORS MUI_INSTALLOPTIONS_DISPLAY MUI_INSTALLOPTIONS_DISPLAY_RETURN MUI_INSTALLOPTIONS_EXTRACT MUI_INSTALLOPTIONS_EXTRACT_AS MUI_INSTALLOPTIONS_INITDIALOG MUI_INSTALLOPTIONS_READ MUI_INSTALLOPTIONS_SHOW MUI_INSTALLOPTIONS_SHOW_RETURN MUI_INSTALLOPTIONS_WRITE 
MUI_INSTFILESPAGE_ABORTHEADER_SUBTEXT MUI_INSTFILESPAGE_ABORTHEADER_TEXT MUI_INSTFILESPAGE_COLORS MUI_INSTFILESPAGE_FINISHHEADER_SUBTEXT MUI_INSTFILESPAGE_FINISHHEADER_TEXT MUI_INSTFILESPAGE_PROGRESSBAR MUI_LANGDLL_ALLLANGUAGES MUI_LANGDLL_ALWAYSSHOW MUI_LANGDLL_DISPLAY MUI_LANGDLL_INFO MUI_LANGDLL_REGISTRY_KEY MUI_LANGDLL_REGISTRY_ROOT MUI_LANGDLL_REGISTRY_VALUENAME MUI_LANGDLL_WINDOWTITLE MUI_LANGUAGE MUI_LICENSEPAGE_BGCOLOR MUI_LICENSEPAGE_BUTTON MUI_LICENSEPAGE_CHECKBOX MUI_LICENSEPAGE_CHECKBOX_TEXT MUI_LICENSEPAGE_RADIOBUTTONS MUI_LICENSEPAGE_RADIOBUTTONS_TEXT_ACCEPT MUI_LICENSEPAGE_RADIOBUTTONS_TEXT_DECLINE MUI_LICENSEPAGE_TEXT_BOTTOM MUI_LICENSEPAGE_TEXT_TOP MUI_PAGE_COMPONENTS MUI_PAGE_CUSTOMFUNCTION_LEAVE MUI_PAGE_CUSTOMFUNCTION_PRE MUI_PAGE_CUSTOMFUNCTION_SHOW MUI_PAGE_DIRECTORY MUI_PAGE_FINISH MUI_PAGE_HEADER_SUBTEXT MUI_PAGE_HEADER_TEXT MUI_PAGE_INSTFILES MUI_PAGE_LICENSE MUI_PAGE_STARTMENU MUI_PAGE_WELCOME MUI_RESERVEFILE_INSTALLOPTIONS MUI_RESERVEFILE_LANGDLL MUI_SPECIALINI MUI_STARTMENU_GETFOLDER MUI_STARTMENU_WRITE_BEGIN MUI_STARTMENU_WRITE_END MUI_STARTMENUPAGE_BGCOLOR MUI_STARTMENUPAGE_DEFAULTFOLDER MUI_STARTMENUPAGE_NODISABLE MUI_STARTMENUPAGE_REGISTRY_KEY MUI_STARTMENUPAGE_REGISTRY_ROOT MUI_STARTMENUPAGE_REGISTRY_VALUENAME MUI_STARTMENUPAGE_TEXT_CHECKBOX MUI_STARTMENUPAGE_TEXT_TOP MUI_UI MUI_UI_COMPONENTSPAGE_NODESC MUI_UI_COMPONENTSPAGE_SMALLDESC MUI_UI_HEADERIMAGE MUI_UI_HEADERIMAGE_RIGHT MUI_UNABORTWARNING MUI_UNABORTWARNING_CANCEL_DEFAULT MUI_UNABORTWARNING_TEXT MUI_UNCONFIRMPAGE_TEXT_LOCATION MUI_UNCONFIRMPAGE_TEXT_TOP MUI_UNFINISHPAGE_NOAUTOCLOSE MUI_UNFUNCTION_DESCRIPTION_BEGIN MUI_UNFUNCTION_DESCRIPTION_END MUI_UNGETLANGUAGE MUI_UNICON MUI_UNPAGE_COMPONENTS MUI_UNPAGE_CONFIRM MUI_UNPAGE_DIRECTORY MUI_UNPAGE_FINISH MUI_UNPAGE_INSTFILES MUI_UNPAGE_LICENSE MUI_UNPAGE_WELCOME MUI_UNWELCOMEFINISHPAGE_BITMAP MUI_UNWELCOMEFINISHPAGE_BITMAP_NOSTRETCH MUI_UNWELCOMEFINISHPAGE_INI MUI_WELCOMEFINISHPAGE_BITMAP MUI_WELCOMEFINISHPAGE_BITMAP_NOSTRETCH 
MUI_WELCOMEFINISHPAGE_CUSTOMFUNCTION_INIT MUI_WELCOMEFINISHPAGE_INI MUI_WELCOMEPAGE_TEXT MUI_WELCOMEPAGE_TITLE MUI_WELCOMEPAGE_TITLE_3LINES'
bistr = 'addincludedir addplugindir AndIf cd define echo else endif error execute If ifdef ifmacrodef ifmacrondef ifndef include insertmacro macro macroend onGUIEnd onGUIInit onInit onInstFailed onInstSuccess onMouseOverSection onRebootFailed onSelChange onUserAbort onVerifyInstDir OrIf packhdr system undef verbose warning'
instance = any("instance", [r'\$\{.*?\}', r'\$[A-Za-z0-9\_]*'])
define = any("define", [r"\![^\n]*"])
comment = any("comment", [r"\;[^\n]*", r"\#[^\n]*", r"\/\*(.*?)\*\/"])
return make_generic_c_patterns(kwstr1+' '+kwstr2+' '+kwstr3, bistr,
instance=instance, define=define,
comment=comment)
|
DiffSH
|
python
|
tensorflow__tensorflow
|
tensorflow/python/eager/polymorphic_function/polymorphic_function.py
|
{
"start": 7972,
"end": 8834
}
|
class ____(object):
"""Class for the management of all _FrequentTracingDetector objects."""
__slots__ = ["_detectors", "_lock"]
def __init__(self):
self._detectors = weakref.WeakKeyDictionary() # GUARDED_BY(self._lock)
self._lock = threading.Lock()
def _get_detector(self, key):
if key not in self._detectors:
self._detectors[key] = _FrequentTracingDetector()
return self._detectors[key]
def called_without_tracing(self, key):
with self._lock:
detector = self._get_detector(key)
detector.called_without_tracing()
def called_with_tracing(self, key, function_name, omit_warning):
with self._lock:
detector = self._get_detector(key)
detector.called_with_tracing(function_name, omit_warning)
_frequent_tracing_detector_manager = _FrequentTracingDetectorManager()
|
_FrequentTracingDetectorManager
|
python
|
bottlepy__bottle
|
test/test_outputfilter.py
|
{
"start": 290,
"end": 6258
}
|
class ____(ServerTestBase):
''' Tests for WSGI functionality, routing and output casting (decorators) '''
def test_bytes(self):
self.app.route('/')(lambda: tob('test'))
self.assertBody('test')
def test_bytearray(self):
self.app.route('/')(lambda: map(tob, ['t', 'e', 'st']))
self.assertBody('test')
def test_tuple(self):
self.app.route('/')(lambda: ('t', 'e', 'st'))
self.assertBody('test')
def test_emptylist(self):
self.app.route('/')(lambda: [])
self.assertBody('')
def test_none(self):
self.app.route('/')(lambda: None)
self.assertBody('')
def test_illegal(self):
self.app.route('/')(lambda: 1234)
self.assertStatus(500)
self.assertInBody('Unhandled exception')
def test_error(self):
bottle.debug(True)
self.app.route('/')(lambda: 1/0)
self.assertStatus(500)
self.assertInBody('ZeroDivisionError')
def test_fatal_error(self):
@self.app.route('/')
def test(): raise KeyboardInterrupt()
self.assertRaises(KeyboardInterrupt, self.assertStatus, 500)
def test_file(self):
self.app.route('/')(lambda: tobs('test'))
self.assertBody('test')
def test_unicode(self):
self.app.route('/')(lambda: touni('äöüß'))
self.assertBody(touni('äöüß').encode('utf8'))
self.app.route('/')(lambda: [touni('äö'), touni('üß')])
self.assertBody(touni('äöüß').encode('utf8'))
@self.app.route('/')
def test5():
bottle.response.content_type='text/html; charset=iso-8859-15'
return touni('äöüß')
self.assertBody(touni('äöüß').encode('iso-8859-15'))
@self.app.route('/')
def test5():
bottle.response.content_type='text/html'
return touni('äöüß')
self.assertBody(touni('äöüß').encode('utf8'))
def test_json(self):
self.app.route('/')(lambda: {'a': 1})
self.assertBody(bottle.json_dumps({'a': 1}))
self.assertHeader('Content-Type','application/json')
@unittest.skipIf(USING_UJSON, 'ujson do not throw exception in serialize')
def test_json_serialization_error(self):
"""
Verify that 500 errors serializing dictionaries don't return
content-type application/json
"""
self.app.route('/')(lambda: {'a': set()})
self.assertStatus(500)
self.assertHeader('Content-Type','text/html; charset=UTF-8')
def test_json_HTTPResponse(self):
self.app.route('/')(lambda: bottle.HTTPResponse({'a': 1}, 500))
self.assertBody(bottle.json_dumps({'a': 1}))
self.assertHeader('Content-Type','application/json')
def test_json_HTTPError(self):
self.app.error(400)(lambda e: e.body)
self.app.route('/')(lambda: bottle.HTTPError(400, {'a': 1}))
self.assertBody(bottle.json_dumps({'a': 1}))
self.assertHeader('Content-Type','application/json')
def test_generator_callback(self):
@self.app.route('/')
def test():
bottle.response.headers['Test-Header'] = 'test'
yield 'foo'
self.assertBody('foo')
self.assertHeader('Test-Header', 'test')
def test_empty_generator_callback(self):
@self.app.route('/')
def test():
yield
bottle.response.headers['Test-Header'] = 'test'
self.assertBody('')
self.assertHeader('Test-Header', 'test')
def test_error_in_generator_callback(self):
@self.app.route('/')
def test():
yield 1/0
self.assertStatus(500)
self.assertInBody('ZeroDivisionError')
def test_fatal_error_in_generator_callback(self):
@self.app.route('/')
def test():
yield
raise KeyboardInterrupt()
self.assertRaises(KeyboardInterrupt, self.assertStatus, 500)
def test_httperror_in_generator_callback(self):
@self.app.route('/')
def test():
yield
bottle.abort(404, 'teststring')
self.assertInBody('teststring')
self.assertInBody('404 Not Found')
self.assertStatus(404)
def test_httpresponse_in_generator_callback(self):
@self.app.route('/')
def test():
yield bottle.HTTPResponse('test')
self.assertBody('test')
def test_unicode_generator_callback(self):
@self.app.route('/')
def test():
yield touni('äöüß')
self.assertBody(touni('äöüß').encode('utf8'))
def test_invalid_generator_callback(self):
@self.app.route('/')
def test():
yield 1234
self.assertStatus(500)
self.assertInBody('Unsupported response type')
def test_iterator_with_close(self):
class MyIter(object):
def __init__(self, data):
self.data = data
self.closed = False
def close(self): self.closed = True
def __iter__(self): return iter(self.data)
byte_iter = MyIter([tob('abc'), tob('def')])
unicode_iter = MyIter([touni('abc'), touni('def')])
for test_iter in (byte_iter, unicode_iter):
@self.app.route('/')
def test(): return test_iter
self.assertInBody('abcdef')
self.assertTrue(byte_iter.closed)
def test_cookie(self):
""" WSGI: Cookies """
@bottle.route('/cookie')
def test():
bottle.response.set_cookie('b', 'b')
bottle.response.set_cookie('c', 'c', path='/')
return 'hello'
try:
c = self.urlopen('/cookie')['header'].get_all('Set-Cookie', '')
except:
c = self.urlopen('/cookie')['header'].get('Set-Cookie', '').split(',')
c = [x.strip() for x in c]
self.assertTrue('b=b' in c)
self.assertTrue('c=c; Path=/' in c)
|
TestOutputFilter
|
python
|
wireservice__csvkit
|
csvkit/utilities/csvlook.py
|
{
"start": 101,
"end": 3049
}
|
class ____(CSVKitUtility):
description = 'Render a CSV file in the console as a Markdown-compatible, fixed-width table.'
def add_arguments(self):
self.argparser.add_argument(
'--max-rows', dest='max_rows', type=int,
help='The maximum number of rows to display before truncating the data.')
self.argparser.add_argument(
'--max-columns', dest='max_columns', type=int,
help='The maximum number of columns to display before truncating the data.')
self.argparser.add_argument(
'--max-column-width', dest='max_column_width', type=int,
help='Truncate all columns to at most this width. The remainder will be replaced with ellipsis.')
self.argparser.add_argument(
'--max-precision', dest='max_precision', type=int,
help='The maximum number of decimal places to display. The remainder will be replaced with ellipsis.')
self.argparser.add_argument(
'--no-number-ellipsis', dest='no_number_ellipsis', action='store_true',
help='Disable the ellipsis if --max-precision is exceeded.')
self.argparser.add_argument(
'-y', '--snifflimit', dest='sniff_limit', type=int, default=1024,
help='Limit CSV dialect sniffing to the specified number of bytes. '
'Specify "0" to disable sniffing entirely, or "-1" to sniff the entire file.')
self.argparser.add_argument(
'-I', '--no-inference', dest='no_inference', action='store_true',
help='Disable type inference (and --locale, --date-format, --datetime-format, --no-leading-zeroes) '
'when parsing the input.')
def main(self):
if self.additional_input_expected():
self.argparser.error('You must provide an input file or piped data.')
kwargs = {}
# In agate, max_precision defaults to 3. None means infinity.
if self.args.max_precision is not None:
kwargs['max_precision'] = self.args.max_precision
if self.args.no_number_ellipsis:
config.set_option('number_truncation_chars', '')
sniff_limit = self.args.sniff_limit if self.args.sniff_limit != -1 else None
table = agate.Table.from_csv(
self.input_file,
skip_lines=self.args.skip_lines,
sniff_limit=sniff_limit,
row_limit=self.args.max_rows,
column_types=self.get_column_types(),
line_numbers=self.args.line_numbers,
**self.reader_kwargs,
)
table.print_table(
output=self.output_file,
max_rows=self.args.max_rows,
max_columns=self.args.max_columns,
max_column_width=self.args.max_column_width,
**kwargs,
)
def launch_new_instance():
utility = CSVLook()
utility.run()
if __name__ == '__main__':
launch_new_instance()
|
CSVLook
|
python
|
pandas-dev__pandas
|
pandas/tests/arithmetic/test_datetime64.py
|
{
"start": 73796,
"end": 91345
}
|
class ____:
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz, unit="ns")
expected = expected._with_freq(None)
# add with TimedeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz, unit="ns")
expected = expected._with_freq(None)
# iadd with TimedeltaIndex
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result += tdi
tm.assert_index_equal(result, expected)
result = pd.timedelta_range("0 days", periods=10)
result += dti
tm.assert_index_equal(result, expected)
# iadd with timedelta64 array
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result += tdi.values
tm.assert_index_equal(result, expected)
result = pd.timedelta_range("0 days", periods=10)
result += dti
tm.assert_index_equal(result, expected)
def test_dti_sub_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D", unit="ns")
expected = expected._with_freq(None)
# sub with TimedeltaIndex
result = dti - tdi
tm.assert_index_equal(result, expected)
msg = "cannot subtract .*TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dti
# sub with timedelta64 array
result = dti - tdi.values
tm.assert_index_equal(result, expected)
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi.values - dti
def test_dti_isub_tdi(self, tz_naive_fixture, unit):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10).as_unit(unit)
tdi = pd.timedelta_range("0 days", periods=10, unit=unit)
expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D", unit=unit)
expected = expected._with_freq(None)
# isub with TimedeltaIndex
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10).as_unit(unit)
result -= tdi
tm.assert_index_equal(result, expected)
# DTA.__isub__ GH#43904
dta = dti._data.copy()
dta -= tdi
tm.assert_datetime_array_equal(dta, expected._data)
out = dti._data.copy()
np.subtract(out, tdi, out=out)
tm.assert_datetime_array_equal(out, expected._data)
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi -= dti
# isub with timedelta64 array
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10).as_unit(unit)
result -= tdi.values
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError, match=msg):
tdi.values -= dti
with pytest.raises(TypeError, match=msg):
tdi._values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
# TODO: A couple other tests belong in this section. Move them in
# A PR where there isn't already a giant diff.
# -------------------------------------------------------------
def test_dta_add_sub_index(self, tz_naive_fixture):
# Check that DatetimeArray defers to Index classes
dti = date_range("20130101", periods=3, tz=tz_naive_fixture)
dta = dti.array
result = dta - dti
expected = dti - dti
tm.assert_index_equal(result, expected)
tdi = result
result = dta + tdi
expected = dti + tdi
tm.assert_index_equal(result, expected)
result = dta - tdi
expected = dti - tdi
tm.assert_index_equal(result, expected)
def test_sub_dti_dti(self, unit):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range("20130101", periods=3, unit=unit)
dti_tz = date_range("20130101", periods=3, unit=unit).tz_localize("US/Eastern")
expected = TimedeltaIndex([0, 0, 0]).as_unit(unit)
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
msg = "Cannot subtract tz-naive and tz-aware datetime-like objects"
with pytest.raises(TypeError, match=msg):
dti_tz - dti
with pytest.raises(TypeError, match=msg):
dti - dti_tz
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range("20130101", periods=3, unit=unit)
dti2 = date_range("20130101", periods=4, unit=unit)
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(["2012-01-01", np.nan, "2012-01-03"]).as_unit(unit)
dti2 = DatetimeIndex(["2012-01-02", "2012-01-03", np.nan]).as_unit(unit)
expected = TimedeltaIndex(["1 days", np.nan, np.nan]).as_unit(unit)
result = dti2 - dti1
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------------
# TODO: Most of this block is moved from series or frame tests, needs
# cleanup, box-parametrization, and de-duplication
@pytest.mark.parametrize("op", [operator.add, operator.sub])
def test_timedelta64_equal_timedelta_supported_ops(self, op, box_with_array):
ser = Series(
[
Timestamp("20130301"),
Timestamp("20130228 23:00:00"),
Timestamp("20130228 22:00:00"),
Timestamp("20130228 21:00:00"),
]
)
obj = box_with_array(ser)
intervals = ["D", "h", "m", "s", "us"]
def timedelta64(*args):
# see casting notes in NumPy gh-12927
return np.sum(list(map(np.timedelta64, args, intervals)))
for d, h, m, s, us in product(*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s, microseconds=us)
lhs = op(obj, nptd)
rhs = op(obj, pytd)
tm.assert_equal(lhs, rhs)
def test_ops_nat_mixed_datetime64_timedelta64(self):
# GH#11349
timedelta_series = Series([NaT, Timedelta("1s")])
datetime_series = Series([NaT, Timestamp("19900315")])
nat_series_dtype_timedelta = Series([NaT, NaT], dtype="timedelta64[ns]")
nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
single_nat_dtype_timedelta = Series([NaT], dtype="timedelta64[ns]")
# subtraction
tm.assert_series_equal(
datetime_series - single_nat_dtype_datetime, nat_series_dtype_timedelta
)
tm.assert_series_equal(
datetime_series - single_nat_dtype_timedelta, nat_series_dtype_timestamp
)
tm.assert_series_equal(
-single_nat_dtype_timedelta + datetime_series, nat_series_dtype_timestamp
)
# without a Series wrapping the NaT, it is ambiguous
# whether it is a datetime64 or timedelta64
# defaults to interpreting it as timedelta64
tm.assert_series_equal(
nat_series_dtype_timestamp - single_nat_dtype_datetime,
nat_series_dtype_timedelta,
)
tm.assert_series_equal(
nat_series_dtype_timestamp - single_nat_dtype_timedelta,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
-single_nat_dtype_timedelta + nat_series_dtype_timestamp,
nat_series_dtype_timestamp,
)
msg = "cannot subtract a datelike"
with pytest.raises(TypeError, match=msg):
timedelta_series - single_nat_dtype_datetime
# addition
tm.assert_series_equal(
nat_series_dtype_timestamp + single_nat_dtype_timedelta,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
single_nat_dtype_timedelta + nat_series_dtype_timestamp,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
nat_series_dtype_timestamp + single_nat_dtype_timedelta,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
single_nat_dtype_timedelta + nat_series_dtype_timestamp,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
nat_series_dtype_timedelta + single_nat_dtype_datetime,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
single_nat_dtype_datetime + nat_series_dtype_timedelta,
nat_series_dtype_timestamp,
)
def test_ufunc_coercions(self, unit):
idx = date_range("2011-01-01", periods=3, freq="2D", name="x", unit=unit)
delta = np.timedelta64(1, "D")
exp = date_range("2011-01-02", periods=3, freq="2D", name="x", unit=unit)
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, exp)
assert result.freq == "2D"
exp = date_range("2010-12-31", periods=3, freq="2D", name="x", unit=unit)
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, exp)
assert result.freq == "2D"
# When adding/subtracting an ndarray (which has no .freq), the result
# does not infer freq
idx = idx._with_freq(None)
delta = np.array(
[np.timedelta64(1, "D"), np.timedelta64(2, "D"), np.timedelta64(3, "D")]
)
exp = DatetimeIndex(
["2011-01-02", "2011-01-05", "2011-01-08"], name="x"
).as_unit(unit)
for result in [idx + delta, np.add(idx, delta)]:
tm.assert_index_equal(result, exp)
assert result.freq == exp.freq
exp = DatetimeIndex(
["2010-12-31", "2011-01-01", "2011-01-02"], name="x"
).as_unit(unit)
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, exp)
assert result.freq == exp.freq
def test_dti_add_series(self, tz_naive_fixture, names):
# GH#13905
tz = tz_naive_fixture
index = DatetimeIndex(
["2016-06-28 05:30", "2016-06-28 05:31"], tz=tz, name=names[0]
).as_unit("ns")
ser = Series([Timedelta(seconds=5)] * 2, index=index, name=names[1])
expected = Series(index + Timedelta(seconds=5), index=index, name=names[2])
# passing name arg isn't enough when names[2] is None
expected.name = names[2]
assert expected.dtype == index.dtype
result = ser + index
tm.assert_series_equal(result, expected)
result2 = index + ser
tm.assert_series_equal(result2, expected)
expected = index + Timedelta(seconds=5)
result3 = ser.values + index
tm.assert_index_equal(result3, expected)
result4 = index + ser.values
tm.assert_index_equal(result4, expected)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
def test_dti_addsub_offset_arraylike(
self, performance_warning, tz_naive_fixture, names, op, index_or_series
):
# GH#18849, GH#19744
other_box = index_or_series
tz = tz_naive_fixture
dti = date_range("2017-01-01", periods=2, tz=tz, name=names[0])
other = other_box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)], name=names[1])
xbox = get_upcast_box(dti, other)
with tm.assert_produces_warning(performance_warning):
res = op(dti, other)
expected = DatetimeIndex(
[op(dti[n], other[n]) for n in range(len(dti))], name=names[2], freq="infer"
)
expected = tm.box_expected(expected, xbox).astype(object)
tm.assert_equal(res, expected)
@pytest.mark.parametrize("other_box", [pd.Index, np.array])
def test_dti_addsub_object_arraylike(
self, performance_warning, tz_naive_fixture, box_with_array, other_box
):
tz = tz_naive_fixture
dti = date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
other = other_box([pd.offsets.MonthEnd(), Timedelta(days=4)])
xbox = get_upcast_box(dtarr, other)
expected = DatetimeIndex(["2017-01-31", "2017-01-06"], tz=tz_naive_fixture)
expected = tm.box_expected(expected, xbox).astype(object)
with tm.assert_produces_warning(performance_warning):
result = dtarr + other
tm.assert_equal(result, expected)
expected = DatetimeIndex(["2016-12-31", "2016-12-29"], tz=tz_naive_fixture)
expected = tm.box_expected(expected, xbox).astype(object)
with tm.assert_produces_warning(performance_warning):
result = dtarr - other
tm.assert_equal(result, expected)
@pytest.mark.parametrize("years", [-1, 0, 1])
@pytest.mark.parametrize("months", [-2, 0, 2])
def test_shift_months(years, months, unit):
dti = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
]
).as_unit(unit)
shifted = shift_months(dti.asi8, years * 12 + months, reso=dti._data._creso)
shifted_dt64 = shifted.view(f"M8[{dti.unit}]")
actual = DatetimeIndex(shifted_dt64)
raw = [x + pd.offsets.DateOffset(years=years, months=months) for x in dti]
expected = DatetimeIndex(raw).as_unit(dti.unit)
tm.assert_index_equal(actual, expected)
def test_dt64arr_addsub_object_dtype_2d(performance_warning):
# block-wise DataFrame operations will require operating on 2D
# DatetimeArray/TimedeltaArray, so check that specifically.
dti = date_range("1994-02-13", freq="2W", periods=4)
dta = dti._data.reshape((4, 1))
other = np.array([[pd.offsets.Day(n)] for n in range(4)])
assert other.shape == dta.shape
with tm.assert_produces_warning(performance_warning):
result = dta + other
with tm.assert_produces_warning(performance_warning):
expected = (dta[:, 0] + other[:, 0]).reshape(-1, 1)
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(performance_warning):
# Case where we expect to get a TimedeltaArray back
result2 = dta - dta.astype(object)
assert result2.shape == (4, 1)
assert all(td._value == 0 for td in result2.ravel())
def test_non_nano_dt64_addsub_np_nat_scalars():
# GH 52295
ser = Series([1233242342344, 232432434324, 332434242344], dtype="datetime64[ms]")
result = ser - np.datetime64("nat", "ms")
expected = Series([NaT] * 3, dtype="timedelta64[ms]")
tm.assert_series_equal(result, expected)
result = ser + np.timedelta64("nat", "ms")
expected = Series([NaT] * 3, dtype="datetime64[ms]")
tm.assert_series_equal(result, expected)
def test_non_nano_dt64_addsub_np_nat_scalars_unitless():
# GH 52295
# TODO: Can we default to the ser unit?
ser = Series([1233242342344, 232432434324, 332434242344], dtype="datetime64[ms]")
result = ser - np.datetime64("nat")
expected = Series([NaT] * 3, dtype="timedelta64[ns]")
tm.assert_series_equal(result, expected)
result = ser + np.timedelta64("nat")
expected = Series([NaT] * 3, dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
def test_non_nano_dt64_addsub_np_nat_scalars_unsupported_unit():
# GH 52295
ser = Series([12332, 23243, 33243], dtype="datetime64[s]")
result = ser - np.datetime64("nat", "D")
expected = Series([NaT] * 3, dtype="timedelta64[s]")
tm.assert_series_equal(result, expected)
result = ser + np.timedelta64("nat", "D")
expected = Series([NaT] * 3, dtype="datetime64[s]")
tm.assert_series_equal(result, expected)
|
TestDatetimeIndexArithmetic
|
python
|
getsentry__sentry
|
src/flagpole/evaluation_context.py
|
{
"start": 355,
"end": 2486
}
|
class ____:
"""
Prepared by the application and passed to flagpole to evaluate
feature conditions.
"""
__data: EvaluationContextDict
__identity_fields: set[str]
__id: int
def __init__(self, data: EvaluationContextDict, identity_fields: set[str] | None = None):
self.__data = deepcopy(data)
self.__set_identity_fields(identity_fields)
self.__id = self.__generate_id()
def __set_identity_fields(self, identity_fields: set[str] | None = None):
trimmed_id_fields = set()
if identity_fields is not None:
for field in identity_fields:
if field in self.__data:
trimmed_id_fields.add(field)
if not trimmed_id_fields:
trimmed_id_fields.update(self.__data.keys())
self.__identity_fields = trimmed_id_fields
def __generate_id(self) -> int:
"""
Generates and return a hashed identifier for this context
The identifier should be stable for a given context contents.
Identifiers are used to determine rollout groups deterministically
and consistently.
"""
keys = list(self.__identity_fields)
vector = []
for key in sorted(keys):
vector.append(key)
vector.append(str(self.__data[key]))
hashed = hashlib.sha1(":".join(vector).encode("utf8"))
return int.from_bytes(hashed.digest(), byteorder="big")
@property
def id(self) -> int:
"""
Guard against context mutation by using this virtual property as a
getter for the private ID field.
"""
return self.__id
def get(self, key: str) -> Any:
return self.__data.get(key)
def has(self, key: str) -> Any:
return key in self.__data
def size(self) -> int:
return len(self.__data)
def to_dict(self) -> EvaluationContextDict:
return deepcopy(self.__data)
def __repr__(self) -> str:
return f"<flagpole.evaluation_context.EvaluationContext data={self.__data!r}>"
T_CONTEXT_DATA = TypeVar("T_CONTEXT_DATA")
|
EvaluationContext
|
python
|
doocs__leetcode
|
solution/1700-1799/1753.Maximum Score From Removing Stones/Solution2.py
|
{
"start": 0,
"end": 190
}
|
class ____:
def maximumScore(self, a: int, b: int, c: int) -> int:
a, b, c = sorted([a, b, c])
if a + b < c:
return a + b
return (a + b + c) >> 1
|
Solution
|
python
|
apache__thrift
|
lib/py/src/transport/TSSLSocket.py
|
{
"start": 7851,
"end": 12535
}
|
class ____(TSocket.TSocket, TSSLBase):
"""
SSL implementation of TSocket
This class creates outbound sockets wrapped using the
python standard ssl module for encrypted connections.
"""
# New signature
# def __init__(self, host='localhost', port=9090, unix_socket=None,
# **ssl_args):
# Deprecated signature
# def __init__(self, host='localhost', port=9090, validate=True,
# ca_certs=None, keyfile=None, certfile=None,
# unix_socket=None, ciphers=None):
def __init__(self, host='localhost', port=9090, *args, **kwargs):
"""Positional arguments: ``host``, ``port``, ``unix_socket``
Keyword arguments: ``keyfile``, ``certfile``, ``cert_reqs``,
``ssl_version``, ``ca_certs``,
``ciphers`` (Python 2.7.0 or later),
``server_hostname`` (Python 2.7.9 or later)
Passed to ssl.wrap_socket. See ssl.wrap_socket documentation.
Alternative keyword arguments: (Python 2.7.9 or later)
``ssl_context``: ssl.SSLContext to be used for SSLContext.wrap_socket
``server_hostname``: Passed to SSLContext.wrap_socket
Common keyword argument:
``validate_callback`` (cert, hostname) -> None:
Called after SSL handshake. Can raise when hostname does not
match the cert.
``socket_keepalive`` enable TCP keepalive, default off.
"""
self.is_valid = False
self.peercert = None
if args:
if len(args) > 6:
raise TypeError('Too many positional argument')
if not self._unix_socket_arg(host, port, args, kwargs):
self._deprecated_arg(args, kwargs, 0, 'validate')
self._deprecated_arg(args, kwargs, 1, 'ca_certs')
self._deprecated_arg(args, kwargs, 2, 'keyfile')
self._deprecated_arg(args, kwargs, 3, 'certfile')
self._deprecated_arg(args, kwargs, 4, 'unix_socket')
self._deprecated_arg(args, kwargs, 5, 'ciphers')
validate = kwargs.pop('validate', None)
if validate is not None:
cert_reqs_name = 'CERT_REQUIRED' if validate else 'CERT_NONE'
warnings.warn(
'validate is deprecated. please use cert_reqs=ssl.%s instead'
% cert_reqs_name,
DeprecationWarning, stacklevel=2)
if 'cert_reqs' in kwargs:
raise TypeError('Cannot specify both validate and cert_reqs')
kwargs['cert_reqs'] = ssl.CERT_REQUIRED if validate else ssl.CERT_NONE
unix_socket = kwargs.pop('unix_socket', None)
socket_keepalive = kwargs.pop('socket_keepalive', False)
self._validate_callback = kwargs.pop('validate_callback', _match_hostname)
TSSLBase.__init__(self, False, host, kwargs)
TSocket.TSocket.__init__(self, host, port, unix_socket,
socket_keepalive=socket_keepalive)
def close(self):
try:
self.handle.settimeout(0.001)
self.handle = self.handle.unwrap()
except (ssl.SSLError, socket.error, OSError):
# could not complete shutdown in a reasonable amount of time. bail.
pass
TSocket.TSocket.close(self)
@property
def validate(self):
warnings.warn('validate is deprecated. please use cert_reqs instead',
DeprecationWarning, stacklevel=2)
return self.cert_reqs != ssl.CERT_NONE
@validate.setter
def validate(self, value):
warnings.warn('validate is deprecated. please use cert_reqs instead',
DeprecationWarning, stacklevel=2)
self.cert_reqs = ssl.CERT_REQUIRED if value else ssl.CERT_NONE
def _do_open(self, family, socktype):
plain_sock = socket.socket(family, socktype)
try:
return self._wrap_socket(plain_sock)
except Exception as ex:
plain_sock.close()
msg = 'failed to initialize SSL'
logger.exception(msg)
raise TTransportException(type=TTransportException.NOT_OPEN, message=msg, inner=ex)
def open(self):
super(TSSLSocket, self).open()
if self._should_verify:
self.peercert = self.handle.getpeercert()
try:
self._validate_callback(self.peercert, self._server_hostname)
self.is_valid = True
except TTransportException:
raise
except Exception as ex:
raise TTransportException(message=str(ex), inner=ex)
|
TSSLSocket
|
python
|
numba__numba
|
numba/tests/test_array_analysis.py
|
{
"start": 3369,
"end": 5743
}
|
class ____(Compiler):
@classmethod
def mk_pipeline(cls, args, return_type=None, flags=None, locals=None,
library=None, typing_context=None, target_context=None):
if locals is None:
locals = {}
if not flags:
flags = Flags()
flags.nrt = True
if typing_context is None:
typing_context = registry.cpu_target.typing_context
if target_context is None:
target_context = registry.cpu_target.target_context
return cls(typing_context, target_context, library, args, return_type,
flags, locals)
def compile_to_ir(self, func, test_idempotence=None):
"""
Populate and run compiler pipeline
"""
self.state.func_id = bytecode.FunctionIdentity.from_function(func)
ExtractByteCode().run_pass(self.state)
self.state.lifted = ()
self.state.lifted_from = None
state = self.state
state.func_ir_copies = []
state.test_idempotence = test_idempotence
name = 'array_analysis_testing'
pm = PassManager(name)
pm.add_pass(TranslateByteCode, "analyzing bytecode")
pm.add_pass(FixupArgs, "fix up args")
pm.add_pass(IRProcessing, "processing IR")
# pre typing
if not state.flags.no_rewrites:
pm.add_pass(GenericRewrites, "nopython rewrites")
pm.add_pass(RewriteSemanticConstants, "rewrite semantic constants")
pm.add_pass(DeadBranchPrune, "dead branch pruning")
pm.add_pass(InlineClosureLikes,
"inline calls to locally defined closures")
# typing
pm.add_pass(NopythonTypeInference, "nopython frontend")
if not state.flags.no_rewrites:
pm.add_pass(NopythonRewrites, "nopython rewrites")
# Array Analysis pass
pm.add_pass(ArrayAnalysisPass, "array analysis")
if test_idempotence:
# Do another pass of array analysis to test idempotence
pm.add_pass(ArrayAnalysisPass, "idempotence array analysis")
# legalise
pm.add_pass(IRLegalization, "ensure IR is legal prior to lowering")
pm.add_pass(AnnotateTypes, "annotate types")
# partial compile
pm.finalize()
pm.run(state)
return state.array_analysis
|
ArrayAnalysisTester
|
python
|
conda__conda
|
conda/exceptions.py
|
{
"start": 39203,
"end": 39954
}
|
class ____(CondaError):
def __init__(self, caused_by: Any, **kwargs):
message = (
dals(
"""
A unicode encoding or decoding error has occurred.
Python 2 is the interpreter under which conda is running in your base environment.
Replacing your base environment with one having Python 3 may help resolve this issue.
If you still have a need for Python 2 environments, consider using 'conda create'
and 'conda activate'. For example:
$ conda create -n py2 python=2
$ conda activate py2
Error details: %r
"""
)
% caused_by
)
super().__init__(message, caused_by=caused_by, **kwargs)
|
EncodingError
|
python
|
doocs__leetcode
|
solution/3500-3599/3517.Smallest Palindromic Rearrangement I/Solution.py
|
{
"start": 0,
"end": 369
}
|
class ____:
def smallestPalindrome(self, s: str) -> str:
cnt = Counter(s)
t = []
ch = ""
for c in ascii_lowercase:
v = cnt[c] // 2
t.append(c * v)
cnt[c] -= v * 2
if cnt[c] == 1:
ch = c
ans = "".join(t)
ans = ans + ch + ans[::-1]
return ans
|
Solution
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_config/stack.py
|
{
"start": 1551,
"end": 1639
}
|
class ____(EvaluationStackEntry):
list_index: int
@record
|
EvaluationStackListItemEntry
|
python
|
google__jax
|
tests/colocated_python_test.py
|
{
"start": 1308,
"end": 27671
}
|
class ____(jtu.JaxTestCase):
def setUp(self):
super().setUp()
if not HAS_CLOUDPICKLE:
self.skipTest(
"ColocatedPythonTest depends on cloudpickle library"
)
if np.lib.NumpyVersion(np.__version__) < "2.0.0":
self.skipTest(
"Serialization in Colocated Python needs StringDType, and thus"
" requires NumPy 2.0.0 or later"
)
def test_colocated_cpu_devices(self):
mesh = jax.sharding.Mesh(
np.array(jax.local_devices()[:1]).reshape((1, 1)), ("x", "y")
)
cpu_mesh1 = colocated_python.colocated_cpu_devices(mesh)
cpu_devices = colocated_python.colocated_cpu_devices(
jax.local_devices()[:1]
)
cpu_mesh2 = jax.sharding.Mesh(
np.array(cpu_devices).reshape((1, 1)), ("x", "y")
)
self.assertEqual(cpu_mesh1, cpu_mesh2)
def test_serialization_roundtrip(self):
cpu_devices = colocated_python.colocated_cpu_devices(
jax.local_devices()[:1])
mesh = jax.sharding.Mesh(np.array(cpu_devices).reshape((1, 1)), ("x", "y"))
self.assertEqual(
serialization._deserialize(serialization._serialize(mesh)), mesh)
sharding1 = jax.sharding.NamedSharding(
mesh, jax.sharding.PartitionSpec("x"))
self.assertEqual(
serialization._deserialize(serialization._serialize([sharding1])),
[sharding1])
sharding2 = jax.sharding.SingleDeviceSharding(
cpu_devices[0], memory_kind="pinned_host")
self.assertEqual(
serialization._deserialize(serialization._serialize((sharding2,))),
(sharding2,))
def func(x):
return x + 1
self.assertEqual(
serialization._deserialize(serialization._serialize(func))(1), func(1))
def test_make_colocated_python_program(self):
def add_one(x):
return x + 1
cpu_devices = colocated_python.colocated_cpu_devices(jax.local_devices())
sharding = jax.sharding.SingleDeviceSharding(cpu_devices[0])
sds = jax.ShapeDtypeStruct((), jnp.int32, sharding=sharding)
fun_and_specialization = (
add_one,
None, # dummy in_specs_treedef
None, # dummy in_specs_leaves
None, # dummy out_specs_treedef
None, # dummy out_specs_leaves
None, # dummy devices
)
pickled_function = serialization._serialize(fun_and_specialization)
program = ifrt_programs.make_colocated_python_program(
"add_one", pickled_function, [cpu_devices[0]], [sds], [sds]
)
del program
def test_serialize_with_shared_obj(self):
cpu_devices = colocated_python.colocated_cpu_devices(
jax.local_devices()[:1])
mesh = jax.sharding.Mesh(
np.array(cpu_devices).reshape((1, 1)),
("long_axis_name_1", "long_axis_name_2"))
sharding1 = jax.sharding.NamedSharding(
mesh, jax.sharding.PartitionSpec("long_axis_name_1"))
sharding2 = jax.sharding.NamedSharding(
mesh, jax.sharding.PartitionSpec("long_axis_name_2"))
serialized1 = serialization._serialize([sharding1])
serialized2 = serialization._serialize([sharding1, sharding2])
serialized3 = serialization._serialize([sharding1, sharding1])
# The total serialized size of two shardings of a shared mesh should be less
# than twice the serialized size of a single sharding.
self.assertLess(len(serialized2), len(serialized1) * 2)
# The total serialized size of two identical shardings should be less than
# that of two shardings that only share the mesh.
self.assertLess(len(serialized3), len(serialized2))
self.assertEqual(serialization._deserialize(serialized1), [sharding1])
self.assertEqual(
serialization._deserialize(serialized2), [sharding1, sharding2])
self.assertEqual(
serialization._deserialize(serialized3), [sharding1, sharding1])
def test_simple_function(self):
@colocated_python.colocated_python
def add_one(x):
return x + 1
cpu_devices = colocated_python.colocated_cpu_devices(jax.local_devices())
x = np.array(1)
x = jax.device_put(x, cpu_devices[0])
with _count_colocated_python_specialization_cache_miss() as count:
out = add_one(x)
out = jax.device_get(out)
self.assertEqual(out, np.array(2))
self.assertEqual(count(), 1)
out = add_one(x)
out = jax.device_get(out)
self.assertEqual(out, np.array(2))
self.assertEqual(count(), 1)
def test_simple_function_with_tree(self):
@colocated_python.colocated_python
def add_one(x):
return jax.tree.map(lambda x: x + 1, x)
cpu_devices = colocated_python.colocated_cpu_devices(jax.local_devices())
x = [np.array(1), (np.array(2), {"v": np.array(3)})]
x = jax.device_put(x, jax.sharding.SingleDeviceSharding(cpu_devices[0]))
with _count_colocated_python_specialization_cache_miss() as count:
out = add_one(x)
out = jax.device_get(out)
self.assertEqual(out, [np.array(2), (np.array(3), {"v": np.array(4)})])
self.assertEqual(count(), 1)
out = add_one(x)
out = jax.device_get(out)
self.assertEqual(out, [np.array(2), (np.array(3), {"v": np.array(4)})])
self.assertEqual(count(), 1)
def test_empty_input_fails_without_specialization(self):
@colocated_python.colocated_python
def make_zero():
return jnp.array(0)
with self.assertRaisesRegex(
ValueError,
"No devices found. colocated_python function without input arguments"
" must be first specialized with devices."):
_ = make_zero()
def test_empty_input_with_devices_specialization(self):
@colocated_python.colocated_python
def make_zero():
return jnp.array(0)
cpu_devices = colocated_python.colocated_cpu_devices(jax.local_devices())
with _count_colocated_python_specialization_cache_miss() as count:
make_zero = make_zero.specialize(devices=cpu_devices[:1])
out = make_zero()
out = jax.device_get(out)
self.assertEqual(out, np.array(0))
self.assertEqual(count(), 1)
out = make_zero()
out = jax.device_get(out)
self.assertEqual(out, np.array(0))
self.assertEqual(count(), 1)
def test_input_polymorphism_without_out_specs_fn(self):
@colocated_python.colocated_python
def add_one(x):
return jax.tree.map(lambda x: x + 1, x)
cpu_devices = colocated_python.colocated_cpu_devices(jax.local_devices())
x = np.array(1)
x = jax.device_put(x, cpu_devices[0])
with _count_colocated_python_specialization_cache_miss() as count:
out = add_one(x)
out = jax.device_get(out)
self.assertEqual(out, np.array(2))
self.assertEqual(count(), 1)
out = add_one(x)
out = jax.device_get(out)
self.assertEqual(out, np.array(2))
self.assertEqual(count(), 1)
# Different input tree structure and dtype/shape.
x = [np.array(1), (np.array(2), {"v": np.array(3)})]
x = jax.device_put(x, jax.sharding.SingleDeviceSharding(cpu_devices[0]))
out = add_one(x)
out = jax.device_get(out)
self.assertEqual(out, [np.array(2), (np.array(3), {"v": np.array(4)})])
self.assertEqual(count(), 2)
out = add_one(x)
out = jax.device_get(out)
self.assertEqual(out, [np.array(2), (np.array(3), {"v": np.array(4)})])
self.assertEqual(count(), 2)
def test_input_polymorphism_allowed_with_out_specs_fn(self):
@colocated_python.colocated_python
def add_one(x):
return jax.tree.map(lambda x: x + 1, x)
cpu_devices = colocated_python.colocated_cpu_devices(jax.local_devices())
x = np.array(1)
x = jax.device_put(x, cpu_devices[0])
with _count_colocated_python_specialization_cache_miss() as count:
add_one = add_one.specialize(out_specs_fn=lambda x: x)
out = add_one(x)
out = jax.device_get(out)
self.assertEqual(out, np.array(2))
self.assertEqual(count(), 1)
out = add_one(x)
out = jax.device_get(out)
self.assertEqual(out, np.array(2))
self.assertEqual(count(), 1)
# Different input tree structure and dtype/shape.
x = [np.array(1), (np.array(2), {"v": np.array(3)})]
x = jax.device_put(x, jax.sharding.SingleDeviceSharding(cpu_devices[0]))
out = add_one(x)
out = jax.device_get(out)
self.assertEqual(out, [np.array(2), (np.array(3), {"v": np.array(4)})])
self.assertEqual(count(), 2)
out = add_one(x)
out = jax.device_get(out)
self.assertEqual(out, [np.array(2), (np.array(3), {"v": np.array(4)})])
self.assertEqual(count(), 2)
@parameterized.named_parameters(
("on_main_thread", True),
("on_non_main_thread", False),
)
# Cannot run concurrently with other tests using `colocated_python._testing_global_state`.
@jtu.thread_unsafe_test()
def test_sequential_execution(self, on_main_thread: bool):
cpu_devices = colocated_python.colocated_cpu_devices(jax.local_devices())
x = np.array(1)
x = jax.device_put(x, cpu_devices[0])
@colocated_python.colocated_python
def func0(x: jax.Array) -> jax.Array:
colocated_python._testing_global_state = 100
return x
@colocated_python.colocated_python
def func1(x: jax.Array) -> jax.Array:
assert "_testing_global_state" in colocated_python.__dict__
assert colocated_python._testing_global_state == 100
colocated_python._testing_global_state += 1
return x
@colocated_python.colocated_python
def func2(x: jax.Array) -> jax.Array:
assert "_testing_global_state" in colocated_python.__dict__
assert colocated_python._testing_global_state == 101
return x
@colocated_python.colocated_python
def cleanup(x: jax.Array) -> jax.Array:
if "_testing_global_state" in colocated_python.__dict__:
del colocated_python._testing_global_state
return x
# Specify out_specs_fn so that their executions are asynchronously
# dispatched.
func0 = func0.specialize(out_specs_fn=lambda x: x)
func1 = func1.specialize(out_specs_fn=lambda x: x)
func2 = func2.specialize(out_specs_fn=lambda x: x)
def calls(x: jax.Array) -> None:
# No explicit blocking before making the next call.
func0(x)
func1(x)
jax.block_until_ready(func2(x))
try:
# Executions in `calls` should run sequentially.
if on_main_thread:
calls(x)
else:
t = threading.Thread(target=calls, args=(x,))
t.start()
t.join()
# Executions should succeed without an error.
finally:
jax.block_until_ready(cleanup(x))
# Cannot run concurrently with other tests using `colocated_python._testing_global_state`.
@jtu.thread_unsafe_test()
def test_concurrent_execution(self):
cpu_devices = colocated_python.colocated_cpu_devices(jax.local_devices())
x = np.array(1)
x = jax.device_put(x, cpu_devices[0])
@colocated_python.colocated_python
def init(x: jax.Array) -> jax.Array:
colocated_python._testing_global_state = threading.Barrier(3)
return x
@colocated_python.colocated_python
def func(x: jax.Array) -> jax.Array:
assert "_testing_global_state" in colocated_python.__dict__
colocated_python._testing_global_state.wait(timeout=5)
return x
@colocated_python.colocated_python
def cleanup(x: jax.Array) -> jax.Array:
if "_testing_global_state" in colocated_python.__dict__:
del colocated_python._testing_global_state
return x
# Specify out_specs_fn so that their executions are asynchronously
# dispatched.
func = func.specialize(out_specs_fn=lambda x: x)
try:
jax.block_until_ready(init(x))
# All func calls should run concurrently and enter/exit the barrier.
t1 = threading.Thread(target=func, args=(x,))
t2 = threading.Thread(target=func, args=(x,))
t3 = threading.Thread(target=func, args=(x,))
t1.start()
t2.start()
t3.start()
t1.join()
t2.join()
t3.join()
# Executions should succeed without a deadlock.
finally:
jax.block_until_ready(cleanup(x))
def test_inputs_with_different_device_orders(self):
cpu_devices = colocated_python.colocated_cpu_devices(jax.local_devices())[:2]
if len(cpu_devices) < 2:
self.skipTest("Not enough CPU devices")
@colocated_python.colocated_python
def add(x: jax.Array, y: jax.Array) -> jax.Array:
arrays = [
x.addressable_shards[1].data + y.addressable_shards[0].data,
x.addressable_shards[0].data + y.addressable_shards[1].data,
]
return jax.make_array_from_single_device_arrays(
y.shape, y.sharding, arrays
)
# The execution will use mixed device orders. We should specialize the
# function with devices to avoid the argument-dependent device selection.
add = add.specialize(devices=cpu_devices)
mesh1 = jax.sharding.Mesh([cpu_devices[0], cpu_devices[1]], "x")
sharding1 = jax.sharding.NamedSharding(
mesh1, jax.sharding.PartitionSpec("x")
)
mesh2 = jax.sharding.Mesh([cpu_devices[1], cpu_devices[0]], "x")
sharding2 = jax.sharding.NamedSharding(
mesh2, jax.sharding.PartitionSpec("x")
)
x = np.array([0, 2])
x = jax.device_put(x, sharding1)
y = np.array([4, 8])
y = jax.device_put(y, sharding2)
out = add(x, y)
self.assertEqual(out.sharding, sharding2)
out_device_list = [shard.device for shard in out.addressable_shards]
self.assertEqual(out_device_list, [cpu_devices[1], cpu_devices[0]])
out = jax.device_get(out)
np.testing.assert_equal(out, np.array([2 + 4, 0 + 8]))
def test_module_variable_access(self):
try:
# The following pattern of storing and accessing non-serialized state in
# the Python module is discouraged for storing user-defined state.
# However, it should still work because many caching mechanisms rely on
# this behavior.
# Poison the test's own `colocated_python` module with a non-serializable
# object (file) to detect any invalid attempt to serialize the module as
# part of a colocated Python function.
colocated_python._testing_non_serializable_object = (
tempfile.TemporaryFile()
)
@colocated_python.colocated_python
def set_global_state(x: jax.Array) -> jax.Array:
colocated_python._testing_global_state = x
return x + 1
@colocated_python.colocated_python
def get_global_state(x: jax.Array) -> jax.Array:
del x
return colocated_python._testing_global_state
cpu_devices = colocated_python.colocated_cpu_devices(jax.local_devices())
x = np.array(1)
x = jax.device_put(x, cpu_devices[0])
y = np.array(2)
y = jax.device_put(y, cpu_devices[0])
jax.block_until_ready(set_global_state(x))
out = jax.device_get(get_global_state(y))
np.testing.assert_equal(out, np.array(1))
finally:
if "_testing_non_serializable_object" in colocated_python.__dict__:
colocated_python._testing_non_serializable_object.close()
del colocated_python._testing_non_serializable_object
if "_testing_global_state" in colocated_python.__dict__:
del colocated_python._testing_global_state
def test_string_processing(self):
cpu_devices = colocated_python.colocated_cpu_devices(jax.local_devices())
if len(cpu_devices) < 2:
self.skipTest(f"Need at least two CPU devices, got: {len(cpu_devices)}")
@colocated_python.colocated_python
def f(x):
out_arrays = []
upper_caser = np.vectorize(
lambda x: x.upper(), otypes=[np.dtypes.StringDType()]
)
for shard in x.addressable_shards:
np_array = jax.device_get(shard.data)
out_np_array = upper_caser(np_array)
out_arrays.append(jax.device_put(out_np_array, device=shard.device))
return jax.make_array_from_single_device_arrays(
sharding=x.sharding, shape=x.shape, arrays=out_arrays
)
# Make a string array.
numpy_string_array = np.array(
[["abcd", "efgh"], ["ijkl", "mnop"]], dtype=np.dtypes.StringDType() # type: ignore
)
mesh = jax.sharding.Mesh(
np.array(cpu_devices[:2]).reshape((2, 1)), ("x", "y")
)
sharding = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec("x"))
x = jax.device_put(numpy_string_array, device=sharding)
# Run the colocated Python function with the string array as input.
out = f(x)
out = jax.device_get(out)
# Should have gotten the strings with all upper case letters.
np.testing.assert_equal(
out,
np.array(
[["ABCD", "EFGH"], ["IJKL", "MNOP"]], dtype=np.dtypes.StringDType()
),
)
def test_binary_data_processing(self):
cpu_devices = colocated_python.colocated_cpu_devices(jax.local_devices())
if len(cpu_devices) < 1:
self.skipTest("Need at least one CPU devices")
@colocated_python.colocated_python
def f(x):
out_arrays = []
for shard in x.addressable_shards:
np_array = jax.device_get(shard.data)
input_ints = struct.unpack(
"<ii", base64.b64decode(np_array[0].encode("ascii"))
)
output_string = base64.b64encode(
struct.pack("<ii", input_ints[0] + 1, input_ints[1] + 1)
).decode("ascii")
out_np_array = np.array([output_string], dtype=np.dtypes.StringDType())
out_arrays.append(jax.device_put(out_np_array, device=shard.device))
out = jax.make_array_from_single_device_arrays(
sharding=x.sharding, shape=x.shape, arrays=out_arrays
)
return out
# Make the input array with the binary data that packs two integers as ascii
# string.
input_string = base64.b64encode(struct.pack("<ii", 1001, 1002)).decode(
"ascii"
)
numpy_string_array = np.array([input_string], dtype=np.dtypes.StringDType())
sharding = jax.sharding.SingleDeviceSharding(cpu_devices[0])
x = jax.device_put(numpy_string_array, device=sharding)
out = f(x)
out = jax.device_get(out)
# Should have gotten the binary data with the incremented integers as a
# ascii string.
out_ints = struct.unpack("<ii", base64.b64decode(out[0].encode("ascii")))
self.assertEqual(out_ints[0], 1002)
self.assertEqual(out_ints[1], 1003)
def test_detect_invalid_mesh_device(self):
cpu_devices = colocated_python.colocated_cpu_devices(jax.local_devices())
if jax.local_devices()[0].id == cpu_devices[0].id:
self.skipTest(
"This test only works in a setup where accelerator and CPU devices"
" use different device IDs."
)
# mesh contains non-CPU devices. To be used in colocated Python, it should
# have contained CPU devices only.
mesh = jax.sharding.Mesh(jax.local_devices(), "x")
sharding = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec())
@colocated_python.colocated_python
def make_zero() -> jax.Array:
return jax.make_array_from_callback((), sharding, lambda _: np.array(0))
with self.assertRaisesRegex(ValueError, "Invalid device ID"):
make_zero = make_zero.specialize(devices=cpu_devices)
jax.block_until_ready(make_zero())
# Cannot run concurrently with other tests using `colocated_python._testing_global_state`.
@jtu.thread_unsafe_test()
def test_object_lifecycle(self):
cpu_devices = colocated_python.colocated_cpu_devices(jax.local_devices())
sharding = jax.sharding.SingleDeviceSharding(cpu_devices[0])
x = jax.device_put(np.array(0), sharding)
@colocated_python.colocated_python_class
class Object:
def __init__(self) -> None:
colocated_python._testing_initialized = True
def __del__(self) -> None:
colocated_python._testing_destroyed = True
def echo(self, x: jax.Array) -> jax.Array:
return x
@colocated_python.colocated_python
def check_initialized() -> jax.Array:
initialized = getattr(colocated_python, "_testing_initialized", False)
return jax.device_put(np.array(initialized), sharding)
@colocated_python.colocated_python
def check_destroyed() -> jax.Array:
destroyed = getattr(colocated_python, "_testing_destroyed", False)
return jax.device_put(np.array(destroyed), sharding)
@colocated_python.colocated_python
def cleanup(x: jax.Array) -> jax.Array:
if "_testing_initialized" in colocated_python.__dict__:
del colocated_python._testing_initialized
if "_testing_destroyed" in colocated_python.__dict__:
del colocated_python._testing_destroyed
return x
check_initialized = check_initialized.specialize(devices=cpu_devices[:1])
check_destroyed = check_destroyed.specialize(devices=cpu_devices[:1])
try:
# Object initialization is deferred until the first method call.
obj = Object()
self.assertEqual(jax.device_get(check_initialized()), False)
self.assertEqual(jax.device_get(check_destroyed()), False)
# If the object is destroyed without any method calls, the object is
# destroyed without initialization.
del obj
self.assertEqual(jax.device_get(check_initialized()), False)
self.assertEqual(jax.device_get(check_destroyed()), False)
finally:
jax.block_until_ready(cleanup(x))
try:
# Object initialization is deferred until the first method call.
obj = Object()
self.assertEqual(jax.device_get(check_initialized()), False)
self.assertEqual(jax.device_get(check_destroyed()), False)
# The first method call on a process triggers object initialization there.
x = np.array(1)
x = jax.device_put(x, sharding)
jax.block_until_ready(obj.echo(x))
self.assertEqual(jax.device_get(check_initialized()), True)
self.assertEqual(jax.device_get(check_destroyed()), False)
del obj
self.assertEqual(jax.device_get(check_initialized()), True)
self.assertEqual(jax.device_get(check_destroyed()), True)
finally:
jax.block_until_ready(cleanup(x))
def test_stateful_object(self):
cpu_devices = colocated_python.colocated_cpu_devices(jax.local_devices())
@colocated_python.colocated_python_class
class Value:
def __init__(self, initial_value: np.ndarray) -> None:
self.value = initial_value
def add(self, x: jax.Array) -> jax.Array:
self.value += np.asarray(x)
return jax.device_put(self.value, x.sharding)
def fetch_like(self, x: jax.Array) -> jax.Array:
return jax.device_put(self.value, x.sharding)
value = Value(np.array(5))
x = np.array(1)
x = jax.device_put(x, cpu_devices[0])
out = jax.device_get(value.add(x))
self.assertEqual(out, np.array(6))
out = jax.device_get(value.add(x))
self.assertEqual(out, np.array(7))
out = jax.device_get(value.fetch_like(x))
self.assertEqual(out, np.array(7))
def test_object_with_captured_sharding(self):
cpu_devices = colocated_python.colocated_cpu_devices(jax.local_devices())
if len(cpu_devices) < 2:
self.skipTest(f"Need at least two CPU devices, got: {len(cpu_devices)}")
mesh = jax.sharding.Mesh(cpu_devices[0:2], "x")
sharding1 = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec())
sharding2 = jax.sharding.NamedSharding(
mesh, jax.sharding.PartitionSpec("x")
)
@colocated_python.colocated_python_class
class Value:
def __init__(self, initial_value: np.ndarray) -> None:
self.value = initial_value
# Captured shardings in the closure.
self.sharding1 = sharding1
self.sharding2 = sharding2
def add_sharding1(self, x: jax.Array) -> jax.Array:
self.value += np.asarray(x)
return jax.device_put(self.value, self.sharding1)
def add_sharding2(self, x: jax.Array) -> jax.Array:
self.value += np.asarray(x)
return jax.device_put(self.value, self.sharding2)
value = Value(np.array([5, 15]))
x = np.array([1])
x = jax.device_put(
x, jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec())
)
out = value.add_sharding1(x)
self.assertEqual(out.sharding, sharding1)
out = jax.device_get(out)
self.assertArraysEqual(out, np.array([6, 16]))
out = value.add_sharding2(x)
self.assertEqual(out.sharding, sharding2)
out = jax.device_get(out)
self.assertArraysEqual(out, np.array([7, 17]))
def test_object_method_specialization(self):
cpu_devices = colocated_python.colocated_cpu_devices(jax.local_devices())
cpu_devices = cpu_devices[:1]
sharding = jax.sharding.SingleDeviceSharding(cpu_devices[0])
@colocated_python.colocated_python_class
class Object:
def __init__(self, sharding: jax.sharding.Sharding) -> None:
self.sharding = sharding
def fetch_with_devices(self) -> jax.Array:
return jax.device_put(np.array(1, dtype=np.int32), self.sharding)
def fetch_with_output_spec(self) -> np.ndarray:
return jax.device_put(np.array(1, dtype=np.int32), self.sharding)
obj = Object(sharding)
with self.assertRaisesRegex(
ValueError,
"No devices found. colocated_python function without input arguments"
" must be first specialized with devices."):
jax.block_until_ready(obj.fetch_with_devices())
with self.assertRaisesRegex(
ValueError,
"No devices found. colocated_python function without input arguments"
" must be first specialized with devices."):
jax.block_until_ready(obj.fetch_with_output_spec())
obj.fetch_with_devices = (
obj.fetch_with_devices.specialize(devices=cpu_devices))
out = obj.fetch_with_devices()
self.assertArraysEqual(out, np.array(1, dtype=np.int32))
# TODO(hyeontaek): Infer `devices` from the output spec computed using the
# output spec function.
obj.fetch_with_output_spec = obj.fetch_with_output_spec.specialize(
devices=cpu_devices,
out_specs_fn=lambda: jax.ShapeDtypeStruct(
shape=(), dtype=np.int32, sharding=sharding))
out = obj.fetch_with_output_spec()
self.assertArraysEqual(out, np.array(1, dtype=np.int32))
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
|
ColocatedPythonTest
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.