language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | huggingface__transformers | src/transformers/models/sam2_video/modular_sam2_video.py | {
"start": 43567,
"end": 43623
} | class ____(Sam2FeedForward):
pass
| Sam2VideoFeedForward |
python | bokeh__bokeh | src/bokeh/models/callbacks.py | {
"start": 2037,
"end": 2268
} | class ____(Model):
''' Base class for interactive callback.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
| Callback |
python | getsentry__sentry | tests/sentry/rules/conditions/test_regression_event.py | {
"start": 209,
"end": 513
} | class ____(RuleTestCase):
rule_cls = RegressionEventCondition
def test_applies_correctly(self) -> None:
rule = self.get_rule()
self.assertPasses(rule, self.event, is_regression=True)
self.assertDoesNotPass(rule, self.event, is_regression=False)
| RegressionEventConditionTest |
python | run-llama__llama_index | llama-index-core/llama_index/core/instrumentation/events/query.py | {
"start": 457,
"end": 799
} | class ____(BaseEvent):
"""
QueryEndEvent.
Args:
query (QueryType): Query as a string or query bundle.
response (RESPONSE_TYPE): Response.
"""
query: QueryType
response: RESPONSE_TYPE
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "QueryEndEvent"
| QueryEndEvent |
python | sphinx-doc__sphinx | sphinx/directives/admonitions.py | {
"start": 1199,
"end": 1298
} | class ____(SphinxAdmonition):
required_arguments = 1
node_class = nodes.admonition
| Admonition |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 226274,
"end": 226617
} | class ____(VegaLiteSchema):
"""ConditionalPredicateValueDefFontStylenullExprRef schema wrapper."""
_schema = {
"$ref": "#/definitions/ConditionalPredicate<(ValueDef<(FontStyle|null)>|ExprRef)>"
}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
| ConditionalPredicateValueDefFontStylenullExprRef |
python | sympy__sympy | sympy/core/operations.py | {
"start": 789,
"end": 17785
} | class ____(Basic):
""" Associative operations, can separate noncommutative and
commutative parts.
(a op b) op c == a op (b op c) == a op b op c.
Base class for Add and Mul.
This is an abstract base class, concrete derived classes must define
the attribute `identity`.
.. deprecated:: 1.7
Using arguments that aren't subclasses of :class:`~.Expr` in core
operators (:class:`~.Mul`, :class:`~.Add`, and :class:`~.Pow`) is
deprecated. See :ref:`non-expr-args-deprecated` for details.
Parameters
==========
*args :
Arguments which are operated
evaluate : bool, optional
Evaluate the operation. If not passed, refer to ``global_parameters.evaluate``.
"""
# for performance reason, we don't let is_commutative go to assumptions,
# and keep it right here
__slots__: tuple[str, ...] = ('is_commutative',)
_args_type: type[Basic] | None = None
@cacheit
def __new__(cls, *args, evaluate=None, _sympify=True):
# Allow faster processing by passing ``_sympify=False``, if all arguments
# are already sympified.
if _sympify:
args = list(map(_sympify_, args))
# Disallow non-Expr args in Add/Mul
typ = cls._args_type
if typ is not None:
from .relational import Relational
if any(isinstance(arg, Relational) for arg in args):
raise TypeError("Relational cannot be used in %s" % cls.__name__)
# This should raise TypeError once deprecation period is over:
for arg in args:
if not isinstance(arg, typ):
sympy_deprecation_warning(
f"""
Using non-Expr arguments in {cls.__name__} is deprecated (in this case, one of
the arguments has type {type(arg).__name__!r}).
If you really did intend to use a multiplication or addition operation with
this object, use the * or + operator instead.
""",
deprecated_since_version="1.7",
active_deprecations_target="non-expr-args-deprecated",
stacklevel=4,
)
if evaluate is None:
evaluate = global_parameters.evaluate
if not evaluate:
obj = cls._from_args(args)
obj = cls._exec_constructor_postprocessors(obj)
return obj
args = [a for a in args if a is not cls.identity]
if len(args) == 0:
return cls.identity
if len(args) == 1:
return args[0]
c_part, nc_part, order_symbols = cls.flatten(args)
is_commutative = not nc_part
obj = cls._from_args(c_part + nc_part, is_commutative)
obj = cls._exec_constructor_postprocessors(obj)
if order_symbols is not None:
from sympy.series.order import Order
return Order(obj, *order_symbols)
return obj
@classmethod
def _from_args(cls, args, is_commutative=None):
"""Create new instance with already-processed args.
If the args are not in canonical order, then a non-canonical
result will be returned, so use with caution. The order of
args may change if the sign of the args is changed."""
if len(args) == 0:
return cls.identity
elif len(args) == 1:
return args[0]
obj = super().__new__(cls, *args)
if is_commutative is None:
is_commutative = fuzzy_and(a.is_commutative for a in args)
obj.is_commutative = is_commutative
return obj
def _new_rawargs(self, *args, reeval=True, **kwargs):
"""Create new instance of own class with args exactly as provided by
caller but returning the self class identity if args is empty.
Examples
========
This is handy when we want to optimize things, e.g.
>>> from sympy import Mul, S
>>> from sympy.abc import x, y
>>> e = Mul(3, x, y)
>>> e.args
(3, x, y)
>>> Mul(*e.args[1:])
x*y
>>> e._new_rawargs(*e.args[1:]) # the same as above, but faster
x*y
Note: use this with caution. There is no checking of arguments at
all. This is best used when you are rebuilding an Add or Mul after
simply removing one or more args. If, for example, modifications,
result in extra 1s being inserted they will show up in the result:
>>> m = (x*y)._new_rawargs(S.One, x); m
1*x
>>> m == x
False
>>> m.is_Mul
True
Another issue to be aware of is that the commutativity of the result
is based on the commutativity of self. If you are rebuilding the
terms that came from a commutative object then there will be no
problem, but if self was non-commutative then what you are
rebuilding may now be commutative.
Although this routine tries to do as little as possible with the
input, getting the commutativity right is important, so this level
of safety is enforced: commutativity will always be recomputed if
self is non-commutative and kwarg `reeval=False` has not been
passed.
"""
if reeval and self.is_commutative is False:
is_commutative = None
else:
is_commutative = self.is_commutative
return self._from_args(args, is_commutative)
@classmethod
def flatten(cls, seq):
"""Return seq so that none of the elements are of type `cls`. This is
the vanilla routine that will be used if a class derived from AssocOp
does not define its own flatten routine."""
# apply associativity, no commutativity property is used
new_seq = []
while seq:
o = seq.pop()
if o.__class__ is cls: # classes must match exactly
seq.extend(o.args)
else:
new_seq.append(o)
new_seq.reverse()
# c_part, nc_part, order_symbols
return [], new_seq, None
def _matches_commutative(self, expr, repl_dict=None, old=False):
"""
Matches Add/Mul "pattern" to an expression "expr".
repl_dict ... a dictionary of (wild: expression) pairs, that get
returned with the results
This function is the main workhorse for Add/Mul.
Examples
========
>>> from sympy import symbols, Wild, sin
>>> a = Wild("a")
>>> b = Wild("b")
>>> c = Wild("c")
>>> x, y, z = symbols("x y z")
>>> (a+sin(b)*c)._matches_commutative(x+sin(y)*z)
{a_: x, b_: y, c_: z}
In the example above, "a+sin(b)*c" is the pattern, and "x+sin(y)*z" is
the expression.
The repl_dict contains parts that were already matched. For example
here:
>>> (x+sin(b)*c)._matches_commutative(x+sin(y)*z, repl_dict={a: x})
{a_: x, b_: y, c_: z}
the only function of the repl_dict is to return it in the
result, e.g. if you omit it:
>>> (x+sin(b)*c)._matches_commutative(x+sin(y)*z)
{b_: y, c_: z}
the "a: x" is not returned in the result, but otherwise it is
equivalent.
"""
from .function import _coeff_isneg
# make sure expr is Expr if pattern is Expr
from .expr import Expr
if isinstance(self, Expr) and not isinstance(expr, Expr):
return None
if repl_dict is None:
repl_dict = {}
# handle simple patterns
if self == expr:
return repl_dict
d = self._matches_simple(expr, repl_dict)
if d is not None:
return d
# eliminate exact part from pattern: (2+a+w1+w2).matches(expr) -> (w1+w2).matches(expr-a-2)
from .function import WildFunction
from .symbol import Wild
wild_part, exact_part = sift(self.args, lambda p:
p.has(Wild, WildFunction) and not expr.has(p),
binary=True)
if not exact_part:
wild_part = list(ordered(wild_part))
if self.is_Add:
# in addition to normal ordered keys, impose
# sorting on Muls with leading Number to put
# them in order
wild_part = sorted(wild_part, key=lambda x:
x.args[0] if x.is_Mul and x.args[0].is_Number else
0)
else:
exact = self._new_rawargs(*exact_part)
free = expr.free_symbols
if free and (exact.free_symbols - free):
# there are symbols in the exact part that are not
# in the expr; but if there are no free symbols, let
# the matching continue
return None
newexpr = self._combine_inverse(expr, exact)
if not old and (expr.is_Add or expr.is_Mul):
check = newexpr
if _coeff_isneg(check):
check = -check
if check.count_ops() > expr.count_ops():
return None
newpattern = self._new_rawargs(*wild_part)
return newpattern.matches(newexpr, repl_dict)
# now to real work ;)
i = 0
saw = set()
while expr not in saw:
saw.add(expr)
args = tuple(ordered(self.make_args(expr)))
if self.is_Add and expr.is_Add:
# in addition to normal ordered keys, impose
# sorting on Muls with leading Number to put
# them in order
args = tuple(sorted(args, key=lambda x:
x.args[0] if x.is_Mul and x.args[0].is_Number else
0))
expr_list = (self.identity,) + args
for last_op in reversed(expr_list):
for w in reversed(wild_part):
d1 = w.matches(last_op, repl_dict)
if d1 is not None:
d2 = self.xreplace(d1).matches(expr, d1)
if d2 is not None:
return d2
if i == 0:
if self.is_Mul:
# make e**i look like Mul
if expr.is_Pow and expr.exp.is_Integer:
from .mul import Mul
if expr.exp > 0:
expr = Mul(*[expr.base, expr.base**(expr.exp - 1)], evaluate=False)
else:
expr = Mul(*[1/expr.base, expr.base**(expr.exp + 1)], evaluate=False)
i += 1
continue
elif self.is_Add:
# make i*e look like Add
c, e = expr.as_coeff_Mul()
if abs(c) > 1:
from .add import Add
if c > 0:
expr = Add(*[e, (c - 1)*e], evaluate=False)
else:
expr = Add(*[-e, (c + 1)*e], evaluate=False)
i += 1
continue
# try collection on non-Wild symbols
from sympy.simplify.radsimp import collect
was = expr
did = set()
for w in reversed(wild_part):
c, w = w.as_coeff_mul(Wild)
free = c.free_symbols - did
if free:
did.update(free)
expr = collect(expr, free)
if expr != was:
i += 0
continue
break # if we didn't continue, there is nothing more to do
return
def _has_matcher(self):
"""Helper for .has() that checks for containment of
subexpressions within an expr by using sets of args
of similar nodes, e.g. x + 1 in x + y + 1 checks
to see that {x, 1} & {x, y, 1} == {x, 1}
"""
def _ncsplit(expr):
# this is not the same as args_cnc because here
# we don't assume expr is a Mul -- hence deal with args --
# and always return a set.
cpart, ncpart = sift(expr.args,
lambda arg: arg.is_commutative is True, binary=True)
return set(cpart), ncpart
c, nc = _ncsplit(self)
cls = self.__class__
def is_in(expr):
if isinstance(expr, cls):
if expr == self:
return True
_c, _nc = _ncsplit(expr)
if (c & _c) == c:
if not nc:
return True
elif len(nc) <= len(_nc):
for i in range(len(_nc) - len(nc) + 1):
if _nc[i:i + len(nc)] == nc:
return True
return False
return is_in
def _eval_evalf(self, prec):
"""
Evaluate the parts of self that are numbers; if the whole thing
was a number with no functions it would have been evaluated, but
it wasn't so we must judiciously extract the numbers and reconstruct
the object. This is *not* simply replacing numbers with evaluated
numbers. Numbers should be handled in the largest pure-number
expression as possible. So the code below separates ``self`` into
number and non-number parts and evaluates the number parts and
walks the args of the non-number part recursively (doing the same
thing).
"""
from .add import Add
from .mul import Mul
from .symbol import Symbol
from .function import AppliedUndef
if isinstance(self, (Mul, Add)):
x, tail = self.as_independent(Symbol, AppliedUndef)
# if x is an AssocOp Function then the _evalf below will
# call _eval_evalf (here) so we must break the recursion
if not (tail is self.identity or
isinstance(x, AssocOp) and x.is_Function or
x is self.identity and isinstance(tail, AssocOp)):
# here, we have a number so we just call to _evalf with prec;
# prec is not the same as n, it is the binary precision so
# that's why we don't call to evalf.
x = x._evalf(prec) if x is not self.identity else self.identity
args = []
tail_args = tuple(self.func.make_args(tail))
for a in tail_args:
# here we call to _eval_evalf since we don't know what we
# are dealing with and all other _eval_evalf routines should
# be doing the same thing (i.e. taking binary prec and
# finding the evalf-able args)
newa = a._eval_evalf(prec)
if newa is None:
args.append(a)
else:
args.append(newa)
return self.func(x, *args)
# this is the same as above, but there were no pure-number args to
# deal with
args = []
for a in self.args:
newa = a._eval_evalf(prec)
if newa is None:
args.append(a)
else:
args.append(newa)
return self.func(*args)
@overload
@classmethod
def make_args(cls: type[Add], expr: Expr) -> tuple[Expr, ...]: ... # type: ignore
@overload
@classmethod
def make_args(cls: type[Mul], expr: Expr) -> tuple[Expr, ...]: ... # type: ignore
@overload
@classmethod
def make_args(cls: type[And], expr: Boolean) -> tuple[Boolean, ...]: ... # type: ignore
@overload
@classmethod
def make_args(cls: type[Or], expr: Boolean) -> tuple[Boolean, ...]: ... # type: ignore
@classmethod
def make_args(cls: type[Basic], expr: Basic) -> tuple[Basic, ...]:
"""
Return a sequence of elements `args` such that cls(*args) == expr
Examples
========
>>> from sympy import Symbol, Mul, Add
>>> x, y = map(Symbol, 'xy')
>>> Mul.make_args(x*y)
(x, y)
>>> Add.make_args(x*y)
(x*y,)
>>> set(Add.make_args(x*y + y)) == set([y, x*y])
True
"""
if isinstance(expr, cls):
return expr.args
else:
return (sympify(expr),)
def doit(self, **hints):
if hints.get('deep', True):
terms = [term.doit(**hints) for term in self.args]
else:
terms = self.args
return self.func(*terms, evaluate=True)
| AssocOp |
python | google__pytype | pytype/overlays/dataclass_overlay.py | {
"start": 6728,
"end": 8430
} | class ____(classgen.FieldConstructor):
"""Implements dataclasses.field."""
@classmethod
def make(cls, ctx, module):
return super().make("field", ctx, module)
def call(self, node, func, args, alias_map=None):
"""Returns a type corresponding to a field."""
args = args.simplify(node, self.ctx)
self.match_args(node, args)
node, default_var = self._get_default_var(node, args)
init = self.get_kwarg(args, "init", True)
kw_only = self.get_kwarg(args, "kw_only", None)
typ = FieldInstance(self.ctx, init, default_var, kw_only).to_variable(node)
return node, typ
def _get_default_var(self, node, args):
if "default" in args.namedargs and "default_factory" in args.namedargs:
# The pyi signatures should prevent this; check left in for safety.
raise error_types.DuplicateKeyword(
self.signatures[0].signature, args, self.ctx, "default"
)
elif "default" in args.namedargs:
default_var = args.namedargs["default"]
elif "default_factory" in args.namedargs:
factory_var = args.namedargs["default_factory"]
(factory,) = factory_var.data
f_args = function.Args(posargs=())
node, default_var = factory.call(node, factory_var.bindings[0], f_args)
else:
default_var = None
return node, default_var
def is_field(var):
return var and isinstance(var.data[0], FieldInstance)
def match_initvar(var):
"""Unpack the type parameter from InitVar[T]."""
return abstract_utils.match_type_container(var, "dataclasses.InitVar")
def match_classvar(var):
"""Unpack the type parameter from ClassVar[T]."""
return abstract_utils.match_type_container(var, "typing.ClassVar")
| FieldFunction |
python | google__jax | jax/experimental/jax2tf/tests/call_tf_test.py | {
"start": 2037,
"end": 30868
} | class ____(tf_test_util.JaxToTfTestCase):
def setUp(self):
if tf is None:
raise unittest.SkipTest("Test requires tensorflow")
# TODO(b/171320191): this line works around a missing context initialization
# bug in TensorFlow.
_ = tf.add(1, 1)
super().setUp()
# One TF device of each device_type
self.tf_devices = []
for tf_device in tf.config.list_logical_devices():
if tf_device.device_type == "TPU_SYSTEM":
continue # A virtual device
if all(tf_device.device_type != d.device_type for d in self.tf_devices):
self.tf_devices.append(tf_device)
@_parameterized_jit
def test_eval_scalar_arg(self, with_jit=True):
def f_tf(x):
return tf.math.sin(x)
x = 3.
res = _maybe_jit(with_jit, jax2tf.call_tf(f_tf))(x)
self.assertAllClose(jnp.sin(x), res)
@_parameterized_jit
def test_eval_scalar_res(self, with_jit=True):
x = 3.
res = _maybe_jit(with_jit, jax2tf.call_tf(lambda x: 4.))(x)
self.assertAllClose(4., res, check_dtypes=False)
@_parameterized_jit
def test_eval_numpy_arg(self, with_jit=True):
x = np.ones((2, 3), dtype=np.float32)
res = _maybe_jit(with_jit, jax2tf.call_tf(tf.math.sin))(x)
self.assertAllClose(jnp.sin(x), res)
@_parameterized_jit
def test_eval_numpy_res(self, with_jit=False):
x = np.ones((2, 3))
res = _maybe_jit(with_jit, jax2tf.call_tf(lambda _: x))(x)
self.assertAllClose(x, res)
@_parameterized_jit
def test_eval_devicearray_arg(self, with_jit=False):
x = jnp.ones((2, 3), dtype=np.float32)
res = _maybe_jit(with_jit, jax2tf.call_tf(tf.math.sin))(x)
self.assertAllClose(jnp.sin(x), res)
x = jnp.array(3.0, dtype=jnp.bfloat16)
res = jax2tf.call_tf(lambda x: x)(x)
self.assertAllClose(x, res)
# bfloat16 scalar will create a copy.
with self.assertRaises(AssertionError):
self.assertTrue(np.shares_memory(x, res))
@_parameterized_jit
def test_eval_pytree(self, with_jit=True):
def fun_tf(x: dict, y: tuple) -> tuple:
return (x["first"] * x["second"], y[0] + y[1])
x = dict(first=np.float32(3.), second=np.float32(4.))
y = (np.float64(5.), np.float64(6.))
fun_jax = _maybe_jit(with_jit, jax2tf.call_tf(fun_tf))
res = fun_jax(x, y)
self.assertAllClose((np.float32(12.), np.float64(11.)), res)
def test_result_tuple(self):
x1 = np.ones(3, dtype=np.int32)
x2 = np.ones(5, dtype=np.float32)
def fun_tf():
return tf.tuple([x1, x2])
fun_jax = jax.jit(jax2tf.call_tf(fun_tf))
res = fun_jax()
self.assertAllClose(res, (x1, x2))
def test_error_non_compilable_strings(self):
# Check that in op-by-op we call a function in eager mode.
def f_tf_non_compilable(x):
return tf.strings.length(tf.strings.format("Hello {}!", [x]))
f_jax = jax2tf.call_tf(f_tf_non_compilable)
x = np.float32(0.7)
self.assertAllClose(f_tf_non_compilable(x).numpy(), f_jax(x))
with self.assertRaisesRegex(ValueError,
_call_tf_non_compilable_error):
jax.jit(f_jax)(x)
with self.assertRaisesRegex(ValueError,
_call_tf_non_compilable_error):
lax.cond(True, lambda x: f_jax(x), lambda x: f_jax(x), x)
def test_error_non_compilable_dynamic_shape(self):
# Check that in op-by-op we call a function in eager mode.
def f_tf_non_compilable(x):
return tf.cond(x[0], lambda: x[1:], lambda: x)
f_jax = jax2tf.call_tf(f_tf_non_compilable)
x = np.array([True, False], dtype=np.bool_)
self.assertAllClose(f_tf_non_compilable(x), f_jax(x)) # Works in eager mode
with self.assertRaisesRegex(ValueError, _call_tf_dynamic_shape_error):
jax.jit(f_jax)(x)
def test_error_bad_result_tensorarray(self):
# Call a function that returns a tf.TensorArray. This should be detected
# early on. If we don't the function is actually compilable but returns
# a tuple instead of a single result.
def fun_tf():
ta = tf.TensorArray(tf.int32, size=0, dynamic_size=True)
ta = ta.unstack([0, 1, 2, 3, 4])
return ta
with self.assertRaisesRegex(ValueError,
"The called TF function returns a result that is not convertible to JAX"):
fun_jax = jax.jit(jax2tf.call_tf(fun_tf))
fun_jax()
def test_error_bad_result_string(self):
def fun_tf():
return tf.constant("foo")
# Now under jit, should fail because the function is not compilable
with self.assertRaisesRegex(ValueError,
"The called TF function returns a result that is not convertible to JAX"):
fun_jax = jax.jit(jax2tf.call_tf(fun_tf))
fun_jax()
@_parameterized_jit
def test_control_flow(self, with_jit=True):
def times_5_tf(x):
# Multiply x * 5 using a loop
c = lambda i, acc: tf.less(i, 5)
b = lambda i, acc: (tf.add(i, 1), tf.add(acc, x))
_, acc = tf.while_loop(c, b, [tf.constant(0), tf.constant(0.)])
return acc
def fun_jax(x):
# Calls times_5_tf 3 times in a loop
def body(_, acc):
return jax2tf.call_tf(times_5_tf)(acc)
return lax.fori_loop(0, 3, body, x)
x = np.float32(3.)
res = _maybe_jit(with_jit, fun_jax)(x)
self.assertAllClose(np.float32(x * 5 * 5 * 5), res)
@parameterized.named_parameters(
dict(
testcase_name=f"_{dtype.__name__}{'_jit' if with_jit else ''}",
dtype=dtype,
with_jit=with_jit)
for dtype in set(jtu.dtypes.all) - {np.bool_}
for with_jit in [True, False])
def test_dtypes(self, dtype=np.int32, with_jit=True):
def fun_tf(x):
# AddV2 supports more types
return tf.raw_ops.AddV2(x=x, y=tf.constant(3, dtype=dtype))
def fun_jax(x):
return jax2tf.call_tf(fun_tf)(x) + x
x = np.ones((3,), dtype=dtype)
res = _maybe_jit(with_jit, fun_jax)(x)
self.assertAllClose(dtype(2 * x + 3), res)
@_parameterized_jit
def test_bool(self, with_jit=False):
def fun_tf(x, y):
return tf.math.logical_and(x, y)
x = np.array([True, False, True, False], dtype=np.bool_)
y = np.array([True, True, False, False], dtype=np.bool_)
res = _maybe_jit(with_jit, jax2tf.call_tf(fun_tf))(x, y)
self.assertAllClose(
np.array([True, False, False, False], dtype=np.bool_), res)
@_parameterized_jit
def test_x64_input(self, with_jit=True):
def f_tf(x):
return tf.math.sin(x)
x = 5. # TF interprets this as f64
res_call_tf = _maybe_jit(with_jit, jax2tf.call_tf(f_tf))(x)
res_jax = jnp.sin(x)
self.assertAllClose(res_call_tf, res_jax)
@_parameterized_jit
def test_x64_output(self, with_jit=True):
def f_tf(x):
return (tf.constant(3., tf.float64), x)
x = np.float32(5.)
res_call_tf = _maybe_jit(with_jit, jax2tf.call_tf(f_tf))(x)
res_jax = (3., x)
self.assertAllClose(res_call_tf, res_jax)
res_call_tf_jit = jax.jit(jax2tf.call_tf(f_tf))(x)
self.assertAllClose(res_call_tf_jit, res_jax)
@_parameterized_jit
def test_with_var_read(self, with_jit=True):
# The variable is placed on the default TF device.
outer_var_array = np.array([3., 4.], dtype=np.float32)
outer_var = tf.Variable(outer_var_array)
def fun_tf(x):
return x * outer_var + 1.
x = np.array([2., 5.,], dtype=np.float32)
res = _maybe_jit(with_jit, jax2tf.call_tf(fun_tf))(x)
self.assertAllClose(x * outer_var_array + 1., res, check_dtypes=False)
@_parameterized_jit
def test_with_var_read_x64(self, with_jit=True):
outer_var_array = np.array([3., 4.], dtype=np.float64)
outer_var = tf.Variable(outer_var_array)
def fun_tf(x):
return x * tf.cast(outer_var, x.dtype) + 1.
x = np.array([2., 5.,], dtype=np.float32)
res = _maybe_jit(with_jit, jax2tf.call_tf(fun_tf))(x)
self.assertAllClose(x * outer_var_array + 1., res, check_dtypes=False)
def test_with_var_different_shape(self):
# See https://github.com/jax-ml/jax/issues/6050
v = tf.Variable((4., 2.), dtype=tf.float32)
def tf_func(x):
return v + x
x = np.float32(123.)
tf_out = tf_func(x)
jax_func = jax.jit(jax2tf.call_tf(tf_func))
jax_out = jax_func(x)
self.assertAllClose(tf_out, jax_out, check_dtypes=False)
@_parameterized_jit
def test_with_var_write_error(self, with_jit=True):
if with_jit:
raise unittest.SkipTest("variable writes not yet working")
outer_var = tf.Variable(3., dtype=np.float32)
def fun_tf(x):
outer_var.assign(tf.constant(4.))
return x * outer_var + 1.
x = np.float32(2.)
res = _maybe_jit(with_jit, jax2tf.call_tf(fun_tf))(x)
self.assertAllClose(x * 4. + 1, res, check_dtypes=False)
@_parameterized_jit
def test_with_tensor_capture(self, with_jit=True):
outer_tensor = tf.constant(3., dtype=np.float32)
def fun_tf(x):
return x * outer_tensor + 1.
x = np.float32(2.)
res = _maybe_jit(with_jit, jax2tf.call_tf(fun_tf))(x)
self.assertAllClose(x * 3. + 1., res, check_dtypes=False)
@_parameterized_jit
def test_with_tensor_capture_x64(self, with_jit=True):
outer_tensor = tf.constant(3., dtype=np.float64)
def fun_tf(x):
return x * tf.cast(outer_tensor * 3.14, tf.float32) + 1.
x = np.float32(2.)
res = _maybe_jit(with_jit, jax2tf.call_tf(fun_tf))(x)
self.assertAllClose(x * 3. * 3.14 + 1., res, check_dtypes=False)
@_parameterized_jit
def test_with_value_capture(self, with_jit=True):
outer_val = np.array(3., dtype=np.float32)
def fun_tf(x):
return x * outer_val + 1.
x = np.float32(2.)
res = _maybe_jit(with_jit, jax2tf.call_tf(fun_tf))(x)
self.assertAllClose(x * 3. + 1., res, check_dtypes=False)
@_parameterized_jit
def test_with_multiple_capture(self, with_jit=True):
if jtu.test_device_matches(["gpu"]):
raise unittest.SkipTest("Test fails on GPU")
v2 = tf.Variable(2., dtype=np.float32)
v3 = tf.Variable(3., dtype=np.float32)
t4 = tf.constant(4., dtype=np.float32)
t5 = tf.constant(5., dtype=np.float32)
def fun_tf(x):
return (x * v3 + t4 + v2) * v3 + t5
x = np.float32(2.)
res = _maybe_jit(with_jit, jax2tf.call_tf(fun_tf))(x)
self.assertAllClose((x * 3. + 4. + 2.) * 3. + 5., res, check_dtypes=False)
def test_with_capture_then_convert_again(self):
captured_by_tf = tf.Variable(np.arange(1024, dtype=np.float32))
def tf_fn(x):
return tf.math.add(x, captured_by_tf)
x = np.arange(1024, dtype=np.float32)
res = jax2tf.convert(jax2tf.call_tf(tf_fn))(x)
self.assertAllClose(res, 2 * x)
# The bug appears only when we use non-eager mode on the converted func
res = tf.function(jax2tf.convert(jax2tf.call_tf(tf_fn)),
autograph=False)(x)
self.assertAllClose(res, 2 * x)
@_parameterized_jit
def test_grad(self, with_jit=False):
x = np.float32(3.)
res = _maybe_jit(with_jit, jax.grad(jax2tf.call_tf(tf.math.sin)))(x)
self.assertAllClose(np.cos(x), res)
@_parameterized_jit
def test_grad_pytree(self, with_jit=False):
def fun_tf(x: dict, y: tuple) -> tuple:
return x["first"] * x["second"] + 3. * y[0] + 4. * y[1]
x = dict(first=np.float32(3.), second=np.float32(4.))
y = (np.float32(5.), np.float32(6.))
grad_x = _maybe_jit(with_jit, jax.grad(jax2tf.call_tf(fun_tf)))(x, y)
self.assertAllClose(
dict(first=np.float32(4.), second=np.float32(3.)), grad_x)
def test_grad_nested(self):
# We embed the call_tf function in a larger function whose gradient we take
# It is relevant here that the cotangents flowing through the call_tf
# function are not scalars.
b = np.array([[11., 12., 13.], [21., 22., 23.]], dtype=np.float32) # [2, 3]
c = np.array([[31., 32.], [41., 42.], [51., 52.], [61., 62.]], dtype=np.float32) # [4, 2]
x_dict = dict(b=b, c=c) # b:[2, 3], c=[4, 2]
# res: dict(r:[4, 3], s:[4, 2])
def f_tf(x_dict):
return dict(r=tf.matmul(x_dict["c"], x_dict["b"]), s=7. * x_dict["c"])
@jax.jit # To recognize it in jaxpr
def f_jax(x_dict):
return dict(r=jnp.matmul(x_dict["c"], x_dict["b"]), s=7. * x_dict["c"])
def loss(functional, x_dict):
prediction = functional(x_dict) # r:[4, 3], s:[4, 2]
weights = np.array([1., 2., 3., 4.], dtype=np.float32) # [4]
weighted_pred = jnp.matmul(weights, prediction["r"]) # [3]
return jnp.sum(weighted_pred) + 4. * jnp.sum(prediction["s"])
g_fun_with_tf = jax.grad(partial(loss, jax2tf.call_tf(f_tf)))
g_fun_with_jax = jax.grad(partial(loss, f_jax))
g_tf = g_fun_with_tf(x_dict)
g_jax = g_fun_with_jax(x_dict)
self.assertAllClose(g_jax, g_tf)
def test_grad_int_argument(self):
# Similar to https://github.com/jax-ml/jax/issues/6975
# state is a pytree that contains an integer and a boolean.
# The function returns an integer and a boolean.
def f(param, state, x):
return param * x, state
param = np.array([0.7, 0.9], dtype=np.float32)
state = dict(array=np.float32(1.), counter=7, truth=True)
x = np.float32(3.)
# tf.function is important, without it the bug does not appear
f_call_tf = jax2tf.call_tf(f)
g_call_tf = jax.grad(lambda *args: jnp.sum(f_call_tf(*args)[0]))(param, state, x)
g = jax.grad(lambda *args: jnp.sum(f(*args)[0]))(param, state, x)
self.assertAllClose(g_call_tf, g)
def test_grad_int_argument_unused(self):
batch_size = 5
inputs = np.ones((batch_size, 3), dtype=np.float32)
rng = np.array([1, 2], dtype=np.uint32)
params = np.float32(.5)
# rng is integer, unused
def jax_model(params, rng, inputs):
return jnp.ones([batch_size, 2], dtype=jnp.float32)
tf_model = jax2tf.convert(jax_model, with_gradient=True)
def _loss_fn(inference_fn, params, rng, inputs):
prediction = inference_fn(params, rng, inputs)
return jnp.mean(prediction)
jax_loss_fn = partial(_loss_fn, jax_model)
jax_grad = jax.grad(jax_loss_fn)(params, rng, inputs)
paramsv = tf.Variable(params)
with tf.GradientTape() as tape:
tf_prediction = tf_model(paramsv, rng, inputs)
tf_loss = tf.reduce_mean(tf_prediction)
tf_grad = tape.gradient(tf_loss, paramsv)
self.assertAllClose(jax_grad, tf_grad.numpy())
call_tf_loss_fn = partial(_loss_fn, jax2tf.call_tf(tf_model))
call_tf_grad = jax.grad(call_tf_loss_fn)(params, rng, inputs)
self.assertAllClose(jax_grad, call_tf_grad)
def test_grad_with_float0_result(self):
# Gradient over integer-argument functions, with float0 result
def f_jax(x, y): # x is an int, y is a float; res is a (int, float)
return (2 * x, 2 * x + y * y)
def f_tf(x, y):
# TF needs explicit casts
return (2 * x, tf.cast(2 * x, dtype=y.dtype) + y * y)
def wrapper(functional, x, y): # x: i32
return jnp.sum(2. * functional(3 * x, 4. * y)[1])
grad_g = jax.grad(partial(wrapper, f_jax),
allow_int=True, argnums=(0, 1))
grad_g_call_tf = jax.grad(partial(wrapper, jax2tf.call_tf(f_tf)),
allow_int=True, argnums=(0, 1))
x = np.int32(2)
y = np.float32(3.)
g_jax = grad_g(x, y)
g_call_tf = grad_g_call_tf(x, y)
self.assertEqual(g_jax[0].dtype, dtypes.float0)
self.assertEqual(g_call_tf[0].dtype, dtypes.float0)
self.assertAllClose(g_jax[1], g_call_tf[1])
@_parameterized_jit
def test_grad_custom(self, with_jit=False):
@tf.custom_gradient
def func_square_tf(x):
# Like x ** 2, but with custom grad 3. * x
def grad(dy, variables=None):
# dy, = dys
return 3. * x * dy,
return x * x, grad
x = np.float32(4.)
grad_x = _maybe_jit(with_jit, jax.grad(jax2tf.call_tf(func_square_tf)))(x)
self.assertAllClose(np.float32(3.) * x, grad_x)
@parameterized.named_parameters(
dict(
testcase_name=f"_{degree=}{'_jit' if with_jit else ''}",
degree=degree,
with_jit=with_jit)
for degree in [1, 2, 3, 4]
for with_jit in [True, False])
def test_higher_order_grad(self, degree=2, with_jit=False):
def fun_tf(x):
return 2. * x * x * x
def fun_jax(x):
return 3. * _maybe_jit(with_jit, jax2tf.call_tf(fun_tf))(x)
def fun_jax_pure(x):
return 3. * fun_tf(x)
grad_jax = fun_jax
grad_jax_pure = fun_jax_pure
for _ in range(degree):
grad_jax = jax.grad(grad_jax)
grad_jax_pure = jax.grad(grad_jax_pure)
res_jax = grad_jax(np.float32(5.))
logging.info("Grad of %s degree is %s", degree, res_jax)
self.assertAllClose(res_jax, grad_jax_pure(np.float32(5.)))
def test_pmap(self):
logging.info("Running test_pmap on %s devices", jax.local_device_count())
def plus_2_tf(x):
return tf.math.add(2., x)
def fun_jax(x):
return np.float32(3.) * jax2tf.call_tf(plus_2_tf)(x)
x = np.arange(jax.local_device_count(), dtype=np.float32)
res = jax.pmap(fun_jax)(x)
self.assertAllClose(np.float32(3. * (x + 2)), res)
def test_function_compile_time_constant_inputs(self):
# Call a function for which shape inference does not give an output
# shape.
x = np.array([1, 2, 3], dtype=np.int32)
def fun_tf(x): # x:i32[3]
# Indexing with a dynamic slice makes the TF shape inference return
# a partially known shape.
end_idx = x[1]
res = x[0:end_idx]
return res
# Call in eager mode. Should work!
res1 = jax2tf.call_tf(fun_tf)(x)
self.assertAllClose(x[0:x[1]], res1)
# Now under jit, should fail because the function is not compilable
with self.assertRaisesRegex(ValueError, _call_tf_dynamic_shape_error):
fun_jax = jax.jit(jax2tf.call_tf(fun_tf))
fun_jax(x)
def test_experimental_get_compiler_ir_design_doc(self):
# Not a test of call_tf, but more of how experimental_get_compiler_ir works.
# Examples are from the design doc.
# Constant slice. This is the common case.
x = np.zeros((10,), dtype=np.int32)
def fun_tf(x):
begin = 0
return x[begin:5]
hlo = tf.function(fun_tf, jit_compile=True, autograph=False).experimental_get_compiler_ir(x)()
self.assertIn("(arg0.1: s32[10]) -> s32[5]", hlo)
# Non-constant slice, but compile-time constant depending only on values.
x = np.zeros((10,), dtype=np.int32)
# Non-constant slice, but compile-time constant depending only on shapes.
x = np.zeros((10,), dtype=np.int32)
def fun_tf(x):
begin = tf.shape(x)[0] - 2 # begin is a compile-time constant, even if x is not
return x[begin:]
hlo = tf.function(fun_tf, jit_compile=True, autograph=False).experimental_get_compiler_ir(x)()
self.assertIn("(arg0.1: s32[10]) -> s32[2]", hlo)
# Capture a variable
outer_var = tf.Variable(np.array([3.], dtype=np.float32))
x = np.array([2., 3., 4.], dtype=np.float32)
def fun_tf(x):
return x * tf.broadcast_to(outer_var, x.shape) + 1.
hlo = tf.function(fun_tf, jit_compile=True, autograph=False).experimental_get_compiler_ir(x)()
self.assertRegex(
hlo, r"\(arg0.[0-9]+: f32\[3\], arg1.[0-9]+: f32\[1\]\) -> f32\[3\]"
)
# Capture a constant
outer_ct = np.array([3.], dtype=np.float32)
x = np.array([2., 3., 4.], dtype=np.float32)
def fun_tf(x):
return x * tf.broadcast_to(outer_ct, x.shape) + 1.
hlo = tf.function(fun_tf, jit_compile=True, autograph=False).experimental_get_compiler_ir(x)()
self.assertIn("(arg0.1: f32[3]) -> f32[3]", hlo)
# Call get_compiler_ir in a function context
x = np.array([2., 3., 4.], dtype=np.float32)
def fun_tf_outer(x):
x_const = tf.constant(0, shape=x.shape, dtype=x.dtype)
_ = tf.function(tf.math.sin, jit_compile=True, autograph=False).experimental_get_compiler_ir(x_const)()
# TODO(b/193754660)
# with self.assertRaisesRegex(
# TypeError, "An op outside of the function building code is being passed"):
# tf.function(fun_tf_outer)(x)
#
# with self.assertRaisesRegex(
# TypeError, "An op outside of the function building code is being passed"):
# tf.function(fun_tf_outer, jit_compile=True)(x)
# Call get_concrete_function in a graph context
def fun_tf_outer_2(x):
_ = tf.function(tf.math.sin, jit_compile=True).get_concrete_function(tf.TensorSpec(x.shape, x.dtype))
return x
# Outside of a function context, this works.
_ = tf.function(fun_tf_outer_2)(x)
_ = tf.function(fun_tf_outer_2, jit_compile=True)(x)
def test_repro_193754660(self):
# Try to reproduce b/193754660. I can't.
# We have to have tf.function(jax2tf.convert(jax2tf.call_tf(f_tf))).
# The get_compiler_ir will indeed fail for f_tf. Then we try to use
# shape inference for f_tf.
# I thought to use a f_tf that uses an op without shape inference, e.g.,
# tfxla.gather. If we wash it through a saved_model I expect that shape
# inference would not work on it. Instead, shape inference works!!!
x = np.array([0, 1, 2, 3, 4, 5], dtype=np.int32)
def f_jax(x):
return x[1]
f_tf = jax2tf.convert(f_jax)
f_tf_rt, _ = tf_test_util.SaveAndLoadFunction(f_tf, input_args=[x])
f_jax2 = jax2tf.call_tf(f_tf_rt)
f_tf2 = jax2tf.convert(f_jax2)
res = tf.function(f_tf2, autograph=False)(x)
self.assertAllClose(res.numpy(), f_jax(x))
def test_effectful(self):
x = np.ones((3,), dtype=np.float32)
lower_effect = jax.jit(jax2tf.call_tf(tf.math.sin, has_side_effects=True)).lower(x)
self.assertNotEmpty(lower_effect._lowering.compile_args["unordered_effects"])
lower_no_effect = jax.jit(jax2tf.call_tf(tf.math.sin, has_side_effects=False)).lower(x)
self.assertEmpty(lower_no_effect._lowering.compile_args["unordered_effects"])
def test_module_documentation(self):
def cos_tf(x):
return tf.math.cos(x)
# Compute cos with TF and sin with JAX
def cos_tf_sin_jax(x):
return jax.numpy.sin(jax2tf.call_tf(cos_tf)(x))
# Calls `cos_tf` in TF eager mode
x = np.float32(1.)
cos_tf_sin_jax(x)
# Compiles `cos_tf` using TF and embeds the XLA computation into the JAX
# XLA computation (containing `sin`). The XLA compiler may even be able to
# fuse through JAX-TF computations.
jax.jit(cos_tf_sin_jax)(x)
# Uses TF gradient for `cos_tf` and JAX gradient for `sin`
jax.grad(cos_tf_sin_jax)(x)
logging.info(jax.make_jaxpr(cos_tf_sin_jax)(x))
def test_tf_gather(self):
"""tf_gather gradient output is tf.IndexSlices."""
operand = jnp.array(np.random.uniform(size=(100, 128)))
indices = jnp.array(np.random.randint(low=0, high=100, size=(4000,)))
@tf.function(jit_compile=True, autograph=False)
def fun_tf(operand, indices):
return tf.experimental.numpy.std(tf.gather(operand, indices))
fun_jax = jax2tf.call_tf(fun_tf)
grad_fun_jax = jax.grad(fun_jax)
grad_res = grad_fun_jax(operand, indices)
self.assertEqual(grad_res.shape, (100, 128))
def test_output_shape_dtype_none(self):
x = jnp.zeros((10), dtype=jnp.float32)
@tf.function(jit_compile=True, autograph=False)
def fun_tf(x): # pylint: disable=unused-argument
return
fun_jax_1 = jax2tf.call_tf(fun_tf, output_shape_dtype=None)
fun_jax_2 = jax2tf.call_tf(fun_tf)
self.assertIsNone(fun_jax_1(x))
self.assertIsNone(fun_jax_2(x))
fun_jax_3 = jax2tf.call_tf(
fun_tf, output_shape_dtype=jax.ShapeDtypeStruct((10,), jnp.float32)
)
with self.assertRaisesRegex(
ValueError,
"The pytree of the TensorFlow function results does not match the"
" pytree of the declared output_shape_dtype",
):
_ = fun_jax_3(x)
def test_output_shape_dtype_not_none(self):
x = jnp.zeros((10), dtype=jnp.float32)
@tf.function(jit_compile=True, autograph=False)
def fun_tf(x):
return x
fun_jax_1 = jax2tf.call_tf(
fun_tf, output_shape_dtype=jax.ShapeDtypeStruct((10,), jnp.float32)
)
fun_jax_2 = jax2tf.call_tf(fun_tf)
self.assertAllClose(fun_jax_1(x), fun_jax_2(x))
fun_jax_3 = jax2tf.call_tf(fun_tf, output_shape_dtype=None)
with self.assertRaisesRegex(
ValueError,
"The pytree of the TensorFlow function results does not match the"
" pytree of the declared output_shape_dtype",
):
_ = fun_jax_3(x)
def test_multi_platform(self):
def tf_fun(x):
return tf.math.sin(x)
def f_jax(x):
return jnp.cos(jax2tf.call_tf(tf_fun)(jnp.cos(x)))
x = np.arange(12, dtype=np.float32).reshape((3, 4))
# Find platforms that are available for both JAX and TF
# Pick one device from each available platform
jax_platforms = []
for backend in ["cpu", "gpu", "tpu"]:
try:
devices = jax.devices(backend)
except RuntimeError:
devices = []
if devices:
jax_platforms.append(devices[0].platform)
jax_and_tf_platforms = (
set(jax_platforms) & {d.device_type.lower()
for d in self.tf_devices})
lowering_platforms = ("tpu", "cpu", "cuda")
exp = export.export(jax.jit(f_jax),
platforms=lowering_platforms)(x)
for jax_platform in jax_and_tf_platforms:
with self.subTest(jax_platform):
jax_device = jax.devices(jax_platform)[0]
x_device = jax.device_put(x, jax_device)
logging.info("Running harness natively on %s", jax_device)
native_res = f_jax(x_device)
logging.info("Running exported harness on %s", jax_device)
exported_res = exp.call(x_device)
self.assertAllClose(native_res, exported_res)
def test_multi_platform_call_tf_graph(self):
def tf_fun(x):
return tf.math.sin(x)
def f_jax(x):
return jnp.cos(jax2tf.call_tf(tf_fun,
call_tf_graph=True,
ordered=True)(jnp.cos(x)))
x = np.arange(12, dtype=np.float32).reshape((3, 4))
# When we use call_tf_graph we can serialize for multiple platforms
lowering_platforms = ("tpu", "cpu", "cuda")
# We must use jax2tf.convert to run a call_tf(call_tf_graph)
# TODO(necula): if we remove the tf.function and we have multiple platforms
# then we attempt to lower call_tf multiple times and only the first
# lowering will have the proper side effects for the function_list.
f_tf = tf.function(jax2tf.convert(
f_jax,
native_serialization_platforms=lowering_platforms))
for tf_device in self.tf_devices:
with self.subTest(tf_device.device_type):
logging.info(
f"Running on tf_device = {tf_device} of device_type = {tf_device.device_type}")
with tf.device(tf_device):
res = f_tf(x)
self.assertAllClose(res, f_jax(x))
@parameterized.named_parameters(
{"testcase_name": f"_type={type_.name}", "type_": type_}
for type_ in dlpack.SUPPORTED_DTYPES_SET
)
def test_avoid_copy_between_gpu_and_cpu(self, type_):
try:
gpu_devices = jax.devices("gpu")
except RuntimeError:
gpu_devices = []
if not gpu_devices:
raise unittest.SkipTest("Test requires a GPU device.")
def tf_fun(x):
if type_ == np.dtype('bool'):
return tf.math.logical_or(x, True)
else:
return x + 1
jax_array_on_gpu = jnp.zeros([1], type_, device=gpu_devices[0])
# Since the input array is already on a GPU device, we expect that no memory
# copy occurs between GPU and CPU. Thus, we expect no errors raised by the
# transfer guard.
# There are two exceptions:
# First, when dtype is "int32". This is because almost all TensorFlow
# kernels for GPU devices keep int32 tensors in host memory.
# (https://github.com/tensorflow/tensorflow/blob/4eb3e36d1b0cd511e1677e740bd093f42365cf9f/tensorflow/python/eager/pywrap_tensor.cc#L352-L354)
# Hence, for "int32", we do expect a "host-to-device" copy.
# Second, when using PJRT C API runtime. This is because it currently skips dlpack
# to workaround "PJRT C API does not support GetDefaultLayout" runtime error.
# https://github.com/openxla/xla/blob/762bde36adf22792e91c38fe87cabe5af05bfadc/xla/pjrt/pjrt_c_api_client.h#L285-L289
@contextlib.contextmanager
def _transfer_guard(guard_level):
with contextlib.ExitStack() as stack:
stack.enter_context(jax.transfer_guard_device_to_device(guard_level))
stack.enter_context(jax.transfer_guard_device_to_host(guard_level))
if type_ != jnp.int32:
stack.enter_context(jax.transfer_guard_host_to_device(guard_level))
yield
with _transfer_guard("disallow_explicit"):
jax2tf.call_tf(tf_fun)(jax_array_on_gpu)
@jtu.thread_unsafe_test_class()
| CallTfTest |
python | django__django | tests/contenttypes_tests/test_migrations.py | {
"start": 393,
"end": 1442
} | class ____(TransactionTestCase):
databases = {"default", "other"}
available_apps = ["django.contrib.auth", "django.contrib.contenttypes"]
def test_add_legacy_name_other_database(self):
# add_legacy_name() should update ContentType objects in the specified
# database. Remove ContentTypes from the default database to distinct
# from which database they are fetched.
Permission.objects.all().delete()
ContentType.objects.all().delete()
# ContentType.name in the current version is a property and cannot be
# set, so an AttributeError is raised with the other database.
with self.assertRaises(AttributeError):
with connections["other"].schema_editor() as editor:
remove_content_type_name.add_legacy_name(apps, editor)
# ContentType were removed from the default database.
with connections[DEFAULT_DB_ALIAS].schema_editor() as editor:
remove_content_type_name.add_legacy_name(apps, editor)
| MultiDBRemoveContentTypeNameTests |
python | pytorch__pytorch | torch/distributed/checkpoint/_experimental/checkpoint_process.py | {
"start": 1673,
"end": 13096
} | class ____:
"""
A checkpoint writer that writes checkpoints to a remote process.
"""
def __init__(
self,
rank_info: RankInfo,
config: CheckpointProcessConfig,
subprocess_init_fn: Callable[[Any], None],
subprocess_init_args: tuple[Any, ...],
checkpoint_writer_init_fn: Callable[..., CheckpointWriter],
checkpoint_writer_init_args: dict[str, Any],
):
self._executor = ThreadPoolExecutor(max_workers=1)
self._rank_info = rank_info
self._config = config
self._subprocess_init_fn = subprocess_init_fn
self._subprocess_init_args = subprocess_init_args
self._checkpoint_writer_init_fn = checkpoint_writer_init_fn
self._checkpoint_writer_init_args = checkpoint_writer_init_args
self.process = None
self._parent_end: Optional[Connection] = None
self._child_end: Optional[Connection] = None
self.process_creation_future = self._executor.submit(
self._create_subprocess,
config,
)
def _create_subprocess(
self,
config: CheckpointProcessConfig,
) -> None:
logger.info(
"Creating checkpoint subprocess for rank %d", self._rank_info.global_rank
)
spawn_context = mp.get_context("spawn")
self._parent_end, child_end = spawn_context.Pipe()
# Known workaround for https://github.com/pytorch/pytorch/issues/37377
os.environ["MKL_SERVICE_FORCE_INTEL"] = "GNU"
logger.debug("Spawning subprocess for rank_info=%s", self._rank_info)
self.process = mp.spawn(
fn=CheckpointProcess._subprocess,
args=(
self._rank_info,
child_end,
self._subprocess_init_fn,
self._subprocess_init_args,
self._checkpoint_writer_init_fn,
self._checkpoint_writer_init_args,
),
nprocs=1,
join=False,
daemon=True,
)
# close the child end of the pipe so recv on it will fail
# fast when the child process is terminated unexpectedly.
child_end.close()
self._send(
request_type=RequestType.PING,
payload={},
)
logger.debug(
"Waiting for checkpoint subprocess to initialize (timeout: %ds)",
config.subprocess_init_timeout_secs,
)
# wait for the timeout or a response from subprocess
if self._parent_end is None:
raise AssertionError("Parent end of pipe should be initialized")
if not self._parent_end.poll(timeout=config.subprocess_init_timeout_secs):
msg = f"Timed out after {config.subprocess_init_timeout_secs}s waiting for checkpoint subprocess to initialize"
logger.error(msg)
raise TimeoutError(msg)
self._recv()
logger.info("Checkpoint subprocess initialized successfully")
@staticmethod
def _subprocess(
sub_rank: int,
rank_info: RankInfo,
parent_pipe: Connection,
subprocess_init_fn: Callable[[Any], None],
subprocess_init_args: tuple[Any, ...],
checkpoint_writer_init_fn: Callable[..., CheckpointWriter],
checkpoint_writer_init_args: dict[str, Any],
) -> None:
logger.debug(
"Checkpoint subprocess started for rank %d/%d (PID: %d)",
rank_info.global_rank,
rank_info.global_world_size,
os.getpid(),
)
if sub_rank != 0:
raise AssertionError("We need only one checkpointer per parent training")
request = WorkerRequest(request_type=RequestType.PING, payload={})
try:
# Calling initialize callback, so we can perform app-specific initialization of the subprocess.
subprocess_init_fn(*subprocess_init_args)
# Initialize checkpoint writer - automatically include rank_info in init_args
writer_init_args = dict(checkpoint_writer_init_args)
if "rank_info" not in writer_init_args:
writer_init_args["rank_info"] = rank_info
checkpoint_writer = checkpoint_writer_init_fn(**writer_init_args)
while True:
request = parent_pipe.recv()
if request.request_type == RequestType.PING:
parent_pipe.send(
WorkerResponse(request_type=RequestType.PING, success=True)
)
elif request.request_type == RequestType.WRITE_CHECKPOINT:
path = request.payload["path"]
logger.info("Writing checkpoint to %s", path)
checkpoint_writer.write(
path=path,
state_dict=request.payload["state_dict"],
**request.payload["kwargs"],
)
logger.info("Checkpoint written successfully to %s", path)
parent_pipe.send(
WorkerResponse(RequestType.WRITE_CHECKPOINT, success=True)
)
elif request.request_type == RequestType.TERMINATE_PROCESS:
logger.debug("Received termination request.")
parent_pipe.send(
WorkerResponse(RequestType.TERMINATE_PROCESS, success=True)
)
logger.info("Subprocess terminated gracefully")
break
else:
error_msg = f"Unknown request type: {request.request_type}"
logger.error(error_msg)
raise ValueError(error_msg)
except Exception as e:
error_text = traceback.format_exc()
logger.error(
"Exception in subprocess (%s): %s", type(e).__name__, error_text
)
# Communicating exception via the queue to the main process
parent_pipe.send(
WorkerResponse(
request_type=request.request_type,
success=False,
error_msg=error_text,
)
)
parent_pipe.close()
logger.exception("Subprocess terminated due to exception")
def _send(self, request_type: RequestType, payload: dict[str, Any]) -> None:
try:
if self._parent_end is None:
raise AssertionError("Parent end of pipe should be initialized")
self._parent_end.send(
WorkerRequest(
request_type=request_type,
payload=payload,
)
)
except OSError as e:
error_msg = "Child process terminated unexpectedly"
logger.exception(
"Communication failed during %s request", request_type.value
)
raise RuntimeError(error_msg) from e
def _recv(self) -> Optional[dict[str, Any]]:
try:
if self._parent_end is None:
raise AssertionError("Parent end of pipe should be initialized")
response = self._parent_end.recv()
if response.success is False:
error_msg = (
f"Unexpected response from worker process: {response.error_msg}"
)
logger.error(error_msg)
raise RuntimeError(error_msg)
return response.payload
except (EOFError, BrokenPipeError, ConnectionResetError) as e:
error_msg = f"Child process terminated unexpectedly: {e}"
logger.error(error_msg)
raise RuntimeError(error_msg) from e
def write(
self,
state_dict: Union[STATE_DICT, Future[STATE_DICT]],
path: str,
**kwargs: Any,
) -> Optional[Future[None]]:
logger.debug("Waiting for subprocess initialization to complete")
# wait until the process is started
self.process_creation_future.result()
return self._executor.submit(
self._write,
state_dict,
path,
**kwargs,
)
def _write(
self,
state_dict: Union[STATE_DICT, Future[STATE_DICT]],
path: str,
**kwargs: Any,
) -> None:
logger.debug("Starting checkpoint write to %s", path)
# wait for staging state_dict to be available
if isinstance(state_dict, Future):
logger.debug("Waiting for state_dict Future to resolve")
sd = state_dict.result()
else:
sd = state_dict
# Log state_dict info only if debug logging is enabled (performance-conscious)
if logger.isEnabledFor(logging.DEBUG):
if hasattr(sd, "keys"):
logger.debug("State_dict contains %d keys", len(sd.keys()))
self._send(
request_type=RequestType.WRITE_CHECKPOINT,
payload={
"state_dict": sd,
"path": path,
"kwargs": kwargs,
},
)
logger.debug("Waiting for write completion response")
# wait for response
self._recv()
logger.debug("Checkpoint write to %s completed successfully", path)
def close(self) -> None:
logger.debug(
"Closing CheckpointProcess for rank %d", self._rank_info.global_rank
)
self._executor.shutdown(wait=True, cancel_futures=True)
if self.process and self.process.processes[0].is_alive():
subprocess_pid = self.process.processes[0].pid
# send graceful termination to sub process
try:
# pyrefly: ignore [missing-attribute]
self._parent_end.send(
WorkerRequest(
request_type=RequestType.TERMINATE_PROCESS,
payload={},
)
)
except BrokenPipeError:
logger.warning(
"BrokenPipeError when sending termination request - subprocess (PID: %d) may have already terminated",
subprocess_pid,
)
# subprocess terminated unexpectedly and below code will raise a
# ProcessExitedException.
logger.debug(
"Waiting for subprocess to terminate gracefully (timeout: %ds)",
self._config.subprocess_shutdown_timeout_secs,
)
try:
if not self.process.join(
timeout=self._config.subprocess_shutdown_timeout_secs
):
# graceful shutdown failed, kill the process.
logger.warning(
"Subprocess (PID: %d) did not terminate gracefully within %ds, killing it",
subprocess_pid,
self._config.subprocess_shutdown_timeout_secs,
)
self.process.processes[0].kill()
logger.info("Subprocess killed forcefully")
except ProcessExitedException:
logger.exception("ProcessExitedException during subprocess termination")
raise
logger.debug("CheckpointProcess closed successfully")
| CheckpointProcess |
python | tensorflow__tensorflow | tensorflow/python/tpu/embedding_context_utils.py | {
"start": 785,
"end": 965
} | class ____(threading.local):
def __init__(self):
super().__init__()
self.enabled = True
embedding_pipelining_state = _EmbeddingPipeliningState()
| _EmbeddingPipeliningState |
python | huggingface__transformers | src/transformers/models/prompt_depth_anything/image_processing_prompt_depth_anything.py | {
"start": 1574,
"end": 3453
} | class ____(ImagesKwargs, total=False):
r"""
keep_aspect_ratio (`bool`, *optional*):
If `True`, the image is resized to the largest possible size such that the aspect ratio is preserved.
ensure_multiple_of (`int`, *optional*):
If `do_resize` is `True`, the image is resized to a size that is a multiple of this value.
prompt_scale_to_meter (`float`, *optional*):
Scale factor to convert the prompt depth to meters.
"""
keep_aspect_ratio: bool
ensure_multiple_of: int
size_divisor: int
prompt_scale_to_meter: float
def _constrain_to_multiple_of(val, multiple, min_val=0, max_val=None):
x = round(val / multiple) * multiple
if max_val is not None and x > max_val:
x = math.floor(val / multiple) * multiple
if x < min_val:
x = math.ceil(val / multiple) * multiple
return x
def _get_resize_output_image_size(
input_image: np.ndarray,
output_size: tuple[int, int],
keep_aspect_ratio: bool,
multiple: int,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> tuple[int, int]:
input_height, input_width = get_image_size(input_image, input_data_format)
output_height, output_width = output_size
# determine new height and width
scale_height = output_height / input_height
scale_width = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width) < abs(1 - scale_height):
# fit width
scale_height = scale_width
else:
# fit height
scale_width = scale_height
new_height = _constrain_to_multiple_of(scale_height * input_height, multiple=multiple)
new_width = _constrain_to_multiple_of(scale_width * input_width, multiple=multiple)
return (new_height, new_width)
| PromptDepthAnythingImageProcessorKwargs |
python | protocolbuffers__protobuf | python/google/protobuf/internal/message_test.py | {
"start": 118619,
"end": 119957
} | class ____(unittest.TestCase):
def assertImportFromName(self, msg, base_name):
# Parse <type 'module.class_name'> to extra 'some.name' as a string.
tp_name = str(type(msg)).split("'")[1]
valid_names = ('Repeated%sContainer' % base_name,
'Repeated%sFieldContainer' % base_name)
self.assertTrue(
any(tp_name.endswith(v) for v in valid_names),
'%r does end with any of %r' % (tp_name, valid_names))
parts = tp_name.split('.')
class_name = parts[-1]
module_name = '.'.join(parts[:-1])
__import__(module_name, fromlist=[class_name])
def testTypeNamesCanBeImported(self):
# If import doesn't work, pickling won't work either.
pb = unittest_pb2.TestAllTypes()
self.assertImportFromName(pb.repeated_int32, 'Scalar')
self.assertImportFromName(pb.repeated_nested_message, 'Composite')
# We can only test this case under proto2, because proto3 will reject invalid
# UTF-8 in the parser, so there should be no way of creating a string field
# that contains invalid UTF-8.
#
# We also can't test it in pure-Python, which validates all string fields for
# UTF-8 even when the spec says it shouldn't.
@unittest.skipIf(api_implementation.Type() == 'python',
'Python can\'t create invalid UTF-8 strings')
@testing_refleaks.TestCase
| ValidTypeNamesTest |
python | ray-project__ray | rllib/core/models/tests/test_mlp_heads.py | {
"start": 256,
"end": 3395
} | class ____(unittest.TestCase):
def test_mlp_heads(self):
"""Tests building MLP heads properly and checks for correct architecture."""
# Loop through different combinations of hyperparameters.
inputs_dims_configs = [[1], [50]]
list_of_hidden_layer_dims = [[], [1], [64, 64], [512, 512]]
hidden_layer_activations = ["linear", "relu", "swish"]
hidden_layer_use_layernorms = [False, True]
# Can only test even `output_dims` for FreeLogStdMLPHeadConfig.
output_dims = [2, 50]
output_activations = hidden_layer_activations
hidden_use_biases = [False, True]
output_use_biases = [False, True]
free_stds = [False, True]
for permutation in itertools.product(
inputs_dims_configs,
list_of_hidden_layer_dims,
hidden_layer_activations,
hidden_layer_use_layernorms,
output_activations,
output_dims,
hidden_use_biases,
output_use_biases,
free_stds,
):
(
inputs_dims,
hidden_layer_dims,
hidden_layer_activation,
hidden_layer_use_layernorm,
output_activation,
output_dim,
hidden_use_bias,
output_use_bias,
free_std,
) = permutation
print(
f"Testing ...\n"
f"input_dims: {inputs_dims}\n"
f"hidden_layer_dims: {hidden_layer_dims}\n"
f"hidden_layer_activation: {hidden_layer_activation}\n"
f"hidden_layer_use_layernorm: {hidden_layer_use_layernorm}\n"
f"output_activation: {output_activation}\n"
f"output_dim: {output_dim}\n"
f"free_std: {free_std}\n"
f"hidden_use_bias: {hidden_use_bias}\n"
f"output_use_bias: {output_use_bias}\n"
)
config_cls = FreeLogStdMLPHeadConfig if free_std else MLPHeadConfig
config = config_cls(
input_dims=inputs_dims,
hidden_layer_dims=hidden_layer_dims,
hidden_layer_activation=hidden_layer_activation,
hidden_layer_use_layernorm=hidden_layer_use_layernorm,
hidden_layer_use_bias=hidden_use_bias,
output_layer_dim=output_dim,
output_layer_activation=output_activation,
output_layer_use_bias=output_use_bias,
)
# Use a ModelChecker to compare all added models (different frameworks)
# with each other.
model_checker = ModelChecker(config)
# Add this framework version of the model to our checker.
outputs = model_checker.add(framework="torch", obs=False)
self.assertEqual(outputs.shape, (1, output_dim))
# Check all added models against each other.
model_checker.check()
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
| TestMLPHeads |
python | automl__auto-sklearn | autosklearn/pipeline/components/regression/ard_regression.py | {
"start": 464,
"end": 4454
} | class ____(AutoSklearnRegressionAlgorithm):
def __init__(
self,
n_iter,
tol,
alpha_1,
alpha_2,
lambda_1,
lambda_2,
threshold_lambda,
fit_intercept,
random_state=None,
):
self.random_state = random_state
self.estimator = None
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.threshold_lambda = threshold_lambda
self.fit_intercept = fit_intercept
def fit(self, X, y):
from sklearn.linear_model import ARDRegression
self.n_iter = int(self.n_iter)
self.tol = float(self.tol)
self.alpha_1 = float(self.alpha_1)
self.alpha_2 = float(self.alpha_2)
self.lambda_1 = float(self.lambda_1)
self.lambda_2 = float(self.lambda_2)
self.threshold_lambda = float(self.threshold_lambda)
self.fit_intercept = check_for_bool(self.fit_intercept)
self.estimator = ARDRegression(
n_iter=self.n_iter,
tol=self.tol,
alpha_1=self.alpha_1,
alpha_2=self.alpha_2,
lambda_1=self.lambda_1,
lambda_2=self.lambda_2,
compute_score=False,
threshold_lambda=self.threshold_lambda,
fit_intercept=True,
normalize=False,
copy_X=False,
verbose=False,
)
if y.ndim == 2 and y.shape[1] == 1:
y = y.flatten()
self.estimator.fit(X, y)
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
return self.estimator.predict(X)
@staticmethod
def get_properties(dataset_properties=None):
return {
"shortname": "ARD",
"name": "ARD Regression",
"handles_regression": True,
"handles_classification": False,
"handles_multiclass": False,
"handles_multilabel": False,
"handles_multioutput": False,
"prefers_data_normalized": True,
"is_deterministic": True,
"input": (DENSE, UNSIGNED_DATA),
"output": (PREDICTIONS,),
}
@staticmethod
def get_hyperparameter_search_space(
feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None
):
cs = ConfigurationSpace()
n_iter = UnParametrizedHyperparameter("n_iter", value=300)
tol = UniformFloatHyperparameter(
"tol", 10**-5, 10**-1, default_value=10**-3, log=True
)
alpha_1 = UniformFloatHyperparameter(
name="alpha_1", lower=10**-10, upper=10**-3, default_value=10**-6
)
alpha_2 = UniformFloatHyperparameter(
name="alpha_2",
log=True,
lower=10**-10,
upper=10**-3,
default_value=10**-6,
)
lambda_1 = UniformFloatHyperparameter(
name="lambda_1",
log=True,
lower=10**-10,
upper=10**-3,
default_value=10**-6,
)
lambda_2 = UniformFloatHyperparameter(
name="lambda_2",
log=True,
lower=10**-10,
upper=10**-3,
default_value=10**-6,
)
threshold_lambda = UniformFloatHyperparameter(
name="threshold_lambda",
log=True,
lower=10**3,
upper=10**5,
default_value=10**4,
)
fit_intercept = UnParametrizedHyperparameter("fit_intercept", "True")
cs.add_hyperparameters(
[
n_iter,
tol,
alpha_1,
alpha_2,
lambda_1,
lambda_2,
threshold_lambda,
fit_intercept,
]
)
return cs
| ARDRegression |
python | walkccc__LeetCode | solutions/3419. Minimize the Maximum Edge Weight of Graph/3419.py | {
"start": 0,
"end": 830
} | class ____:
def minMaxWeight(self, n: int, edges: list[list[int]], threshold: int) -> int:
MAX = 1000000
reversedGraph = [[] for _ in range(n)]
for u, v, w in edges:
reversedGraph[v].append((u, w))
l = 1
r = MAX + 1
while l < r:
m = (l + r) // 2
if self._dfs(reversedGraph, 0, m, set()) == n:
r = m
else:
l = m + 1
return -1 if l == MAX + 1 else l
def _dfs(
self,
reversedGraph: list[list[tuple]],
u: int,
maxWeight: int,
seen: set[int]
) -> int:
"""Returns the number of nodes reachable from u with weight <= maxWeight."""
res = 1
seen.add(u)
for v, w in reversedGraph[u]:
if w > maxWeight or v in seen:
continue
res += self._dfs(reversedGraph, v, maxWeight, seen)
return res
| Solution |
python | numpy__numpy | numpy/f2py/tests/test_size.py | {
"start": 56,
"end": 1155
} | class ____(util.F2PyTest):
sources = [util.getpath("tests", "src", "size", "foo.f90")]
@pytest.mark.slow
def test_all(self):
r = self.module.foo([[]])
assert r == [0]
r = self.module.foo([[1, 2]])
assert r == [3]
r = self.module.foo([[1, 2], [3, 4]])
assert np.allclose(r, [3, 7])
r = self.module.foo([[1, 2], [3, 4], [5, 6]])
assert np.allclose(r, [3, 7, 11])
@pytest.mark.slow
def test_transpose(self):
r = self.module.trans([[]])
assert np.allclose(r.T, np.array([[]]))
r = self.module.trans([[1, 2]])
assert np.allclose(r, [[1.], [2.]])
r = self.module.trans([[1, 2, 3], [4, 5, 6]])
assert np.allclose(r, [[1, 4], [2, 5], [3, 6]])
@pytest.mark.slow
def test_flatten(self):
r = self.module.flatten([[]])
assert np.allclose(r, [])
r = self.module.flatten([[1, 2]])
assert np.allclose(r, [1, 2])
r = self.module.flatten([[1, 2, 3], [4, 5, 6]])
assert np.allclose(r, [1, 2, 3, 4, 5, 6])
| TestSizeSumExample |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_D.py | {
"start": 176,
"end": 1992
} | class ____(Benchmark):
r"""
Damavandi objective function.
This class defines the Damavandi [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Damavandi}}(x) = \left[ 1 - \lvert{\frac{
\sin[\pi (x_1 - 2)]\sin[\pi (x2 - 2)]}{\pi^2 (x_1 - 2)(x_2 - 2)}}
\rvert^5 \right] \left[2 + (x_1 - 7)^2 + 2(x_2 - 7)^2 \right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \in
[0, 14]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0.0` for :math:`x_i = 2` for
:math:`i = 1, ..., n`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, 2)
self._bounds = list(zip([0.0] * self.N, [14.0] * self.N))
self.global_optimum = [[2 for _ in range(self.N)]]
self.fglob = np.nan
def fun(self, x, *args):
self.nfev += 1
try:
num = sin(pi * (x[0] - 2.0)) * sin(pi * (x[1] - 2.0))
den = (pi ** 2) * (x[0] - 2.0) * (x[1] - 2.0)
factor1 = 1.0 - (abs(num / den)) ** 5.0
factor2 = 2 + (x[0] - 7.0) ** 2.0 + 2 * (x[1] - 7.0) ** 2.0
return factor1 * factor2
except ZeroDivisionError:
return np.nan
def success(self, x):
"""Is a candidate solution at the global minimum"""
val = self.fun(x)
if np.isnan(val):
return True
try:
assert_almost_equal(val, 0., 4)
return True
except AssertionError:
return False
return False
| Damavandi |
python | pytorch__pytorch | test/fx/test_fx_param_shape_control_flow.py | {
"start": 140,
"end": 512
} | class ____(torch.nn.Module):
def forward(self, x):
matrx = self.get_mul_matrix()
if self.no_relu():
return torch.mm(x, matrx)
else:
return torch.relu(torch.mm(x, matrx))
def get_mul_matrix(self):
return self.param
def no_relu(self):
raise Exception("not implemented") # noqa: TRY002
| MyModuleBase |
python | django__django | django/views/generic/dates.py | {
"start": 13527,
"end": 15013
} | class ____(YearMixin, BaseDateListView):
"""
Base view for a list of objects published in a given year.
This requires subclassing to provide a response mixin.
"""
date_list_period = "month"
make_object_list = False
def get_dated_items(self):
"""Return (date_list, items, extra_context) for this request."""
year = self.get_year()
date_field = self.get_date_field()
date = _date_from_string(year, self.get_year_format())
since = self._make_date_lookup_arg(date)
until = self._make_date_lookup_arg(self._get_next_year(date))
lookup_kwargs = {
"%s__gte" % date_field: since,
"%s__lt" % date_field: until,
}
qs = self.get_dated_queryset(**lookup_kwargs)
date_list = self.get_date_list(qs)
if not self.get_make_object_list():
# We need this to be a queryset since parent classes introspect it
# to find information about the model.
qs = qs.none()
return (
date_list,
qs,
{
"year": date,
"next_year": self.get_next_year(date),
"previous_year": self.get_previous_year(date),
},
)
def get_make_object_list(self):
"""
Return `True` if this view should contain the full list of objects in
the given year.
"""
return self.make_object_list
| BaseYearArchiveView |
python | falconry__falcon | examples/ws_tutorial/ws_tutorial/app.py | {
"start": 782,
"end": 1331
} | class ____:
async def process_request_ws(self, req: Request, ws: WebSocket):
# This will be called for the HTTP request that initiates the
# WebSocket handshake before routing.
pass
async def process_resource_ws(self, req: Request, ws: WebSocket, resource, params):
# This will be called for the HTTP request that initiates the
# WebSocket handshake after routing (if a route matches the
# request).
logger.info('WebSocket connection established on %r', req.path)
| LoggerMiddleware |
python | tensorflow__tensorflow | tensorflow/python/compiler/tensorrt/test/shape_output_test.py | {
"start": 7816,
"end": 9113
} | class ____(trt_test.TfTrtIntegrationTestBase):
"""The shape profiles has to fit values of shape tensors, but for regular
tensors the values do not matter. Here we test shape profile management with
an INT32 input tensor that is not a shape tensor. The extra inputs with
dim=10 would trigger an error if we mistakenly treat it as shape tensors.
"""
def setUp(self):
super().setUp()
self.DisableNonTrtOptimizers()
def GraphFn(self, x):
z = x * x + x + 1
z = array_ops.identity(z, name="output_0")
return z
def GetParams(self):
return self.BuildParamsWithMask(
self.GraphFn,
dtypes.int32,
[[4]],
[[4]],
extra_inputs=[[[5]], [[10]]],
extra_outputs=[[[5]], [[10]]],
input_mask=[[False]],
output_mask=[[False]],
)
def ExpectedEnginesToBuild(self, run_params):
"""Returns the expected engines to build."""
return ["TRTEngineOp_000"]
def ShouldRunTest(self, run_params):
# Shape op is only converted in dynamic shape mode.
return (
run_params.dynamic_shape
and run_params.is_v2
and not trt_test.IsQuantizationMode(run_params.precision_mode),
"Test v2 dynamic_shapes without INT8",
)
if __name__ == "__main__":
test.main()
| InputProfile |
python | huggingface__transformers | tests/models/vision_text_dual_encoder/test_modeling_vision_text_dual_encoder.py | {
"start": 1937,
"end": 8612
} | class ____:
def get_vision_text_model(self, config, text_config):
pass
def prepare_config_and_inputs(self):
pass
def get_pretrained_model_and_inputs(self):
pass
def check_model_from_pretrained_configs(
self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
):
config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
model = VisionTextDualEncoderModel(config)
model.to(torch_device)
model.eval()
output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))
def check_vision_text_dual_encoder_model(
self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
):
vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
model = VisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
model.to(torch_device)
model.eval()
output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
def check_vision_text_dual_encoder_from_pretrained(
self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
):
vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
kwargs = {"vision_model": vision_model, "text_model": text_model}
model = VisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
model.to(torch_device)
model.eval()
output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
model = VisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
model.to(torch_device)
model.eval()
with torch.no_grad():
output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
out_1 = output[0].cpu().numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model = VisionTextDualEncoderModel.from_pretrained(tmpdirname).eval()
model.to(torch_device)
after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
out_2 = after_output[0].cpu().numpy()
max_diff = np.amax(np.abs(out_2 - out_1))
self.assertLessEqual(max_diff, 1e-5)
def check_vision_text_output_attention(
self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
):
# The backbones don't support dynamic attention setting, so we manually change it. FIXME; when bert is refactored
text_config._attn_implementation = "eager"
vision_config._attn_implementation = "eager"
vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
model = VisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
model.to(torch_device)
model.eval()
output = model(
input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
)
vision_attentions = output.vision_model_output.attentions
self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
image_size = to_2tuple(vision_model.config.image_size)
patch_size = to_2tuple(vision_model.config.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
seq_len = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
text_attentions = output.text_model_output.attentions
self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:],
(text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
)
def test_vision_text_dual_encoder_model(self):
inputs_dict = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**inputs_dict)
def test_model_from_pretrained_configs(self):
inputs_dict = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**inputs_dict)
def test_vision_text_dual_encoder_from_pretrained(self):
inputs_dict = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)
def test_save_load(self):
inputs_dict = self.prepare_config_and_inputs()
self.check_save_load(**inputs_dict)
def test_vision_text_output_attention(self):
inputs_dict = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**inputs_dict)
@slow
def test_real_model_save_load_from_pretrained(self):
model_2, inputs = self.get_pretrained_model_and_inputs()
model_2.to(torch_device)
with torch.no_grad():
outputs = model_2(**inputs)
out_2 = outputs[0].cpu().numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_2.save_pretrained(tmp_dirname)
model_1 = VisionTextDualEncoderModel.from_pretrained(tmp_dirname)
model_1.to(torch_device)
after_outputs = model_1(**inputs)
out_1 = after_outputs[0].cpu().numpy()
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
@require_torch
| VisionTextDualEncoderMixin |
python | doocs__leetcode | solution/2200-2299/2247.Maximum Cost of Trip With K Highways/Solution.py | {
"start": 0,
"end": 749
} | class ____:
def maximumCost(self, n: int, highways: List[List[int]], k: int) -> int:
if k >= n:
return -1
g = defaultdict(list)
for a, b, cost in highways:
g[a].append((b, cost))
g[b].append((a, cost))
f = [[-inf] * n for _ in range(1 << n)]
for i in range(n):
f[1 << i][i] = 0
ans = -1
for i in range(1 << n):
for j in range(n):
if i >> j & 1:
for h, cost in g[j]:
if i >> h & 1:
f[i][j] = max(f[i][j], f[i ^ (1 << j)][h] + cost)
if i.bit_count() == k + 1:
ans = max(ans, f[i][j])
return ans
| Solution |
python | scrapy__scrapy | tests/test_settings/__init__.py | {
"start": 21991,
"end": 37536
} | class ____:
pass
@pytest.mark.parametrize(
("before", "name", "old_cls", "new_cls", "priority", "after"),
[
({}, "FOO", Component1, Component2, None, KeyError),
(
{"FOO": {Component1: 1}},
"FOO",
Component1,
Component2,
None,
{"FOO": {Component2: 1}},
),
(
{"FOO": {Component1: 1}},
"FOO",
Component1,
Component2,
2,
{"FOO": {Component2: 2}},
),
(
{"FOO": {"tests.test_settings.Component1": 1}},
"FOO",
Component1,
Component2,
None,
{"FOO": {Component2: 1}},
),
(
{"FOO": {Component1Alias: 1}},
"FOO",
Component1,
Component2,
None,
{"FOO": {Component2: 1}},
),
(
{"FOO": {Component1Alias: 1}},
"FOO",
Component1,
Component2,
2,
{"FOO": {Component2: 2}},
),
(
{"FOO": {"tests.test_settings.Component1Alias": 1}},
"FOO",
Component1,
Component2,
None,
{"FOO": {Component2: 1}},
),
(
{"FOO": {"tests.test_settings.Component1Alias": 1}},
"FOO",
Component1,
Component2,
2,
{"FOO": {Component2: 2}},
),
(
{
"FOO": {
"tests.test_settings.Component1": 1,
"tests.test_settings.Component1Alias": 2,
}
},
"FOO",
Component1,
Component2,
None,
{"FOO": {Component2: 2}},
),
(
{
"FOO": {
"tests.test_settings.Component1": 1,
"tests.test_settings.Component1Alias": 2,
}
},
"FOO",
Component1,
Component2,
3,
{"FOO": {Component2: 3}},
),
(
{"FOO": '{"tests.test_settings.Component1": 1}'},
"FOO",
Component1,
Component2,
None,
{"FOO": {Component2: 1}},
),
(
{"FOO": '{"tests.test_settings.Component1": 1}'},
"FOO",
Component1,
Component2,
2,
{"FOO": {Component2: 2}},
),
(
{"FOO": '{"tests.test_settings.Component1Alias": 1}'},
"FOO",
Component1,
Component2,
None,
{"FOO": {Component2: 1}},
),
(
{"FOO": '{"tests.test_settings.Component1Alias": 1}'},
"FOO",
Component1,
Component2,
2,
{"FOO": {Component2: 2}},
),
(
{
"FOO": '{"tests.test_settings.Component1": 1, "tests.test_settings.Component1Alias": 2}'
},
"FOO",
Component1,
Component2,
None,
{"FOO": {Component2: 2}},
),
(
{
"FOO": '{"tests.test_settings.Component1": 1, "tests.test_settings.Component1Alias": 2}'
},
"FOO",
Component1,
Component2,
3,
{"FOO": {Component2: 3}},
),
# If old_cls has None as value, raise KeyError.
(
{"FOO": {Component1: None}},
"FOO",
Component1,
Component2,
None,
KeyError,
),
(
{"FOO": '{"tests.test_settings.Component1": null}'},
"FOO",
Component1,
Component2,
None,
KeyError,
),
(
{"FOO": {Component1: None, "tests.test_settings.Component1": None}},
"FOO",
Component1,
Component2,
None,
KeyError,
),
(
{"FOO": {Component1: 1, "tests.test_settings.Component1": None}},
"FOO",
Component1,
Component2,
None,
KeyError,
),
(
{"FOO": {Component1: None, "tests.test_settings.Component1": 1}},
"FOO",
Component1,
Component2,
None,
KeyError,
),
# Unrelated components are kept as is, as expected.
(
{
"FOO": {
Component1: 1,
"tests.test_settings.Component2": 2,
Component3: 3,
}
},
"FOO",
Component3,
Component4,
None,
{
"FOO": {
Component1: 1,
"tests.test_settings.Component2": 2,
Component4: 3,
}
},
),
],
)
def test_replace_in_component_priority_dict(
before, name, old_cls, new_cls, priority, after
):
settings = BaseSettings(before, priority=0)
if isinstance(after, type) and issubclass(after, Exception):
with pytest.raises(after):
settings.replace_in_component_priority_dict(
name, old_cls, new_cls, priority
)
return
expected_priority = settings.getpriority(name) or 0
settings.replace_in_component_priority_dict(name, old_cls, new_cls, priority)
expected_settings = BaseSettings(after, priority=expected_priority)
assert settings == expected_settings
assert settings.getpriority(name) == expected_settings.getpriority(name)
@pytest.mark.parametrize(
("before", "name", "cls", "priority", "after"),
[
# Set
({}, "FOO", Component1, None, {"FOO": {Component1: None}}),
({}, "FOO", Component1, 0, {"FOO": {Component1: 0}}),
({}, "FOO", Component1, 1, {"FOO": {Component1: 1}}),
# Add
(
{"FOO": {Component1: 0}},
"FOO",
Component2,
None,
{"FOO": {Component1: 0, Component2: None}},
),
(
{"FOO": {Component1: 0}},
"FOO",
Component2,
0,
{"FOO": {Component1: 0, Component2: 0}},
),
(
{"FOO": {Component1: 0}},
"FOO",
Component2,
1,
{"FOO": {Component1: 0, Component2: 1}},
),
# Replace
(
{
"FOO": {
Component1: None,
"tests.test_settings.Component1": 0,
"tests.test_settings.Component1Alias": 1,
Component1Subclass: None,
"tests.test_settings.Component1Subclass": 0,
"tests.test_settings.Component1SubclassAlias": 1,
}
},
"FOO",
Component1,
None,
{
"FOO": {
Component1: None,
Component1Subclass: None,
"tests.test_settings.Component1Subclass": 0,
"tests.test_settings.Component1SubclassAlias": 1,
}
},
),
(
{
"FOO": {
Component1: 0,
"tests.test_settings.Component1": 1,
"tests.test_settings.Component1Alias": None,
Component1Subclass: 0,
"tests.test_settings.Component1Subclass": 1,
"tests.test_settings.Component1SubclassAlias": None,
}
},
"FOO",
Component1,
0,
{
"FOO": {
Component1: 0,
Component1Subclass: 0,
"tests.test_settings.Component1Subclass": 1,
"tests.test_settings.Component1SubclassAlias": None,
}
},
),
(
{
"FOO": {
Component1: 1,
"tests.test_settings.Component1": None,
"tests.test_settings.Component1Alias": 0,
Component1Subclass: 1,
"tests.test_settings.Component1Subclass": None,
"tests.test_settings.Component1SubclassAlias": 0,
}
},
"FOO",
Component1,
1,
{
"FOO": {
Component1: 1,
Component1Subclass: 1,
"tests.test_settings.Component1Subclass": None,
"tests.test_settings.Component1SubclassAlias": 0,
}
},
),
# String-based setting values
(
{"FOO": '{"tests.test_settings.Component1": 0}'},
"FOO",
Component2,
None,
{"FOO": {"tests.test_settings.Component1": 0, Component2: None}},
),
(
{
"FOO": """{
"tests.test_settings.Component1": 0,
"tests.test_settings.Component1Alias": 1,
"tests.test_settings.Component1Subclass": 0,
"tests.test_settings.Component1SubclassAlias": 1
}"""
},
"FOO",
Component1,
None,
{
"FOO": {
Component1: None,
"tests.test_settings.Component1Subclass": 0,
"tests.test_settings.Component1SubclassAlias": 1,
}
},
),
],
)
def test_set_in_component_priority_dict(before, name, cls, priority, after):
settings = BaseSettings(before, priority=0)
expected_priority = settings.getpriority(name) or 0
settings.set_in_component_priority_dict(name, cls, priority)
expected_settings = BaseSettings(after, priority=expected_priority)
assert settings == expected_settings
assert settings.getpriority(name) == expected_settings.getpriority(name), (
f"{settings.getpriority(name)=} != {expected_settings.getpriority(name)=}"
)
@pytest.mark.parametrize(
("before", "name", "cls", "priority", "after"),
[
# Set
({}, "FOO", Component1, None, {"FOO": {Component1: None}}),
({}, "FOO", Component1, 0, {"FOO": {Component1: 0}}),
({}, "FOO", Component1, 1, {"FOO": {Component1: 1}}),
# Add
(
{"FOO": {Component1: 0}},
"FOO",
Component2,
None,
{"FOO": {Component1: 0, Component2: None}},
),
(
{"FOO": {Component1: 0}},
"FOO",
Component2,
0,
{"FOO": {Component1: 0, Component2: 0}},
),
(
{"FOO": {Component1: 0}},
"FOO",
Component2,
1,
{"FOO": {Component1: 0, Component2: 1}},
),
# Keep
(
{
"FOO": {
Component1: None,
"tests.test_settings.Component1": 0,
"tests.test_settings.Component1Alias": 1,
Component1Subclass: None,
"tests.test_settings.Component1Subclass": 0,
"tests.test_settings.Component1SubclassAlias": 1,
}
},
"FOO",
Component1,
None,
{
"FOO": {
Component1: None,
"tests.test_settings.Component1": 0,
"tests.test_settings.Component1Alias": 1,
Component1Subclass: None,
"tests.test_settings.Component1Subclass": 0,
"tests.test_settings.Component1SubclassAlias": 1,
}
},
),
(
{
"FOO": {
Component1: 0,
"tests.test_settings.Component1": 1,
"tests.test_settings.Component1Alias": None,
Component1Subclass: 0,
"tests.test_settings.Component1Subclass": 1,
"tests.test_settings.Component1SubclassAlias": None,
}
},
"FOO",
Component1,
0,
{
"FOO": {
Component1: 0,
"tests.test_settings.Component1": 1,
"tests.test_settings.Component1Alias": None,
Component1Subclass: 0,
"tests.test_settings.Component1Subclass": 1,
"tests.test_settings.Component1SubclassAlias": None,
}
},
),
(
{
"FOO": {
Component1: 1,
"tests.test_settings.Component1": None,
"tests.test_settings.Component1Alias": 0,
Component1Subclass: 1,
"tests.test_settings.Component1Subclass": None,
"tests.test_settings.Component1SubclassAlias": 0,
}
},
"FOO",
Component1,
1,
{
"FOO": {
Component1: 1,
"tests.test_settings.Component1": None,
"tests.test_settings.Component1Alias": 0,
Component1Subclass: 1,
"tests.test_settings.Component1Subclass": None,
"tests.test_settings.Component1SubclassAlias": 0,
}
},
),
# String-based setting values
(
{"FOO": '{"tests.test_settings.Component1": 0}'},
"FOO",
Component2,
None,
{"FOO": {"tests.test_settings.Component1": 0, Component2: None}},
),
(
{
"FOO": """{
"tests.test_settings.Component1": 0,
"tests.test_settings.Component1Alias": 1,
"tests.test_settings.Component1Subclass": 0,
"tests.test_settings.Component1SubclassAlias": 1
}"""
},
"FOO",
Component1,
None,
{
"FOO": """{
"tests.test_settings.Component1": 0,
"tests.test_settings.Component1Alias": 1,
"tests.test_settings.Component1Subclass": 0,
"tests.test_settings.Component1SubclassAlias": 1
}"""
},
),
],
)
def test_setdefault_in_component_priority_dict(before, name, cls, priority, after):
settings = BaseSettings(before, priority=0)
expected_priority = settings.getpriority(name) or 0
settings.setdefault_in_component_priority_dict(name, cls, priority)
expected_settings = BaseSettings(after, priority=expected_priority)
assert settings == expected_settings
assert settings.getpriority(name) == expected_settings.getpriority(name)
| Component4 |
python | allegroai__clearml | clearml/backend_config/bucket_config.py | {
"start": 9909,
"end": 10572
} | class ____(object):
bucket = attrib(type=str)
subdir = attrib(type=str, converter=_url_stripper, default="")
project = attrib(type=str, default=None)
credentials_json = attrib(type=str, default=None)
pool_connections = attrib(type=int, default=None)
pool_maxsize = attrib(type=int, default=None)
def update(self, **kwargs: Any) -> None:
for item in kwargs:
if not hasattr(self, item):
warnings.warn("Unexpected argument {} for update. Ignored".format(item))
else:
setattr(self, item, kwargs[item])
def is_valid(self) -> bool:
return self.bucket
| GSBucketConfig |
python | justquick__django-activity-stream | actstream/feeds.py | {
"start": 5630,
"end": 6960
} | class ____(AbstractActivityStream, Feed):
def feed_extra_kwargs(self, obj):
"""
Returns an extra keyword arguments dictionary that is used when
initializing the feed generator.
"""
return {}
def item_extra_kwargs(self, action):
"""
Returns an extra keyword arguments dictionary that is used with
the `add_item` call of the feed generator.
Add the 'content' field of the 'Entry' item, to be used by the custom
feed generator.
"""
item = self.format(action)
item.pop('title', None)
item['uri'] = item.pop('url')
item['activity:verb'] = item.pop('verb')
return item
def format_item(self, action, item_type='actor'):
name = item_type == 'actor' and 'name' or 'title'
item = super(ActivityStreamsBaseFeed, self).format_item(action, item_type)
item[name] = item.pop('displayName')
item['activity:object-type'] = item.pop('objectType')
item.pop('url')
return item
def item_link(self, action):
return self.get_url(action)
def item_description(self, action):
if action.description:
return force_str(action.description)
def items(self, obj):
return self.get_stream()(obj)[:30]
| ActivityStreamsBaseFeed |
python | bokeh__bokeh | tests/unit/bokeh/core/property/test_visual.py | {
"start": 1783,
"end": 4309
} | class ____:
def test_valid_named(self) -> None:
f = Foo()
assert f.pat == []
f.pat = "solid"
assert f.pat == []
f.pat = "dashed"
assert f.pat == [6]
f.pat = "dotted"
assert f.pat == [2, 4]
f.pat = "dotdash"
assert f.pat == [2, 4, 6, 4]
f.pat = "dashdot"
assert f.pat == [6, 4, 2, 4]
def test_valid_string(self) -> None:
f = Foo()
f.pat = ""
assert f.pat == []
f.pat = "2"
assert f.pat == [2]
f.pat = "2 4"
assert f.pat == [2, 4]
f.pat = "2 4 6"
assert f.pat == [2, 4, 6]
with pytest.raises(ValueError):
f.pat = "abc 6"
def test_valid_list(self) -> None:
f = Foo()
f.pat = ()
assert f.pat == ()
f.pat = (2,)
assert f.pat == (2,)
f.pat = (2, 4)
assert f.pat == (2, 4)
f.pat = (2, 4, 6)
assert f.pat == (2, 4, 6)
with pytest.raises(ValueError):
f.pat = (2, 4.2)
with pytest.raises(ValueError):
f.pat = (2, "a")
def test_valid(self) -> None:
prop = bcpv.DashPattern()
assert prop.is_valid("")
assert prop.is_valid(())
assert prop.is_valid([])
assert prop.is_valid("solid")
assert prop.is_valid("dashed")
assert prop.is_valid("dotted")
assert prop.is_valid("dotdash")
assert prop.is_valid("dashdot")
assert prop.is_valid([1, 2, 3])
assert prop.is_valid("1 2 3")
def test_invalid(self) -> None:
prop = bcpv.DashPattern()
assert not prop.is_valid(None)
assert not prop.is_valid(False)
assert not prop.is_valid(True)
assert not prop.is_valid(0)
assert not prop.is_valid(1)
assert not prop.is_valid(0.0)
assert not prop.is_valid(1.0)
assert not prop.is_valid(1.0+1.0j)
assert not prop.is_valid("foo")
assert not prop.is_valid("DASHDOT")
assert not prop.is_valid([1, 2, 3.0])
assert not prop.is_valid("1 2 x")
assert not prop.is_valid(_TestHasProps())
assert not prop.is_valid(_TestModel())
def test_has_ref(self) -> None:
prop = bcpv.DashPattern()
assert not prop.has_ref
def test_str(self) -> None:
prop = bcpv.DashPattern()
assert str(prop) == "DashPattern"
css_units = "%|em|ex|ch|ic|rem|vw|vh|vi|vb|vmin|vmax|cm|mm|q|in|pc|pt|px"
| TestDashPattern |
python | boto__boto3 | boto3/dynamodb/transform.py | {
"start": 10084,
"end": 12909
} | class ____:
"""Transforms the input to and output from botocore based on shape"""
def transform(self, params, model, transformation, target_shape):
"""Transforms the dynamodb input to or output from botocore
It applies a specified transformation whenever a specific shape name
is encountered while traversing the parameters in the dictionary.
:param params: The parameters structure to transform.
:param model: The operation model.
:param transformation: The function to apply the parameter
:param target_shape: The name of the shape to apply the
transformation to
"""
self._transform_parameters(model, params, transformation, target_shape)
def _transform_parameters(
self, model, params, transformation, target_shape
):
type_name = model.type_name
if type_name in ('structure', 'map', 'list'):
getattr(self, f'_transform_{type_name}')(
model, params, transformation, target_shape
)
def _transform_structure(
self, model, params, transformation, target_shape
):
if not isinstance(params, collections_abc.Mapping):
return
for param in params:
if param in model.members:
member_model = model.members[param]
member_shape = member_model.name
if member_shape == target_shape:
params[param] = transformation(params[param])
else:
self._transform_parameters(
member_model,
params[param],
transformation,
target_shape,
)
def _transform_map(self, model, params, transformation, target_shape):
if not isinstance(params, collections_abc.Mapping):
return
value_model = model.value
value_shape = value_model.name
for key, value in params.items():
if value_shape == target_shape:
params[key] = transformation(value)
else:
self._transform_parameters(
value_model, params[key], transformation, target_shape
)
def _transform_list(self, model, params, transformation, target_shape):
if not isinstance(params, collections_abc.MutableSequence):
return
member_model = model.member
member_shape = member_model.name
for i, item in enumerate(params):
if member_shape == target_shape:
params[i] = transformation(item)
else:
self._transform_parameters(
member_model, params[i], transformation, target_shape
)
| ParameterTransformer |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/strategy_options.py | {
"start": 2711,
"end": 33184
} | class ____(traversals.GenerativeOnTraversal, LoaderOption):
__slots__ = ("propagate_to_loaders",)
_is_strategy_option = True
propagate_to_loaders: bool
def contains_eager(
self,
attr: _AttrType,
alias: Optional[_FromClauseArgument] = None,
_is_chain: bool = False,
_propagate_to_loaders: bool = False,
) -> Self:
r"""Indicate that the given attribute should be eagerly loaded from
columns stated manually in the query.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
The option is used in conjunction with an explicit join that loads
the desired rows, i.e.::
sess.query(Order).join(Order.user).options(contains_eager(Order.user))
The above query would join from the ``Order`` entity to its related
``User`` entity, and the returned ``Order`` objects would have the
``Order.user`` attribute pre-populated.
It may also be used for customizing the entries in an eagerly loaded
collection; queries will normally want to use the
:ref:`orm_queryguide_populate_existing` execution option assuming the
primary collection of parent objects may already have been loaded::
sess.query(User).join(User.addresses).filter(
Address.email_address.like("%@aol.com")
).options(contains_eager(User.addresses)).populate_existing()
See the section :ref:`contains_eager` for complete usage details.
.. seealso::
:ref:`loading_toplevel`
:ref:`contains_eager`
"""
if alias is not None:
if not isinstance(alias, str):
coerced_alias = coercions.expect(roles.FromClauseRole, alias)
else:
util.warn_deprecated(
"Passing a string name for the 'alias' argument to "
"'contains_eager()` is deprecated, and will not work in a "
"future release. Please use a sqlalchemy.alias() or "
"sqlalchemy.orm.aliased() construct.",
version="1.4",
)
coerced_alias = alias
elif getattr(attr, "_of_type", None):
assert isinstance(attr, QueryableAttribute)
ot: Optional[_InternalEntityType[Any]] = inspect(attr._of_type)
assert ot is not None
coerced_alias = ot.selectable
else:
coerced_alias = None
cloned = self._set_relationship_strategy(
attr,
{"lazy": "joined"},
propagate_to_loaders=_propagate_to_loaders,
opts={"eager_from_alias": coerced_alias},
_reconcile_to_other=True if _is_chain else None,
)
return cloned
def load_only(self, *attrs: _AttrType, raiseload: bool = False) -> Self:
r"""Indicate that for a particular entity, only the given list
of column-based attribute names should be loaded; all others will be
deferred.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
Example - given a class ``User``, load only the ``name`` and
``fullname`` attributes::
session.query(User).options(load_only(User.name, User.fullname))
Example - given a relationship ``User.addresses -> Address``, specify
subquery loading for the ``User.addresses`` collection, but on each
``Address`` object load only the ``email_address`` attribute::
session.query(User).options(
subqueryload(User.addresses).load_only(Address.email_address)
)
For a statement that has multiple entities,
the lead entity can be
specifically referred to using the :class:`_orm.Load` constructor::
stmt = (
select(User, Address)
.join(User.addresses)
.options(
Load(User).load_only(User.name, User.fullname),
Load(Address).load_only(Address.email_address),
)
)
When used together with the
:ref:`populate_existing <orm_queryguide_populate_existing>`
execution option only the attributes listed will be refreshed.
:param \*attrs: Attributes to be loaded, all others will be deferred.
:param raiseload: raise :class:`.InvalidRequestError` rather than
lazy loading a value when a deferred attribute is accessed. Used
to prevent unwanted SQL from being emitted.
.. versionadded:: 2.0
.. seealso::
:ref:`orm_queryguide_column_deferral` - in the
:ref:`queryguide_toplevel`
:param \*attrs: Attributes to be loaded, all others will be deferred.
:param raiseload: raise :class:`.InvalidRequestError` rather than
lazy loading a value when a deferred attribute is accessed. Used
to prevent unwanted SQL from being emitted.
.. versionadded:: 2.0
"""
cloned = self._set_column_strategy(
_expand_column_strategy_attrs(attrs),
{"deferred": False, "instrument": True},
)
wildcard_strategy = {"deferred": True, "instrument": True}
if raiseload:
wildcard_strategy["raiseload"] = True
cloned = cloned._set_column_strategy(
("*",),
wildcard_strategy,
)
return cloned
def joinedload(
self,
attr: _AttrType,
innerjoin: Optional[bool] = None,
) -> Self:
"""Indicate that the given attribute should be loaded using joined
eager loading.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
examples::
# joined-load the "orders" collection on "User"
select(User).options(joinedload(User.orders))
# joined-load Order.items and then Item.keywords
select(Order).options(joinedload(Order.items).joinedload(Item.keywords))
# lazily load Order.items, but when Items are loaded,
# joined-load the keywords collection
select(Order).options(lazyload(Order.items).joinedload(Item.keywords))
:param innerjoin: if ``True``, indicates that the joined eager load
should use an inner join instead of the default of left outer join::
select(Order).options(joinedload(Order.user, innerjoin=True))
In order to chain multiple eager joins together where some may be
OUTER and others INNER, right-nested joins are used to link them::
select(A).options(
joinedload(A.bs, innerjoin=False).joinedload(B.cs, innerjoin=True)
)
The above query, linking A.bs via "outer" join and B.cs via "inner"
join would render the joins as "a LEFT OUTER JOIN (b JOIN c)". When
using older versions of SQLite (< 3.7.16), this form of JOIN is
translated to use full subqueries as this syntax is otherwise not
directly supported.
The ``innerjoin`` flag can also be stated with the term ``"unnested"``.
This indicates that an INNER JOIN should be used, *unless* the join
is linked to a LEFT OUTER JOIN to the left, in which case it
will render as LEFT OUTER JOIN. For example, supposing ``A.bs``
is an outerjoin::
select(A).options(joinedload(A.bs).joinedload(B.cs, innerjoin="unnested"))
The above join will render as "a LEFT OUTER JOIN b LEFT OUTER JOIN c",
rather than as "a LEFT OUTER JOIN (b JOIN c)".
.. note:: The "unnested" flag does **not** affect the JOIN rendered
from a many-to-many association table, e.g. a table configured as
:paramref:`_orm.relationship.secondary`, to the target table; for
correctness of results, these joins are always INNER and are
therefore right-nested if linked to an OUTER join.
.. note::
The joins produced by :func:`_orm.joinedload` are **anonymously
aliased**. The criteria by which the join proceeds cannot be
modified, nor can the ORM-enabled :class:`_sql.Select` or legacy
:class:`_query.Query` refer to these joins in any way, including
ordering. See :ref:`zen_of_eager_loading` for further detail.
To produce a specific SQL JOIN which is explicitly available, use
:meth:`_sql.Select.join` and :meth:`_query.Query.join`. To combine
explicit JOINs with eager loading of collections, use
:func:`_orm.contains_eager`; see :ref:`contains_eager`.
.. seealso::
:ref:`loading_toplevel`
:ref:`joined_eager_loading`
""" # noqa: E501
loader = self._set_relationship_strategy(
attr,
{"lazy": "joined"},
opts=(
{"innerjoin": innerjoin}
if innerjoin is not None
else util.EMPTY_DICT
),
)
return loader
def subqueryload(self, attr: _AttrType) -> Self:
"""Indicate that the given attribute should be loaded using
subquery eager loading.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
examples::
# subquery-load the "orders" collection on "User"
select(User).options(subqueryload(User.orders))
# subquery-load Order.items and then Item.keywords
select(Order).options(
subqueryload(Order.items).subqueryload(Item.keywords)
)
# lazily load Order.items, but when Items are loaded,
# subquery-load the keywords collection
select(Order).options(lazyload(Order.items).subqueryload(Item.keywords))
.. seealso::
:ref:`loading_toplevel`
:ref:`subquery_eager_loading`
"""
return self._set_relationship_strategy(attr, {"lazy": "subquery"})
def selectinload(
self,
attr: _AttrType,
recursion_depth: Optional[int] = None,
) -> Self:
"""Indicate that the given attribute should be loaded using
SELECT IN eager loading.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
examples::
# selectin-load the "orders" collection on "User"
select(User).options(selectinload(User.orders))
# selectin-load Order.items and then Item.keywords
select(Order).options(
selectinload(Order.items).selectinload(Item.keywords)
)
# lazily load Order.items, but when Items are loaded,
# selectin-load the keywords collection
select(Order).options(lazyload(Order.items).selectinload(Item.keywords))
:param recursion_depth: optional int; when set to a positive integer
in conjunction with a self-referential relationship,
indicates "selectin" loading will continue that many levels deep
automatically until no items are found.
.. note:: The :paramref:`_orm.selectinload.recursion_depth` option
currently supports only self-referential relationships. There
is not yet an option to automatically traverse recursive structures
with more than one relationship involved.
Additionally, the :paramref:`_orm.selectinload.recursion_depth`
parameter is new and experimental and should be treated as "alpha"
status for the 2.0 series.
.. versionadded:: 2.0 added
:paramref:`_orm.selectinload.recursion_depth`
.. seealso::
:ref:`loading_toplevel`
:ref:`selectin_eager_loading`
"""
return self._set_relationship_strategy(
attr,
{"lazy": "selectin"},
opts={"recursion_depth": recursion_depth},
)
def lazyload(self, attr: _AttrType) -> Self:
"""Indicate that the given attribute should be loaded using "lazy"
loading.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
.. seealso::
:ref:`loading_toplevel`
:ref:`lazy_loading`
"""
return self._set_relationship_strategy(attr, {"lazy": "select"})
def immediateload(
self,
attr: _AttrType,
recursion_depth: Optional[int] = None,
) -> Self:
"""Indicate that the given attribute should be loaded using
an immediate load with a per-attribute SELECT statement.
The load is achieved using the "lazyloader" strategy and does not
fire off any additional eager loaders.
The :func:`.immediateload` option is superseded in general
by the :func:`.selectinload` option, which performs the same task
more efficiently by emitting a SELECT for all loaded objects.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
:param recursion_depth: optional int; when set to a positive integer
in conjunction with a self-referential relationship,
indicates "selectin" loading will continue that many levels deep
automatically until no items are found.
.. note:: The :paramref:`_orm.immediateload.recursion_depth` option
currently supports only self-referential relationships. There
is not yet an option to automatically traverse recursive structures
with more than one relationship involved.
.. warning:: This parameter is new and experimental and should be
treated as "alpha" status
.. versionadded:: 2.0 added
:paramref:`_orm.immediateload.recursion_depth`
.. seealso::
:ref:`loading_toplevel`
:ref:`selectin_eager_loading`
"""
loader = self._set_relationship_strategy(
attr,
{"lazy": "immediate"},
opts={"recursion_depth": recursion_depth},
)
return loader
@util.deprecated(
"2.1",
"The :func:`_orm.noload` option is deprecated and will be removed "
"in a future release. This option "
"produces incorrect results by returning ``None`` for related "
"items.",
)
def noload(self, attr: _AttrType) -> Self:
"""Indicate that the given relationship attribute should remain
unloaded.
The relationship attribute will return ``None`` when accessed without
producing any loading effect.
:func:`_orm.noload` applies to :func:`_orm.relationship` attributes
only.
.. seealso::
:ref:`loading_toplevel`
"""
return self._set_relationship_strategy(attr, {"lazy": "noload"})
def raiseload(self, attr: _AttrType, sql_only: bool = False) -> Self:
"""Indicate that the given attribute should raise an error if accessed.
A relationship attribute configured with :func:`_orm.raiseload` will
raise an :exc:`~sqlalchemy.exc.InvalidRequestError` upon access. The
typical way this is useful is when an application is attempting to
ensure that all relationship attributes that are accessed in a
particular context would have been already loaded via eager loading.
Instead of having to read through SQL logs to ensure lazy loads aren't
occurring, this strategy will cause them to raise immediately.
:func:`_orm.raiseload` applies to :func:`_orm.relationship` attributes
only. In order to apply raise-on-SQL behavior to a column-based
attribute, use the :paramref:`.orm.defer.raiseload` parameter on the
:func:`.defer` loader option.
:param sql_only: if True, raise only if the lazy load would emit SQL,
but not if it is only checking the identity map, or determining that
the related value should just be None due to missing keys. When False,
the strategy will raise for all varieties of relationship loading.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
.. seealso::
:ref:`loading_toplevel`
:ref:`prevent_lazy_with_raiseload`
:ref:`orm_queryguide_deferred_raiseload`
"""
return self._set_relationship_strategy(
attr, {"lazy": "raise_on_sql" if sql_only else "raise"}
)
def defaultload(self, attr: _AttrType) -> Self:
"""Indicate an attribute should load using its predefined loader style.
The behavior of this loading option is to not change the current
loading style of the attribute, meaning that the previously configured
one is used or, if no previous style was selected, the default
loading will be used.
This method is used to link to other loader options further into
a chain of attributes without altering the loader style of the links
along the chain. For example, to set joined eager loading for an
element of an element::
session.query(MyClass).options(
defaultload(MyClass.someattribute).joinedload(
MyOtherClass.someotherattribute
)
)
:func:`.defaultload` is also useful for setting column-level options on
a related class, namely that of :func:`.defer` and :func:`.undefer`::
session.scalars(
select(MyClass).options(
defaultload(MyClass.someattribute)
.defer("some_column")
.undefer("some_other_column")
)
)
.. seealso::
:ref:`orm_queryguide_relationship_sub_options`
:meth:`_orm.Load.options`
"""
return self._set_relationship_strategy(attr, None)
def defer(self, key: _AttrType, raiseload: bool = False) -> Self:
r"""Indicate that the given column-oriented attribute should be
deferred, e.g. not loaded until accessed.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
e.g.::
from sqlalchemy.orm import defer
session.query(MyClass).options(
defer(MyClass.attribute_one), defer(MyClass.attribute_two)
)
To specify a deferred load of an attribute on a related class,
the path can be specified one token at a time, specifying the loading
style for each link along the chain. To leave the loading style
for a link unchanged, use :func:`_orm.defaultload`::
session.query(MyClass).options(
defaultload(MyClass.someattr).defer(RelatedClass.some_column)
)
Multiple deferral options related to a relationship can be bundled
at once using :meth:`_orm.Load.options`::
select(MyClass).options(
defaultload(MyClass.someattr).options(
defer(RelatedClass.some_column),
defer(RelatedClass.some_other_column),
defer(RelatedClass.another_column),
)
)
:param key: Attribute to be deferred.
:param raiseload: raise :class:`.InvalidRequestError` rather than
lazy loading a value when the deferred attribute is accessed. Used
to prevent unwanted SQL from being emitted.
.. versionadded:: 1.4
.. seealso::
:ref:`orm_queryguide_column_deferral` - in the
:ref:`queryguide_toplevel`
:func:`_orm.load_only`
:func:`_orm.undefer`
"""
strategy = {"deferred": True, "instrument": True}
if raiseload:
strategy["raiseload"] = True
return self._set_column_strategy(
_expand_column_strategy_attrs((key,)), strategy
)
def undefer(self, key: _AttrType) -> Self:
r"""Indicate that the given column-oriented attribute should be
undeferred, e.g. specified within the SELECT statement of the entity
as a whole.
The column being undeferred is typically set up on the mapping as a
:func:`.deferred` attribute.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
Examples::
# undefer two columns
session.query(MyClass).options(
undefer(MyClass.col1), undefer(MyClass.col2)
)
# undefer all columns specific to a single class using Load + *
session.query(MyClass, MyOtherClass).options(Load(MyClass).undefer("*"))
# undefer a column on a related object
select(MyClass).options(defaultload(MyClass.items).undefer(MyClass.text))
:param key: Attribute to be undeferred.
.. seealso::
:ref:`orm_queryguide_column_deferral` - in the
:ref:`queryguide_toplevel`
:func:`_orm.defer`
:func:`_orm.undefer_group`
""" # noqa: E501
return self._set_column_strategy(
_expand_column_strategy_attrs((key,)),
{"deferred": False, "instrument": True},
)
def undefer_group(self, name: str) -> Self:
"""Indicate that columns within the given deferred group name should be
undeferred.
The columns being undeferred are set up on the mapping as
:func:`.deferred` attributes and include a "group" name.
E.g::
session.query(MyClass).options(undefer_group("large_attrs"))
To undefer a group of attributes on a related entity, the path can be
spelled out using relationship loader options, such as
:func:`_orm.defaultload`::
select(MyClass).options(
defaultload("someattr").undefer_group("large_attrs")
)
.. seealso::
:ref:`orm_queryguide_column_deferral` - in the
:ref:`queryguide_toplevel`
:func:`_orm.defer`
:func:`_orm.undefer`
"""
return self._set_column_strategy(
(_WILDCARD_TOKEN,), None, {f"undefer_group_{name}": True}
)
def with_expression(
self,
key: _AttrType,
expression: _ColumnExpressionArgument[Any],
) -> Self:
r"""Apply an ad-hoc SQL expression to a "deferred expression"
attribute.
This option is used in conjunction with the
:func:`_orm.query_expression` mapper-level construct that indicates an
attribute which should be the target of an ad-hoc SQL expression.
E.g.::
stmt = select(SomeClass).options(
with_expression(SomeClass.x_y_expr, SomeClass.x + SomeClass.y)
)
:param key: Attribute to be populated
:param expr: SQL expression to be applied to the attribute.
.. seealso::
:ref:`orm_queryguide_with_expression` - background and usage
examples
"""
expression = _orm_full_deannotate(
coercions.expect(roles.LabeledColumnExprRole, expression)
)
return self._set_column_strategy(
(key,), {"query_expression": True}, extra_criteria=(expression,)
)
def selectin_polymorphic(self, classes: Iterable[Type[Any]]) -> Self:
"""Indicate an eager load should take place for all attributes
specific to a subclass.
This uses an additional SELECT with IN against all matched primary
key values, and is the per-query analogue to the ``"selectin"``
setting on the :paramref:`.mapper.polymorphic_load` parameter.
.. seealso::
:ref:`polymorphic_selectin`
"""
self = self._set_class_strategy(
{"selectinload_polymorphic": True},
opts={
"entities": tuple(
sorted((inspect(cls) for cls in classes), key=id)
)
},
)
return self
@overload
def _coerce_strat(self, strategy: _StrategySpec) -> _StrategyKey: ...
@overload
def _coerce_strat(self, strategy: Literal[None]) -> None: ...
def _coerce_strat(
self, strategy: Optional[_StrategySpec]
) -> Optional[_StrategyKey]:
if strategy is not None:
strategy_key = tuple(sorted(strategy.items()))
else:
strategy_key = None
return strategy_key
@_generative
def _set_relationship_strategy(
self,
attr: _AttrType,
strategy: Optional[_StrategySpec],
propagate_to_loaders: bool = True,
opts: Optional[_OptsType] = None,
_reconcile_to_other: Optional[bool] = None,
) -> Self:
strategy_key = self._coerce_strat(strategy)
self._clone_for_bind_strategy(
(attr,),
strategy_key,
_RELATIONSHIP_TOKEN,
opts=opts,
propagate_to_loaders=propagate_to_loaders,
reconcile_to_other=_reconcile_to_other,
)
return self
@_generative
def _set_column_strategy(
self,
attrs: Tuple[_AttrType, ...],
strategy: Optional[_StrategySpec],
opts: Optional[_OptsType] = None,
extra_criteria: Optional[Tuple[Any, ...]] = None,
) -> Self:
strategy_key = self._coerce_strat(strategy)
self._clone_for_bind_strategy(
attrs,
strategy_key,
_COLUMN_TOKEN,
opts=opts,
attr_group=attrs,
extra_criteria=extra_criteria,
)
return self
@_generative
def _set_generic_strategy(
self,
attrs: Tuple[_AttrType, ...],
strategy: _StrategySpec,
_reconcile_to_other: Optional[bool] = None,
) -> Self:
strategy_key = self._coerce_strat(strategy)
self._clone_for_bind_strategy(
attrs,
strategy_key,
None,
propagate_to_loaders=True,
reconcile_to_other=_reconcile_to_other,
)
return self
@_generative
def _set_class_strategy(
self, strategy: _StrategySpec, opts: _OptsType
) -> Self:
strategy_key = self._coerce_strat(strategy)
self._clone_for_bind_strategy(None, strategy_key, None, opts=opts)
return self
    def _apply_to_parent(self, parent: Load) -> None:
        """Apply this :class:`_orm._AbstractLoad` object as a sub-option of
        a :class:`_orm.Load` object.

        Implementation is provided by subclasses; this base version always
        raises :exc:`NotImplementedError`.
        """
        raise NotImplementedError()
    def options(self, *opts: _AbstractLoad) -> Self:
        r"""Apply a series of options as sub-options to this
        :class:`_orm._AbstractLoad` object.

        :param \*opts: loader option objects to attach beneath this one.

        Implementation is provided by subclasses; this base version always
        raises :exc:`NotImplementedError`.
        """
        raise NotImplementedError()
    def _clone_for_bind_strategy(
        self,
        attrs: Optional[Tuple[_AttrType, ...]],
        strategy: Optional[_StrategyKey],
        wildcard_key: Optional[_WildcardKeyType],
        opts: Optional[_OptsType] = None,
        attr_group: Optional[_AttrGroupType] = None,
        propagate_to_loaders: bool = True,
        reconcile_to_other: Optional[bool] = None,
        extra_criteria: Optional[Tuple[Any, ...]] = None,
    ) -> Self:
        """Record *strategy* (a normalized strategy key) against *attrs*
        under the given wildcard token.

        Central hook through which all the ``_set_*_strategy`` helpers
        operate; implementation is provided by subclasses, and this base
        version always raises :exc:`NotImplementedError`.
        """
        raise NotImplementedError()
def process_compile_state_replaced_entities(
self,
compile_state: _ORMCompileState,
mapper_entities: Sequence[_MapperEntity],
) -> None:
if not compile_state.compile_options._enable_eagerloads:
return
# process is being run here so that the options given are validated
# against what the lead entities were, as well as to accommodate
# for the entities having been replaced with equivalents
self._process(
compile_state,
mapper_entities,
not bool(compile_state.current_path),
)
def process_compile_state(self, compile_state: _ORMCompileState) -> None:
if not compile_state.compile_options._enable_eagerloads:
return
self._process(
compile_state,
compile_state._lead_mapper_entities,
not bool(compile_state.current_path)
and not compile_state.compile_options._for_refresh_state,
)
    def _process(
        self,
        compile_state: _ORMCompileState,
        mapper_entities: Sequence[_MapperEntity],
        raiseerr: bool,
    ) -> None:
        """Apply this option against *mapper_entities* for the given
        compile state; *raiseerr* indicates whether entity-mismatch
        errors should be raised rather than ignored.

        implemented by subclasses"""
        raise NotImplementedError()
    @classmethod
    def _chop_path(
        cls,
        to_chop: _PathRepresentation,
        path: PathRegistry,
        debug: bool = False,
    ) -> Optional[_PathRepresentation]:
        """Strip from ``to_chop`` the leading portion that matches
        ``path.natural_path``.

        Returns the remaining tail of ``to_chop`` once the tokens of the
        natural path are consumed, ``to_chop`` unchanged when its first
        token is a default/wildcard string token, or ``None`` when the
        two paths diverge and therefore do not apply to each other.
        ``debug`` is accepted but unused here (presumably for caller
        parity — TODO confirm).
        """
        # start at -1 so the final slice returns all of to_chop when
        # zip() produces no pairs (either sequence empty)
        i = -1
        for i, (c_token, p_token) in enumerate(
            zip(to_chop, path.natural_path)
        ):
            if isinstance(c_token, str):
                # a leading default/wildcard token matches any path;
                # return the option path unconsumed
                if i == 0 and (
                    c_token.endswith(f":{_DEFAULT_TOKEN}")
                    or c_token.endswith(f":{_WILDCARD_TOKEN}")
                ):
                    return to_chop
                elif (
                    # a non-wildcard string token must equal the key of
                    # the corresponding natural-path entry
                    c_token != f"{_RELATIONSHIP_TOKEN}:{_WILDCARD_TOKEN}"
                    and c_token != p_token.key  # type: ignore
                ):
                    return None
            if c_token is p_token:
                # identical token objects: trivially matched
                continue
            elif (
                # a mapper token matches when it is the same mapper or a
                # subclass (isa) of the natural-path mapper
                isinstance(c_token, InspectionAttr)
                and insp_is_mapper(c_token)
                and insp_is_mapper(p_token)
                and c_token.isa(p_token)
            ):
                continue
            else:
                return None
        # everything up to and including index i matched; return the rest
        return to_chop[i + 1 :]
| _AbstractLoad |
python | allegroai__clearml | clearml/backend_api/services/v2_23/models.py | {
"start": 85716,
"end": 93045
} | class ____(Response):
"""
Response of models.get_all endpoint.
:param models: Models list
:type models: Sequence[Model]
:param scroll_id: Scroll ID that can be used with the next calls to get_all to
retrieve more data
:type scroll_id: str
"""
_service = "models"
_action = "get_all"
_version = "2.23"
_schema = {
"definitions": {
"metadata_item": {
"properties": {
"key": {
"description": "The key uniquely identifying the metadata item inside the given entity",
"type": ["string", "null"],
},
"type": {
"description": "The type of the metadata item",
"type": ["string", "null"],
},
"value": {
"description": "The value stored in the metadata item",
"type": ["string", "null"],
},
},
"type": "object",
},
"model": {
"properties": {
"comment": {
"description": "Model comment",
"type": ["string", "null"],
},
"company": {
"description": "Company id",
"type": ["string", "null"],
},
"created": {
"description": "Model creation time",
"format": "date-time",
"type": ["string", "null"],
},
"design": {
"additionalProperties": True,
"description": "Json object representing the model design. Should be identical to the network design of the task which created the model",
"type": ["object", "null"],
},
"framework": {
"description": "Framework on which the model is based. Should be identical to the framework of the task which created the model",
"type": ["string", "null"],
},
"id": {"description": "Model id", "type": ["string", "null"]},
"labels": {
"additionalProperties": {"type": "integer"},
"description": "Json object representing the ids of the labels in the model. The keys are the layers' names and the values are the ids.",
"type": ["object", "null"],
},
"last_update": {
"description": "Model last update time",
"format": "date-time",
"type": ["string", "null"],
},
"metadata": {
"additionalProperties": {"$ref": "#/definitions/metadata_item"},
"description": "Model metadata",
"type": ["object", "null"],
},
"name": {"description": "Model name", "type": ["string", "null"]},
"parent": {
"description": "Parent model ID",
"type": ["string", "null"],
},
"project": {
"description": "Associated project ID",
"type": ["string", "null"],
},
"ready": {
"description": "Indication if the model is final and can be used by other tasks",
"type": ["boolean", "null"],
},
"stats": {
"description": "Model statistics",
"properties": {
"labels_count": {
"description": "Number of the model labels",
"type": "integer",
}
},
"type": ["object", "null"],
},
"system_tags": {
"description": "System tags. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "User-defined tags",
"items": {"type": "string"},
"type": ["array", "null"],
},
"task": {
"description": "Task ID of task in which the model was created",
"type": ["string", "null"],
},
"ui_cache": {
"additionalProperties": True,
"description": "UI cache for this model",
"type": ["object", "null"],
},
"uri": {
"description": "URI for the model, pointing to the destination storage.",
"type": ["string", "null"],
},
"user": {
"description": "Associated user id",
"type": ["string", "null"],
},
},
"type": "object",
},
},
"properties": {
"models": {
"description": "Models list",
"items": {"$ref": "#/definitions/model"},
"type": ["array", "null"],
},
"scroll_id": {
"description": "Scroll ID that can be used with the next calls to get_all to retrieve more data",
"type": ["string", "null"],
},
},
"type": "object",
}
def __init__(self, models: Optional[List[Any]] = None, scroll_id: Optional[str] = None, **kwargs: Any) -> None:
super(GetAllResponse, self).__init__(**kwargs)
self.models = models
self.scroll_id = scroll_id
@schema_property("models")
def models(self) -> Optional[List[Any]]:
return self._property_models
@models.setter
def models(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_models = None
return
self.assert_isinstance(value, "models", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [Model.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "models", Model, is_array=True)
self._property_models = value
@schema_property("scroll_id")
def scroll_id(self) -> Optional[str]:
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value: Optional[str]) -> None:
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
| GetAllResponse |
python | paramiko__paramiko | paramiko/win_pageant.py | {
"start": 1874,
"end": 3470
} | class ____(ctypes.Structure):
"""
ctypes implementation of
http://msdn.microsoft.com/en-us/library/windows/desktop/ms649010%28v=vs.85%29.aspx
"""
_fields_ = [
("num_data", ULONG_PTR),
("data_size", ctypes.wintypes.DWORD),
("data_loc", ctypes.c_void_p),
]
def _query_pageant(msg):
"""
Communication with the Pageant process is done through a shared
memory-mapped file.
"""
hwnd = _get_pageant_window_object()
if not hwnd:
# Raise a failure to connect exception, pageant isn't running anymore!
return None
# create a name for the mmap
map_name = f"PageantRequest{thread.get_ident():08x}"
pymap = _winapi.MemoryMap(
map_name, _AGENT_MAX_MSGLEN, _winapi.get_security_attributes_for_user()
)
with pymap:
pymap.write(msg)
# Create an array buffer containing the mapped filename
char_buffer = array.array("b", b(map_name) + zero_byte) # noqa
char_buffer_address, char_buffer_size = char_buffer.buffer_info()
# Create a string to use for the SendMessage function call
cds = COPYDATASTRUCT(
_AGENT_COPYDATA_ID, char_buffer_size, char_buffer_address
)
response = ctypes.windll.user32.SendMessageA(
hwnd, win32con_WM_COPYDATA, ctypes.sizeof(cds), ctypes.byref(cds)
)
if response > 0:
pymap.seek(0)
datalen = pymap.read(4)
retlen = struct.unpack(">I", datalen)[0]
return datalen + pymap.read(retlen)
return None
| COPYDATASTRUCT |
python | pypa__warehouse | tests/unit/rate_limiting/test_core.py | {
"start": 228,
"end": 3355
} | class ____:
def test_basic(self, metrics):
limiter = RateLimiter(
storage.MemoryStorage(),
"1 per minute",
identifiers=["foo"],
metrics=metrics,
)
assert limiter.test("foo")
assert limiter.test("bar")
while limiter.hit("bar"):
pass
assert limiter.test("foo")
assert not limiter.test("bar")
def test_error(self, metrics):
limiter = RateLimiter(
storage.MemoryStorage(),
"1 per minute",
identifiers=["foo"],
metrics=metrics,
)
def raiser(*args, **kwargs):
raise redis.ConnectionError()
limiter._window = pretend.stub(hit=raiser, test=raiser, get_window_stats=raiser)
assert limiter.test("foo")
assert limiter.hit("foo")
assert limiter.resets_in("foo") is None
assert metrics.increment.calls == [
pretend.call("warehouse.ratelimiter.error", tags=["call:test"]),
pretend.call("warehouse.ratelimiter.error", tags=["call:hit"]),
pretend.call("warehouse.ratelimiter.error", tags=["call:resets_in"]),
]
def test_namespacing(self, metrics):
storage_ = storage.MemoryStorage()
limiter1 = RateLimiter(
storage_, "1 per minute", identifiers=["foo"], metrics=metrics
)
limiter2 = RateLimiter(storage_, "1 per minute", metrics=metrics)
assert limiter1.test("bar")
assert limiter2.test("bar")
while limiter1.hit("bar"):
pass
assert limiter2.test("bar")
assert not limiter1.test("bar")
def test_clear(self, metrics):
limiter = RateLimiter(storage.MemoryStorage(), "1 per minute", metrics=metrics)
assert limiter.test("foo")
while limiter.hit("foo"):
pass
assert not limiter.test("foo")
limiter.clear("foo")
assert limiter.test("foo")
def test_resets_in(self, metrics):
limiter = RateLimiter(storage.MemoryStorage(), "1 per minute", metrics=metrics)
assert limiter.resets_in("foo") is None
while limiter.hit("foo"):
pass
assert limiter.resets_in("foo") > datetime.timedelta(seconds=0)
assert limiter.resets_in("foo") < datetime.timedelta(seconds=60)
def test_resets_in_expired(self, metrics):
limiter = RateLimiter(
storage.MemoryStorage(),
"1 per minute; 1 per hour; 1 per day",
metrics=metrics,
)
current = datetime.datetime.now(tz=datetime.UTC)
stats = iter(
[
(0, 0),
((current + datetime.timedelta(seconds=60)).timestamp(), 0),
((current + datetime.timedelta(seconds=5)).timestamp(), 0),
]
)
limiter._window = pretend.stub(get_window_stats=lambda L, *a: next(stats))
resets_in = limiter.resets_in("foo")
assert resets_in > datetime.timedelta(seconds=0)
assert resets_in <= datetime.timedelta(seconds=5)
| TestRateLimiter |
python | coleifer__peewee | tests/shortcuts.py | {
"start": 1214,
"end": 1387
} | class ____(TestModel):
name = TextField()
students = ManyToManyField(Student, through_model=StudentCourseProxy,
backref='courses')
| Course |
python | django__django | tests/migrations/test_migrations_squashed_ref_squashed/app1/3_auto.py | {
"start": 35,
"end": 134
class ____(migrations.Migration):
    """Migration that depends on the ``2_auto`` migrations of both
    ``app1`` and ``app2``; declares no operations of its own here."""

    # must be applied after each app's 2_auto migration
    dependencies = [("app1", "2_auto"), ("app2", "2_auto")]
| Migration |
python | python-openxml__python-docx | tests/oxml/test_xmlchemy.py | {
"start": 4696,
"end": 7734
} | class ____:
def it_parses_a_line_to_help_compare(self, parse_fixture):
"""
This internal function is important to test separately because if it
doesn't parse a line properly, false equality can result.
"""
line, expected_front, expected_attrs = parse_fixture[:3]
expected_close, expected_text = parse_fixture[3:]
front, attrs, close, text = XmlString._parse_line(line)
# print("'%s' '%s' '%s' %s" % (
# front, attrs, close, ('%s' % text) if text else text))
assert front == expected_front
assert attrs == expected_attrs
assert close == expected_close
assert text == expected_text
def it_knows_if_two_xml_lines_are_equivalent(self, xml_line_case):
line, other, differs = xml_line_case
xml = XmlString(line)
assert xml == other
assert xml != differs
# fixtures ---------------------------------------------
@pytest.fixture(
params=[
("<a>text</a>", "<a", "", ">", "text</a>"),
("<a:f/>", "<a:f", "", "/>", None),
('<a:f b="c"/>', "<a:f", ' b="c"', "/>", None),
("<a:f>t</a:f>", "<a:f", "", ">", "t</a:f>"),
(
'<dcterms:created xsi:type="dcterms:W3CDTF">2013-12-23T23:15:00Z</dcterms:created>',
"<dcterms:created",
' xsi:type="dcterms:W3CDTF"',
">",
"2013-12-23T23:15:00Z</dcterms:created>",
),
]
)
def parse_fixture(self, request):
line, front, attrs, close, text = request.param
return line, front, attrs, close, text
@pytest.fixture(
params=[
"simple_elm",
"nsp_tagname",
"indent",
"attrs",
"nsdecl_order",
"closing_elm",
]
)
def xml_line_case(self, request):
cases = {
"simple_elm": (
"<name/>",
"<name/>",
"<name>",
),
"nsp_tagname": (
"<xyz:name/>",
"<xyz:name/>",
"<abc:name/>",
),
"indent": (
" <xyz:name/>",
" <xyz:name/>",
"<xyz:name/>",
),
"attrs": (
' <abc:Name foo="bar" bar="foo">',
' <abc:Name bar="foo" foo="bar">',
' <abc:Name far="boo" foo="bar">',
),
"nsdecl_order": (
' <name xmlns:a="http://ns/1" xmlns:b="http://ns/2"/>',
' <name xmlns:b="http://ns/2" xmlns:a="http://ns/1"/>',
' <name xmlns:b="http://ns/2" xmlns:a="http://ns/1">',
),
"closing_elm": (
"</xyz:name>",
"</xyz:name>",
"<xyz:name>",
),
}
line, other, differs = cases[request.param]
return line, other, differs
| DescribeXmlString |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-firebolt/destination_firebolt/writer.py | {
"start": 2523,
"end": 6660
} | class ____(FireboltWriter):
"""
Data writer using the S3 strategy. Data is buffered in memory
before being flushed to S3 in .parquet format. At the end of
the operation data is written to Firebolt databse from S3, allowing
greater ingestion speed.
"""
flush_interval = 100000
def __init__(self, connection: Connection, s3_bucket: str, access_key: str, secret_key: str, s3_region: str) -> None:
"""
:param connection: Firebolt SDK connection class with established connection
to the databse.
:param s3_bucket: Intermediate bucket to store the data files before writing them to Firebolt.
Has to be created and accessible.
:param access_key: AWS Access Key ID that has read/write/delete permissions on the files in the bucket.
:param secret_key: Corresponding AWS Secret Key.
:param s3_region: S3 region. Best to keep this the same as Firebolt database region. Default us-east-1.
"""
super().__init__(connection)
self.key_id = access_key
self.secret_key = secret_key
self.s3_bucket = s3_bucket
self._updated_tables = set()
self.unique_dir = f"{int(time())}_{uuid4()}"
self.fs = fs.S3FileSystem(access_key=access_key, secret_key=secret_key, region=s3_region)
def _flush(self) -> None:
"""
Intermediate data flush that's triggered during the
buffering operation. Uploads data stored in memory to the S3.
"""
for table, data in self._buffer.items():
key_list, ts_list, payload = zip(*data)
upload_data = [pa.array(key_list), pa.array(ts_list), pa.array(payload)]
pa_table = pa.table(upload_data, names=["_airbyte_ab_id", "_airbyte_emitted_at", "_airbyte_data"])
pq.write_to_dataset(table=pa_table, root_path=f"{self.s3_bucket}/airbyte_output/{self.unique_dir}/{table}", filesystem=self.fs)
# Update tables
self._updated_tables.update(self._buffer.keys())
self._buffer.clear()
self._values = 0
def flush(self) -> None:
"""
Flush any leftover data after ingestion and write from S3 to Firebolt.
Intermediate data on S3 and External Table will be deleted after write is complete.
"""
self._flush()
for table in self._updated_tables:
self.create_raw_table(table)
self.create_external_table(table)
self.ingest_data(table)
self.cleanup(table)
def create_external_table(self, name: str) -> None:
"""
Create Firebolt External Table to interface with the files on S3.
:param name: Stream name from which the table name is derived.
"""
query = f"""
CREATE EXTERNAL TABLE IF NOT EXISTS ex_airbyte_raw_{name} (
_airbyte_ab_id TEXT,
_airbyte_emitted_at TIMESTAMP,
_airbyte_data TEXT
)
URL = ?
CREDENTIALS = ( AWS_KEY_ID = ? AWS_SECRET_KEY = ? )
OBJECT_PATTERN = '*.parquet'
TYPE = (PARQUET);
"""
cursor = self.connection.cursor()
cursor.execute(query, parameters=(f"s3://{self.s3_bucket}/airbyte_output/{self.unique_dir}/{name}", self.key_id, self.secret_key))
def ingest_data(self, name: str) -> None:
"""
Write data from External Table to the _airbyte_raw table effectively
persisting data in Firebolt.
:param name: Stream name from which the table name is derived.
"""
query = f"INSERT INTO _airbyte_raw_{name} SELECT * FROM ex_airbyte_raw_{name}"
cursor = self.connection.cursor()
cursor.execute(query)
def cleanup(self, name: str) -> None:
"""
Clean intermediary External tables and wipe the S3 folder.
:param name: Stream name from which the table name is derived.
"""
cursor = self.connection.cursor()
cursor.execute(f"DROP TABLE IF EXISTS ex_airbyte_raw_{name}")
self.fs.delete_dir_contents(f"{self.s3_bucket}/airbyte_output/{self.unique_dir}/{name}")
| FireboltS3Writer |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/hooks/test_ssm.py | {
"start": 1169,
"end": 5689
} | class ____:
@pytest.fixture(
autouse=True,
params=[
pytest.param("String", id="unencrypted-string"),
pytest.param("SecureString", id="encrypted-string"),
],
)
def setup_tests(self, request):
with mock_aws():
self.hook = SsmHook(region_name=REGION)
self.param_type = request.param
self.hook.conn.put_parameter(
Type=self.param_type, Name=EXISTING_PARAM_NAME, Value=PARAM_VALUE, Overwrite=True
)
yield
def test_hook(self) -> None:
assert self.hook.conn is not None
assert self.hook.aws_conn_id == DEFAULT_CONN_ID
assert self.hook.region_name == REGION
@pytest.mark.parametrize(
("param_name", "default_value", "expected_result"),
[
pytest.param(EXISTING_PARAM_NAME, None, PARAM_VALUE, id="param_exists_no_default_provided"),
pytest.param(EXISTING_PARAM_NAME, DEFAULT_VALUE, PARAM_VALUE, id="param_exists_with_default"),
pytest.param(
BAD_PARAM_NAME, DEFAULT_VALUE, DEFAULT_VALUE, id="param_does_not_exist_uses_default"
),
],
)
def test_get_parameter_value_happy_cases(self, param_name, default_value, expected_result) -> None:
if default_value:
assert self.hook.get_parameter_value(param_name, default=default_value) == expected_result
else:
assert self.hook.get_parameter_value(param_name) == expected_result
@mock.patch("airflow.providers.amazon.aws.hooks.ssm.mask_secret")
def test_get_parameter_masking(self, mock_masker: mock.MagicMock):
self.hook.get_parameter_value(EXISTING_PARAM_NAME)
if self.param_type == "SecureString":
mock_masker.assert_called_once_with(PARAM_VALUE)
else:
mock_masker.assert_not_called()
def test_get_parameter_value_param_does_not_exist_no_default_provided(self) -> None:
with pytest.raises(botocore.exceptions.ClientError) as raised_exception:
self.hook.get_parameter_value(BAD_PARAM_NAME)
error = raised_exception.value.response["Error"]
assert error["Code"] == "ParameterNotFound"
assert BAD_PARAM_NAME in error["Message"]
@mock.patch("airflow.providers.amazon.aws.hooks.ssm.SsmHook.conn", new_callable=mock.PropertyMock)
def test_get_command_invocation(self, mock_conn):
command_id = "12345678-1234-1234-1234-123456789012"
instance_id = "i-1234567890abcdef0"
expected_response = {
"CommandId": command_id,
"InstanceId": instance_id,
"Status": "Success",
"ResponseCode": 0,
"StandardOutputContent": "Hello World",
"StandardErrorContent": "",
}
mock_conn.return_value.get_command_invocation.return_value = expected_response
result = self.hook.get_command_invocation(command_id, instance_id)
mock_conn.return_value.get_command_invocation.assert_called_once_with(
CommandId=command_id, InstanceId=instance_id
)
assert result == expected_response
@mock.patch("airflow.providers.amazon.aws.hooks.ssm.SsmHook.conn", new_callable=mock.PropertyMock)
def test_list_command_invocations(self, mock_conn):
command_id = "12345678-1234-1234-1234-123456789012"
expected_invocations = [
{"InstanceId": "i-111", "Status": "Success"},
{"InstanceId": "i-222", "Status": "Failed"},
]
expected_response = {"CommandInvocations": expected_invocations}
mock_conn.return_value.list_command_invocations.return_value = expected_response
result = self.hook.list_command_invocations(command_id)
mock_conn.return_value.list_command_invocations.assert_called_once_with(CommandId=command_id)
assert result == expected_response
@mock.patch("airflow.providers.amazon.aws.hooks.ssm.SsmHook.conn", new_callable=mock.PropertyMock)
def test_list_command_invocations_empty_response(self, mock_conn):
command_id = "12345678-1234-1234-1234-123456789012"
expected_response = {} # No CommandInvocations key
mock_conn.return_value.list_command_invocations.return_value = expected_response
result = self.hook.list_command_invocations(command_id)
mock_conn.return_value.list_command_invocations.assert_called_once_with(CommandId=command_id)
assert result == expected_response
| TestSsmHook |
python | astropy__astropy | astropy/extern/ply/lex.py | {
"start": 22447,
"end": 42905
} | class ____(object):
def __init__(self, ldict, log=None, reflags=0):
self.ldict = ldict
self.error_func = None
self.tokens = []
self.reflags = reflags
self.stateinfo = {'INITIAL': 'inclusive'}
self.modules = set()
self.error = False
self.log = PlyLogger(sys.stderr) if log is None else log
# Get all of the basic information
def get_all(self):
self.get_tokens()
self.get_literals()
self.get_states()
self.get_rules()
# Validate all of the information
def validate_all(self):
self.validate_tokens()
self.validate_literals()
self.validate_rules()
return self.error
# Get the tokens map
def get_tokens(self):
tokens = self.ldict.get('tokens', None)
if not tokens:
self.log.error('No token list is defined')
self.error = True
return
if not isinstance(tokens, (list, tuple)):
self.log.error('tokens must be a list or tuple')
self.error = True
return
if not tokens:
self.log.error('tokens is empty')
self.error = True
return
self.tokens = tokens
# Validate the tokens
def validate_tokens(self):
terminals = {}
for n in self.tokens:
if not _is_identifier.match(n):
self.log.error("Bad token name '%s'", n)
self.error = True
if n in terminals:
self.log.warning("Token '%s' multiply defined", n)
terminals[n] = 1
# Get the literals specifier
def get_literals(self):
self.literals = self.ldict.get('literals', '')
if not self.literals:
self.literals = ''
# Validate literals
def validate_literals(self):
try:
for c in self.literals:
if not isinstance(c, StringTypes) or len(c) > 1:
self.log.error('Invalid literal %s. Must be a single character', repr(c))
self.error = True
except TypeError:
self.log.error('Invalid literals specification. literals must be a sequence of characters')
self.error = True
def get_states(self):
self.states = self.ldict.get('states', None)
# Build statemap
if self.states:
if not isinstance(self.states, (tuple, list)):
self.log.error('states must be defined as a tuple or list')
self.error = True
else:
for s in self.states:
if not isinstance(s, tuple) or len(s) != 2:
self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')", repr(s))
self.error = True
continue
name, statetype = s
if not isinstance(name, StringTypes):
self.log.error('State name %s must be a string', repr(name))
self.error = True
continue
if not (statetype == 'inclusive' or statetype == 'exclusive'):
self.log.error("State type for state %s must be 'inclusive' or 'exclusive'", name)
self.error = True
continue
if name in self.stateinfo:
self.log.error("State '%s' already defined", name)
self.error = True
continue
self.stateinfo[name] = statetype
# Get all of the symbols with a t_ prefix and sort them into various
# categories (functions, strings, error functions, and ignore characters)
def get_rules(self):
tsymbols = [f for f in self.ldict if f[:2] == 't_']
# Now build up a list of functions and a list of strings
self.toknames = {} # Mapping of symbols to token names
self.funcsym = {} # Symbols defined as functions
self.strsym = {} # Symbols defined as strings
self.ignore = {} # Ignore strings by state
self.errorf = {} # Error functions by state
self.eoff = {} # EOF functions by state
for s in self.stateinfo:
self.funcsym[s] = []
self.strsym[s] = []
if len(tsymbols) == 0:
self.log.error('No rules of the form t_rulename are defined')
self.error = True
return
for f in tsymbols:
t = self.ldict[f]
states, tokname = _statetoken(f, self.stateinfo)
self.toknames[f] = tokname
if hasattr(t, '__call__'):
if tokname == 'error':
for s in states:
self.errorf[s] = t
elif tokname == 'eof':
for s in states:
self.eoff[s] = t
elif tokname == 'ignore':
line = t.__code__.co_firstlineno
file = t.__code__.co_filename
self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__)
self.error = True
else:
for s in states:
self.funcsym[s].append((f, t))
elif isinstance(t, StringTypes):
if tokname == 'ignore':
for s in states:
self.ignore[s] = t
if '\\' in t:
self.log.warning("%s contains a literal backslash '\\'", f)
elif tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", f)
self.error = True
else:
for s in states:
self.strsym[s].append((f, t))
else:
self.log.error('%s not defined as a function or string', f)
self.error = True
# Sort the functions by line number
for f in self.funcsym.values():
f.sort(key=lambda x: x[1].__code__.co_firstlineno)
# Sort the strings by regular expression length
for s in self.strsym.values():
s.sort(key=lambda x: len(x[1]), reverse=True)
# Validate all of the t_rules collected
def validate_rules(self):
for state in self.stateinfo:
# Validate all rules defined by functions
for fname, f in self.funcsym[state]:
line = f.__code__.co_firstlineno
file = f.__code__.co_filename
module = inspect.getmodule(f)
self.modules.add(module)
tokname = self.toknames[fname]
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = f.__code__.co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
self.error = True
continue
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
self.error = True
continue
if not _get_regex(f):
self.log.error("%s:%d: No regular expression defined for rule '%s'", file, line, f.__name__)
self.error = True
continue
try:
c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), self.reflags)
if c.match(''):
self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file, line, f.__name__)
self.error = True
except re.error as e:
self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file, line, f.__name__, e)
if '#' in _get_regex(f):
self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'", file, line, f.__name__)
self.error = True
# Validate all rules defined by strings
for name, r in self.strsym[state]:
tokname = self.toknames[name]
if tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", name)
self.error = True
continue
if tokname not in self.tokens and tokname.find('ignore_') < 0:
self.log.error("Rule '%s' defined for an unspecified token %s", name, tokname)
self.error = True
continue
try:
c = re.compile('(?P<%s>%s)' % (name, r), self.reflags)
if (c.match('')):
self.log.error("Regular expression for rule '%s' matches empty string", name)
self.error = True
except re.error as e:
self.log.error("Invalid regular expression for rule '%s'. %s", name, e)
if '#' in r:
self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'", name)
self.error = True
if not self.funcsym[state] and not self.strsym[state]:
self.log.error("No rules defined for state '%s'", state)
self.error = True
# Validate the error function
efunc = self.errorf.get(state, None)
if efunc:
f = efunc
line = f.__code__.co_firstlineno
file = f.__code__.co_filename
module = inspect.getmodule(f)
self.modules.add(module)
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = f.__code__.co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
self.error = True
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
self.error = True
for module in self.modules:
self.validate_module(module)
# -----------------------------------------------------------------------------
# validate_module()
#
# This checks to see if there are duplicated t_rulename() functions or strings
# in the parser input file. This is done using a simple regular expression
# match on each line in the source code of the given module.
# -----------------------------------------------------------------------------
def validate_module(self, module):
try:
lines, linen = inspect.getsourcelines(module)
except IOError:
return
fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
counthash = {}
linen += 1
for line in lines:
m = fre.match(line)
if not m:
m = sre.match(line)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
filename = inspect.getsourcefile(module)
self.log.error('%s:%d: Rule %s redefined. Previously defined on line %d', filename, linen, name, prev)
self.error = True
linen += 1
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(module=None, object=None, debug=False, optimize=False, lextab='lextab',
reflags=int(re.VERBOSE), nowarn=False, outputdir=None, debuglog=None, errorlog=None):
if lextab is None:
lextab = 'lextab'
global lexer
ldict = None
stateinfo = {'INITIAL': 'inclusive'}
lexobj = Lexer()
lexobj.lexoptimize = optimize
global token, input
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
if debug:
if debuglog is None:
debuglog = PlyLogger(sys.stderr)
# Get the module dictionary used for the lexer
if object:
module = object
# Get the module dictionary used for the parser
if module:
_items = [(k, getattr(module, k)) for k in dir(module)]
ldict = dict(_items)
# If no __file__ attribute is available, try to obtain it from the __module__ instead
if '__file__' not in ldict:
ldict['__file__'] = sys.modules[ldict['__module__']].__file__
else:
ldict = get_caller_module_dict(2)
# Determine if the module is package of a package or not.
# If so, fix the tabmodule setting so that tables load correctly
pkg = ldict.get('__package__')
if pkg and isinstance(lextab, str):
if '.' not in lextab:
lextab = pkg + '.' + lextab
# Collect parser information from the dictionary
linfo = LexerReflect(ldict, log=errorlog, reflags=reflags)
linfo.get_all()
if not optimize:
if linfo.validate_all():
raise SyntaxError("Can't build lexer")
if optimize and lextab:
try:
lexobj.readtab(lextab, ldict)
token = lexobj.token
input = lexobj.input
lexer = lexobj
return lexobj
except ImportError:
pass
# Dump some basic debugging information
if debug:
debuglog.info('lex: tokens = %r', linfo.tokens)
debuglog.info('lex: literals = %r', linfo.literals)
debuglog.info('lex: states = %r', linfo.stateinfo)
# Build a dictionary of valid token names
lexobj.lextokens = set()
for n in linfo.tokens:
lexobj.lextokens.add(n)
# Get literals specification
if isinstance(linfo.literals, (list, tuple)):
lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
else:
lexobj.lexliterals = linfo.literals
lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals)
# Get the stateinfo dictionary
stateinfo = linfo.stateinfo
regexs = {}
# Build the master regular expressions
for state in stateinfo:
regex_list = []
# Add rules defined by functions first
for fname, f in linfo.funcsym[state]:
regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f)))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state)
# Now add all of the simple rules
for name, r in linfo.strsym[state]:
regex_list.append('(?P<%s>%s)' % (name, r))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state)
regexs[state] = regex_list
# Build the master regular expressions
if debug:
debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====')
for state in regexs:
lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames)
lexobj.lexstatere[state] = lexre
lexobj.lexstateretext[state] = re_text
lexobj.lexstaterenames[state] = re_names
if debug:
for i, text in enumerate(re_text):
debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text)
# For inclusive states, we need to add the regular expressions from the INITIAL state
for state, stype in stateinfo.items():
if state != 'INITIAL' and stype == 'inclusive':
lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
lexobj.lexstateinfo = stateinfo
lexobj.lexre = lexobj.lexstatere['INITIAL']
lexobj.lexretext = lexobj.lexstateretext['INITIAL']
lexobj.lexreflags = reflags
# Set up ignore variables
lexobj.lexstateignore = linfo.ignore
lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '')
# Set up error functions
lexobj.lexstateerrorf = linfo.errorf
lexobj.lexerrorf = linfo.errorf.get('INITIAL', None)
if not lexobj.lexerrorf:
errorlog.warning('No t_error rule is defined')
# Set up eof functions
lexobj.lexstateeoff = linfo.eoff
lexobj.lexeoff = linfo.eoff.get('INITIAL', None)
# Check state information for ignore and error rules
for s, stype in stateinfo.items():
if stype == 'exclusive':
if s not in linfo.errorf:
errorlog.warning("No error rule is defined for exclusive state '%s'", s)
if s not in linfo.ignore and lexobj.lexignore:
errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
elif stype == 'inclusive':
if s not in linfo.errorf:
linfo.errorf[s] = linfo.errorf.get('INITIAL', None)
if s not in linfo.ignore:
linfo.ignore[s] = linfo.ignore.get('INITIAL', '')
# Create global versions of the token() and input() functions
token = lexobj.token
input = lexobj.input
lexer = lexobj
# If in optimize mode, we write the lextab
if lextab and optimize:
if outputdir is None:
# If no output directory is set, the location of the output files
# is determined according to the following rules:
# - If lextab specifies a package, files go into that package directory
# - Otherwise, files go in the same directory as the specifying module
if isinstance(lextab, types.ModuleType):
srcfile = lextab.__file__
else:
if '.' not in lextab:
srcfile = ldict['__file__']
else:
parts = lextab.split('.')
pkgname = '.'.join(parts[:-1])
exec('import %s' % pkgname)
srcfile = getattr(sys.modules[pkgname], '__file__', '')
outputdir = os.path.dirname(srcfile)
try:
lexobj.writetab(lextab, outputdir)
if lextab in sys.modules:
del sys.modules[lextab]
except IOError as e:
errorlog.warning("Couldn't write lextab module %r. %s" % (lextab, e))
return lexobj
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None, data=None):
if not data:
try:
filename = sys.argv[1]
f = open(filename)
data = f.read()
f.close()
except IndexError:
sys.stdout.write('Reading from standard input (type EOF to end):\n')
data = sys.stdin.read()
if lexer:
_input = lexer.input
else:
_input = input
_input(data)
if lexer:
_token = lexer.token
else:
_token = token
while True:
tok = _token()
if not tok:
break
sys.stdout.write('(%s,%r,%d,%d)\n' % (tok.type, tok.value, tok.lineno, tok.lexpos))
# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator function can be used to set the regex expression on a function
# when its docstring might need to be set in an alternative way
# -----------------------------------------------------------------------------
def TOKEN(r):
def set_regex(f):
if hasattr(r, '__call__'):
f.regex = _get_regex(r)
else:
f.regex = r
return f
return set_regex
# Alternative spelling of the TOKEN decorator
Token = TOKEN
| LexerReflect |
python | keras-team__keras | keras/src/layers/preprocessing/category_encoding.py | {
"start": 287,
"end": 6927
} | class ____(DataLayer):
"""A preprocessing layer which encodes integer features.
This layer provides options for condensing data into a categorical encoding
when the total number of tokens are known in advance. It accepts integer
values as inputs, and it outputs a dense or sparse representation of those
inputs. For integer inputs where the total number of tokens is not known,
use `keras.layers.IntegerLookup` instead.
**Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
(independently of which backend you're using).
Examples:
**One-hot encoding data**
>>> layer = keras.layers.CategoryEncoding(
... num_tokens=4, output_mode="one_hot")
>>> layer([3, 2, 0, 1])
array([[0., 0., 0., 1.],
[0., 0., 1., 0.],
[1., 0., 0., 0.],
[0., 1., 0., 0.]]>
**Multi-hot encoding data**
>>> layer = keras.layers.CategoryEncoding(
... num_tokens=4, output_mode="multi_hot")
>>> layer([[0, 1], [0, 0], [1, 2], [3, 1]])
array([[1., 1., 0., 0.],
[1., 0., 0., 0.],
[0., 1., 1., 0.],
[0., 1., 0., 1.]]>
**Using weighted inputs in `"count"` mode**
>>> layer = keras.layers.CategoryEncoding(
... num_tokens=4, output_mode="count")
>>> count_weights = np.array([[.1, .2], [.1, .1], [.2, .3], [.4, .2]])
>>> layer([[0, 1], [0, 0], [1, 2], [3, 1]], count_weights=count_weights)
array([[0.1, 0.2, 0. , 0. ],
[0.2, 0. , 0. , 0. ],
[0. , 0.2, 0.3, 0. ],
[0. , 0.2, 0. , 0.4]]>
Args:
num_tokens: The total number of tokens the layer should support. All
inputs to the layer must integers in the range `0 <= value <
num_tokens`, or an error will be thrown.
output_mode: Specification for the output of the layer.
Values can be `"one_hot"`, `"multi_hot"` or `"count"`,
configuring the layer as follows:
- `"one_hot"`: Encodes each individual element in the input
into an array of `num_tokens` size, containing a 1 at the
element index. If the last dimension is size 1, will encode
on that dimension. If the last dimension is not size 1,
will append a new dimension for the encoded output.
- `"multi_hot"`: Encodes each sample in the input into a single
array of `num_tokens` size, containing a 1 for each
vocabulary term present in the sample. Treats the last
dimension as the sample dimension, if input shape is
`(..., sample_length)`, output shape will be
`(..., num_tokens)`.
- `"count"`: Like `"multi_hot"`, but the int array contains a
count of the number of times the token at that index
appeared in the sample.
For all output modes, currently only output up to rank 2 is
supported.
Defaults to `"multi_hot"`.
sparse: Whether to return a sparse tensor; for backends that support
sparse tensors.
Call arguments:
inputs: A 1D or 2D tensor of integer inputs.
count_weights: A tensor in the same shape as `inputs` indicating the
weight for each sample value when summing up in `count` mode.
Not used in `"multi_hot"` or `"one_hot"` modes.
"""
def __init__(
self, num_tokens=None, output_mode="multi_hot", sparse=False, **kwargs
):
super().__init__(**kwargs)
# Support deprecated names for output_modes.
if output_mode == "binary":
output_mode = "multi_hot"
# 'output_mode' must be one of ("count", "one_hot", "multi_hot")
if output_mode not in ("count", "one_hot", "multi_hot"):
raise ValueError(f"Unknown arg for output_mode: {output_mode}")
if num_tokens is None:
raise ValueError(
"num_tokens must be set to use this layer. If the "
"number of tokens is not known beforehand, use the "
"IntegerLookup layer instead."
)
if num_tokens < 1:
raise ValueError(
f"`num_tokens` must be >= 1. Received: num_tokens={num_tokens}."
)
self.num_tokens = num_tokens
self.output_mode = output_mode
self.sparse = sparse
self._allow_non_tensor_positional_args = True
self._convert_input_args = False
def _encode(self, inputs, count_weights=None):
inputs = self.backend.core.convert_to_tensor(inputs)
return numerical_utils.encode_categorical_inputs(
inputs,
output_mode=self.output_mode,
depth=self.num_tokens,
dtype=self.dtype,
sparse=self.sparse,
count_weights=count_weights,
backend_module=self.backend,
)
def compute_output_shape(self, input_shape):
if (input_shape is not None) & (len(input_shape) == 0):
return (self.num_tokens,)
if self.output_mode == "one_hot":
if input_shape[-1] != 1:
return tuple(input_shape) + (self.num_tokens,)
elif len(input_shape) == 1:
return tuple(input_shape) + (self.num_tokens,)
else:
return tuple(input_shape[:-1]) + (self.num_tokens,)
return tuple(input_shape[:-1]) + (self.num_tokens,)
def compute_output_spec(self, inputs, count_weights=None):
output_shape = self.compute_output_shape(inputs.shape)
return KerasTensor(
output_shape, dtype=self.compute_dtype, sparse=self.sparse
)
def get_config(self):
config = {
"num_tokens": self.num_tokens,
"output_mode": self.output_mode,
}
base_config = super().get_config()
return {**base_config, **config}
def call(self, inputs, count_weights=None):
if count_weights is not None:
if self.output_mode != "count":
raise ValueError(
"`count_weights` is not used when `output_mode` is not "
f"`'count'`. Received `count_weights={count_weights}`."
)
count_weights = self.backend.convert_to_tensor(
count_weights, dtype=self.compute_dtype
)
outputs = self._encode(inputs, count_weights)
return backend_utils.convert_tf_tensor(outputs)
| CategoryEncoding |
python | bokeh__bokeh | src/bokeh/events.py | {
"start": 13707,
"end": 14657
} | class ____(PlotEvent):
''' Base class for UI events associated with a specific (x,y) point.
Attributes:
sx (float) : x-coordinate of the event in *screen* space
sy (float) : y-coordinate of the event in *screen* space
x (float) : x-coordinate of the event in *data* space
y (float) : y-coordinate of the event in *data* space
Note that data space coordinates are relative to the default range, not
any extra ranges, and the screen space origin is at the top left of
the HTML canvas.
'''
def __init__(
self,
model: Plot | None,
sx: float | None = None,
sy: float | None = None,
x: float | None = None,
y: float | None = None,
modifiers: KeyModifiers | None = None,
):
self.sx = sx
self.sy = sy
self.x = x
self.y = y
self.modifiers = modifiers
super().__init__(model=model)
| PointEvent |
python | google__pytype | pytype/abstract/mixin.py | {
"start": 6818,
"end": 8640
} | class ____(metaclass=MixinMeta):
"""An annotation containing inner types, such as a Union.
For example, in `Union[int, str]`, `int` and `str` are the annotation's inner
types. Classes that inherit from this mixin should implement:
get_inner_types(): Returns a sequence of (key, typ) of the inner types. A
Union's inner types can be keyed on their position: `[(0, int), (1, str)]`.
update_inner_type(key, typ): Updates the inner type with the given key.
replace(inner_types): Returns a new annotation that is a copy of the current
one but with the given inner types, again as a (key, typ) sequence.
"""
overloads: tuple[str, ...] = ("formal",)
def init_mixin(self) -> None:
self.processed = False
self._seen_for_formal = False # for calculating the 'formal' property
self._formal = None
@property
def formal(self) -> bool:
"""See BaseValue.formal."""
# We can't compute self.formal in __init__ because doing so would force
# evaluation of our type parameters during initialization, possibly
# leading to an infinite loop.
if self._formal is not None:
return self._formal
if self._seen_for_formal:
return False
self._seen_for_formal = True
formal = any(t.formal for _, t in self.get_inner_types())
self._seen_for_formal = False
if self.ctx.vm.late_annotations is None:
# Caching 'formal' is safe once all LateAnnotations have been resolved.
self._formal = formal
return formal
def get_inner_types(self) -> "Iterable[tuple[int | str, _base.BaseValue]]":
raise NotImplementedError()
def update_inner_type(self, key: int, typ: "_base.BaseValue"):
raise NotImplementedError()
def replace(self, inner_types: "Sequence[tuple[int, _base.BaseValue]]"):
raise NotImplementedError()
| NestedAnnotation |
python | tensorflow__tensorflow | tensorflow/python/ops/math_ops_test.py | {
"start": 48720,
"end": 49102
} | class ____(test_util.TensorFlowTestCase):
def test_complex_sign_gradient(self):
with context.eager_mode():
x = math_ops.complex(1., 1.)
with backprop.GradientTape() as t:
t.watch(x)
y = math_ops.sign(x)
self.assertAllClose(
t.gradient(y, x), math_ops.complex(0.353553, -0.353553))
@test_util.run_all_in_graph_and_eager_modes
| SignTest |
python | django-haystack__django-haystack | test_haystack/test_templatetags.py | {
"start": 223,
"end": 565
} | class ____(Highlighter):
def render_html(self, highlight_locations=None, start_offset=None, end_offset=None):
highlighted_chunk = self.text_block[start_offset:end_offset]
for word in self.query_words:
highlighted_chunk = highlighted_chunk.replace(word, "Bork!")
return highlighted_chunk
| BorkHighlighter |
python | has2k1__plotnine | plotnine/geoms/geom_point.py | {
"start": 514,
"end": 4607
} | class ____(geom):
"""
Plot points (Scatter plot)
{usage}
Parameters
----------
{common_parameters}
"""
DEFAULT_AES = {
"alpha": 1,
"color": "black",
"fill": None,
"shape": "o",
"size": 1.5,
"stroke": 0.5,
}
REQUIRED_AES = {"x", "y"}
NON_MISSING_AES = {"color", "shape", "size"}
DEFAULT_PARAMS = {
"stat": "identity",
"position": "identity",
"na_rm": False,
}
def draw_panel(
self,
data: pd.DataFrame,
panel_params: panel_view,
coord: coord,
ax: Axes,
):
"""
Plot all groups
"""
self.draw_group(data, panel_params, coord, ax, self.params)
@staticmethod
def draw_group(
data: pd.DataFrame,
panel_params: panel_view,
coord: coord,
ax: Axes,
params: dict[str, Any],
):
data = coord.transform(data, panel_params)
units = "shape"
for _, udata in data.groupby(units, dropna=False):
udata.reset_index(inplace=True, drop=True)
geom_point.draw_unit(udata, panel_params, coord, ax, params)
@staticmethod
def draw_unit(
data: pd.DataFrame,
panel_params: panel_view,
coord: coord,
ax: Axes,
params: dict[str, Any],
):
# Our size is in 'points' while scatter wants
# 'points^2'. The stroke is outside. And pi
# gives a large enough scaling factor
# All other sizes for which the MPL units should
# be in points must scaled using sqrt(pi)
size = ((data["size"] + data["stroke"]) ** 2) * np.pi
linewidth = data["stroke"] * SIZE_FACTOR
color = to_rgba(data["color"], data["alpha"])
shape = data["shape"].iloc[0]
# It is common to forget that scatter points are
# filled and slip-up by manually assigning to the
# color instead of the fill. We forgive.
if shape in FILLED_SHAPES:
if all(c is None for c in data["fill"]):
fill = color
else:
fill = to_rgba(data["fill"], data["alpha"])
else:
# Assume unfilled
fill = color
color = None
ax.scatter(
x=data["x"],
y=data["y"],
s=size,
facecolor=fill,
edgecolor=color,
linewidth=linewidth,
marker=shape,
zorder=params["zorder"],
rasterized=params["raster"],
)
@staticmethod
def draw_legend(
data: pd.Series[Any], da: DrawingArea, lyr: layer
) -> DrawingArea:
"""
Draw a point in the box
Parameters
----------
data : Series
Data Row
da : DrawingArea
Canvas
lyr : layer
Layer
Returns
-------
out : DrawingArea
"""
from matplotlib.lines import Line2D
if data["fill"] is None:
data["fill"] = data["color"]
size = (data["size"] + data["stroke"]) * SIZE_FACTOR
edgewidth = data["stroke"] * SIZE_FACTOR
fill = to_rgba(data["fill"], data["alpha"])
color = to_rgba(data["color"], data["alpha"])
key = Line2D(
[0.5 * da.width],
[0.5 * da.height],
marker=data["shape"],
markersize=size,
markerfacecolor=fill,
markeredgecolor=color,
markeredgewidth=edgewidth,
)
da.add_artist(key)
return da
@staticmethod
def legend_key_size(
data: pd.Series[Any], min_size: tuple[int, int], lyr: layer
) -> tuple[int, int]:
w, h = min_size
pad_w, pad_h = w * 0.5, h * 0.5
_size = data["size"] * SIZE_FACTOR
_edgewidth = 2 * data["stroke"] * SIZE_FACTOR
_w = _h = _size + _edgewidth
if data["color"] is not None:
w = max(w, _w + pad_w)
h = max(h, _h + pad_h)
return w, h
| geom_point |
python | python__mypy | mypyc/irbuild/function.py | {
"start": 26493,
"end": 48710
} | class ____(NamedTuple):
args: list[Value]
arg_names: list[str | None]
arg_kinds: list[ArgKind]
def get_args(builder: IRBuilder, rt_args: Sequence[RuntimeArg], line: int) -> ArgInfo:
# The environment operates on Vars, so we make some up
fake_vars = [(Var(arg.name), arg.type) for arg in rt_args]
args = [
builder.read(builder.add_local_reg(var, type, is_arg=True), line)
for var, type in fake_vars
]
arg_names = [
arg.name if arg.kind.is_named() or (arg.kind.is_optional() and not arg.pos_only) else None
for arg in rt_args
]
arg_kinds = [arg.kind for arg in rt_args]
return ArgInfo(args, arg_names, arg_kinds)
def gen_glue_method(
    builder: IRBuilder,
    base_sig: FuncSignature,
    target: FuncIR,
    cls: ClassIR,
    base: ClassIR,
    line: int,
    do_pycall: bool,
) -> FuncIR:
    """Generate glue methods that mediate between different method types in subclasses.

    For example, if we have:

    class A:
        def f(builder: IRBuilder, x: int) -> object: ...

    then it is totally permissible to have a subclass

    class B(A):
        def f(builder: IRBuilder, x: object) -> int: ...

    since '(object) -> int' is a subtype of '(int) -> object' by the usual
    contra/co-variant function subtyping rules.

    The trickiness here is that int and object have different
    runtime representations in mypyc, so A.f and B.f have
    different signatures at the native C level. To deal with this,
    we need to generate glue methods that mediate between the
    different versions by coercing the arguments and return
    values.

    If do_pycall is True, then make the call using the C API
    instead of a native call.
    """
    check_native_override(builder, base_sig, target.decl.sig, line)

    builder.enter()
    builder.ret_types[-1] = base_sig.ret_type

    rt_args = list(base_sig.args)
    if target.decl.kind == FUNC_NORMAL:
        # Narrow the self argument to the concrete subclass.
        rt_args[0] = RuntimeArg(base_sig.args[0].name, RInstance(cls))

    arg_info = get_args(builder, rt_args, line)
    args, arg_kinds, arg_names = arg_info.args, arg_info.arg_kinds, arg_info.arg_names

    bitmap_args = None
    if base_sig.num_bitmap_args:
        # Trailing bitmap args encode which optional arguments were provided;
        # strip them from the visible argument list and forward them separately.
        args = args[: -base_sig.num_bitmap_args]
        arg_kinds = arg_kinds[: -base_sig.num_bitmap_args]
        arg_names = arg_names[: -base_sig.num_bitmap_args]
        bitmap_args = list(builder.builder.args[-base_sig.num_bitmap_args :])

    # We can do a passthrough *args/**kwargs with a native call, but if the
    # args need to get distributed out to arguments, we just let python handle it
    if any(kind.is_star() for kind in arg_kinds) and any(
        not arg.kind.is_star() for arg in target.decl.sig.args
    ):
        do_pycall = True

    if do_pycall:
        if target.decl.kind == FUNC_STATICMETHOD:
            # FIXME: this won't work if we can do interpreted subclasses
            first = builder.builder.get_native_type(cls)
            st = 0
        else:
            first = args[0]
            st = 1
        retval = builder.builder.py_method_call(
            first, target.name, args[st:], line, arg_kinds[st:], arg_names[st:]
        )
    else:
        retval = builder.builder.call(
            target.decl, args, arg_kinds, arg_names, line, bitmap_args=bitmap_args
        )
    # Coerce the result back to the base signature's return representation.
    retval = builder.coerce(retval, base_sig.ret_type, line)
    builder.add(Return(retval))

    arg_regs, _, blocks, ret_type, _ = builder.leave()
    if base_sig.num_bitmap_args:
        rt_args = rt_args[: -base_sig.num_bitmap_args]
    return FuncIR(
        FuncDecl(
            target.name + "__" + base.name + "_glue",
            cls.name,
            builder.module_name,
            FuncSignature(rt_args, ret_type),
            target.decl.kind,
            is_coroutine=target.decl.is_coroutine,
        ),
        arg_regs,
        blocks,
    )
def check_native_override(
    builder: IRBuilder, base_sig: FuncSignature, sub_sig: FuncSignature, line: int
) -> None:
    """Report an error if an override changes signature in unsupported ways.

    Glue methods can work around many signature changes but not all of them.
    Only the overlapping prefix of "real" (non-bitmap) arguments is compared.
    """
    for base_arg, sub_arg in zip(base_sig.real_args(), sub_sig.real_args()):
        if base_arg.type.error_overlap:
            if not base_arg.optional and sub_arg.optional and base_sig.num_bitmap_args:
                # This would change the meanings of bits in the argument defaults
                # bitmap, which we don't support. We'd need to do tricky bit
                # manipulations to support this generally.
                builder.error(
                    "An argument with type "
                    + f'"{base_arg.type}" cannot be given a default value in a method override',
                    line,
                )
        if base_arg.type.error_overlap or sub_arg.type.error_overlap:
            if not is_same_type(base_arg.type, sub_arg.type):
                # This would change from signaling a default via an error value to
                # signaling a default via bitmap, which we don't support.
                builder.error(
                    "Incompatible argument type "
                    + f'"{sub_arg.type}" (base class has type "{base_arg.type}")',
                    line,
                )
def gen_glue_property(
    builder: IRBuilder,
    sig: FuncSignature,
    target: FuncIR,
    cls: ClassIR,
    base: ClassIR,
    line: int,
    do_pygetattr: bool,
) -> FuncIR:
    """Generate glue methods for properties that mediate between different subclass types.

    Similarly to methods, properties of derived types can be covariantly subtyped. Thus,
    properties also require glue. However, this only requires the return type to change.
    Further, instead of a method call, an attribute get is performed.

    If do_pygetattr is True, then get the attribute using the Python C
    API instead of a native call.
    """
    builder.enter()

    rt_arg = RuntimeArg(SELF_NAME, RInstance(cls))
    self_target = builder.add_self_to_env(cls)
    arg = builder.read(self_target, line)
    builder.ret_types[-1] = sig.ret_type
    if do_pygetattr:
        retval = builder.py_get_attr(arg, target.name, line)
    else:
        retval = builder.add(GetAttr(arg, target.name, line))
    # Coerce to the base property's return representation.
    retbox = builder.coerce(retval, sig.ret_type, line)
    builder.add(Return(retbox))

    args, _, blocks, return_type, _ = builder.leave()
    return FuncIR(
        FuncDecl(
            target.name + "__" + base.name + "_glue",
            cls.name,
            builder.module_name,
            FuncSignature([rt_arg], return_type),
        ),
        args,
        blocks,
    )
def get_func_target(builder: IRBuilder, fdef: FuncDef) -> AssignmentTarget:
    """Given a FuncDef, return the target for the instance of its callable class.

    If the function was not already defined somewhere, then define it
    and add it to the current environment.
    """
    if fdef.original_def:
        # Get the target associated with the previously defined FuncDef.
        return builder.lookup(fdef.original_def)

    if builder.fn_info.is_generator or builder.fn_info.add_nested_funcs_to_env:
        # Generators and functions with nested functions keep their targets
        # in the symbol table rather than a fresh register.
        return builder.lookup(fdef)

    return builder.add_local_reg(fdef, object_rprimitive)
# This function still does not support the following imports.
# import json as _json
# from json import decoder
# Using either _json.JSONDecoder or decoder.JSONDecoder as a type hint for a dataclass field will fail.
# See issue mypyc/mypyc#1099.
def load_type(builder: IRBuilder, typ: TypeInfo, unbounded_type: Type | None, line: int) -> Value:
    """Emit IR that loads the class object for *typ* at runtime.

    Resolution order: native compiled class, C-level builtins table,
    attribute traversal from an imported module, then a plain global
    name lookup as a last resort.
    """
    # typ.fullname contains the module where the class object was defined. However, it is possible
    # that the class object's module was not imported in the file currently being compiled. So, we
    # use unbounded_type.name (if provided by caller) to load the class object through one of the
    # imported modules.
    # Example: for `json.JSONDecoder`, typ.fullname is `json.decoder.JSONDecoder` but the Python
    # file may import `json` not `json.decoder`.
    # Another corner case: The Python file being compiled imports mod1 and has a type hint
    # `mod1.OuterClass.InnerClass`. But, mod1/__init__.py might import OuterClass like this:
    # `from mod2.mod3 import OuterClass`. In this case, typ.fullname is
    # `mod2.mod3.OuterClass.InnerClass` and `unbounded_type.name` is `mod1.OuterClass.InnerClass`.
    # So, we must use unbounded_type.name to load the class object.
    # See issue mypyc/mypyc#1087.
    load_attr_path = (
        unbounded_type.name if isinstance(unbounded_type, UnboundType) else typ.fullname
    ).removesuffix(f".{typ.name}")
    if typ in builder.mapper.type_to_ir:
        class_ir = builder.mapper.type_to_ir[typ]
        class_obj = builder.builder.get_native_type(class_ir)
    elif typ.fullname in builtin_names:
        builtin_addr_type, src = builtin_names[typ.fullname]
        class_obj = builder.add(LoadAddress(builtin_addr_type, src, line))
    # This elif-condition finds the longest import that matches the load_attr_path.
    elif module_name := max(
        (i for i in builder.imports if load_attr_path == i or load_attr_path.startswith(f"{i}.")),
        default="",
        key=len,
    ):
        # Load the imported module.
        loaded_module = builder.load_module(module_name)
        # Recursively load attributes of the imported module. These may be submodules, classes or
        # any other object.
        for attr in (
            load_attr_path.removeprefix(f"{module_name}.").split(".")
            if load_attr_path != module_name
            else []
        ):
            loaded_module = builder.py_get_attr(loaded_module, attr, line)
        class_obj = builder.builder.get_attr(
            loaded_module, typ.name, object_rprimitive, line, borrow=False
        )
    else:
        # Fall back to looking the name up in the module globals.
        class_obj = builder.load_global_str(typ.name, line)

    return class_obj
def load_func(builder: IRBuilder, func_name: str, fullname: str | None, line: int) -> Value:
    """Emit IR that loads the function object named *func_name* at runtime."""
    if fullname and not fullname.startswith(builder.current_module):
        # we're calling a function in a different module

        # We can't use load_module_attr_by_fullname here because we need to load the function using
        # func_name, not the name specified by fullname (which can be different for underscore
        # function)
        # NOTE(review): rsplit(".") with no maxsplit splits on every dot, so
        # [0] is the *top-level* package name, not the parent module — confirm
        # this is intended (vs. rsplit(".", 1)[0]).
        module = fullname.rsplit(".")[0]
        loaded_module = builder.load_module(module)

        func = builder.py_get_attr(loaded_module, func_name, line)
    else:
        func = builder.load_global_str(func_name, line)
    return func
def generate_singledispatch_dispatch_function(
    builder: IRBuilder, main_singledispatch_function_name: str, fitem: FuncDef
) -> None:
    """Emit the dispatch body: resolve the implementation registered for
    type(arg0) — via a per-type cache, falling back to
    functools._find_impl — and call it."""
    line = fitem.line
    current_func_decl = builder.mapper.func_to_decl[fitem]
    arg_info = get_args(builder, current_func_decl.sig.args, line)

    dispatch_func_obj = builder.self()

    arg_type = builder.builder.get_type_of_obj(arg_info.args[0], line)
    dispatch_cache = builder.builder.get_attr(
        dispatch_func_obj, "dispatch_cache", dict_rprimitive, line
    )
    call_find_impl, use_cache, call_func = BasicBlock(), BasicBlock(), BasicBlock()
    get_result = builder.primitive_op(dict_get_method_with_none, [dispatch_cache, arg_type], line)
    is_not_none = builder.translate_is_op(get_result, builder.none_object(), "is not", line)
    impl_to_use = Register(object_rprimitive)
    builder.add_bool_branch(is_not_none, use_cache, call_find_impl)

    # Fast path: cache hit for this argument type.
    builder.activate_block(use_cache)
    builder.assign(impl_to_use, get_result, line)
    builder.goto(call_func)

    # Slow path: ask functools._find_impl, then memoize the answer.
    builder.activate_block(call_find_impl)
    find_impl = builder.load_module_attr_by_fullname("functools._find_impl", line)
    registry = load_singledispatch_registry(builder, dispatch_func_obj, line)
    uncached_impl = builder.py_call(find_impl, [arg_type, registry], line)
    builder.call_c(exact_dict_set_item_op, [dispatch_cache, arg_type, uncached_impl], line)
    builder.assign(impl_to_use, uncached_impl, line)
    builder.goto(call_func)

    builder.activate_block(call_func)
    gen_calls_to_correct_impl(builder, impl_to_use, arg_info, fitem, line)
def gen_calls_to_correct_impl(
    builder: IRBuilder, impl_to_use: Value, arg_info: ArgInfo, fitem: FuncDef, line: int
) -> None:
    """Call *impl_to_use*: native implementations are registered as small
    int IDs and dispatched via a chain of comparisons; anything else is
    called through the Python C API."""
    current_func_decl = builder.mapper.func_to_decl[fitem]

    def gen_native_func_call_and_return(fdef: FuncDef) -> None:
        # Native call, coerced to the dispatch function's return type.
        func_decl = builder.mapper.func_to_decl[fdef]
        ret_val = builder.builder.call(
            func_decl, arg_info.args, arg_info.arg_kinds, arg_info.arg_names, line
        )
        coerced = builder.coerce(ret_val, current_func_decl.sig.ret_type, line)
        builder.add(Return(coerced))

    typ, src = builtin_names["builtins.int"]
    int_type_obj = builder.add(LoadAddress(typ, src, line))
    # An int impl object means "native implementation with this ID".
    is_int = builder.builder.type_is_op(impl_to_use, int_type_obj, line)

    native_call, non_native_call = BasicBlock(), BasicBlock()
    builder.add_bool_branch(is_int, native_call, non_native_call)
    builder.activate_block(native_call)
    passed_id = builder.add(Unbox(impl_to_use, int_rprimitive, line))

    native_ids = get_native_impl_ids(builder, fitem)
    for impl, i in native_ids.items():
        call_impl, next_impl = BasicBlock(), BasicBlock()
        current_id = builder.load_int(i)
        cond = builder.binary_op(passed_id, current_id, "==", line)
        builder.add_bool_branch(cond, call_impl, next_impl)

        # Call the registered implementation
        builder.activate_block(call_impl)

        gen_native_func_call_and_return(impl)
        builder.activate_block(next_impl)

    # We've already handled all the possible integer IDs, so we should never get here
    builder.add(Unreachable())

    builder.activate_block(non_native_call)
    ret_val = builder.py_call(
        impl_to_use, arg_info.args, line, arg_info.arg_kinds, arg_info.arg_names
    )
    coerced = builder.coerce(ret_val, current_func_decl.sig.ret_type, line)
    builder.add(Return(coerced))
def gen_dispatch_func_ir(
    builder: IRBuilder, fitem: FuncDef, main_func_name: str, dispatch_name: str, sig: FuncSignature
) -> tuple[FuncIR, Value]:
    """Create a dispatch function (a function that checks the first argument type and dispatches
    to the correct implementation).

    Returns the glue FuncIR for the dispatch function and the value holding
    the instantiated callable-class object.
    """
    builder.enter(FuncInfo(fitem, dispatch_name))
    setup_callable_class(builder)
    # The callable class carries the singledispatch state as attributes.
    builder.fn_info.callable_class.ir.attributes["registry"] = dict_rprimitive
    builder.fn_info.callable_class.ir.attributes["dispatch_cache"] = dict_rprimitive
    builder.fn_info.callable_class.ir.has_dict = True
    builder.fn_info.callable_class.ir.needs_getseters = True
    generate_singledispatch_callable_class_ctor(builder)

    generate_singledispatch_dispatch_function(builder, main_func_name, fitem)
    args, _, blocks, _, fn_info = builder.leave()
    dispatch_callable_class = add_call_to_callable_class(builder, args, blocks, sig, fn_info)
    builder.functions.append(dispatch_callable_class)
    add_get_to_callable_class(builder, fn_info)
    # `register` mirrors functools.singledispatch's decorator API.
    add_register_method_to_callable_class(builder, fn_info)
    func_reg = instantiate_callable_class(builder, fn_info)
    dispatch_func_ir = generate_dispatch_glue_native_function(
        builder, fitem, dispatch_callable_class.decl, dispatch_name
    )

    return dispatch_func_ir, func_reg
def generate_dispatch_glue_native_function(
    builder: IRBuilder, fitem: FuncDef, callable_class_decl: FuncDecl, dispatch_name: str
) -> FuncIR:
    """Generate a native function with the original signature that forwards
    all arguments to the dispatch callable class's __call__."""
    line = fitem.line
    builder.enter()
    # We store the callable class in the globals dict for this function
    callable_class = builder.load_global_str(dispatch_name, line)
    decl = builder.mapper.func_to_decl[fitem]
    arg_info = get_args(builder, decl.sig.args, line)
    # Prepend the callable-class instance as the `self` argument.
    args = [callable_class] + arg_info.args
    arg_kinds = [ArgKind.ARG_POS] + arg_info.arg_kinds
    arg_names = arg_info.arg_names
    arg_names.insert(0, "self")
    ret_val = builder.builder.call(callable_class_decl, args, arg_kinds, arg_names, line)
    builder.add(Return(ret_val))
    arg_regs, _, blocks, _, fn_info = builder.leave()
    return FuncIR(decl, arg_regs, blocks)
def generate_singledispatch_callable_class_ctor(builder: IRBuilder) -> None:
    """Create an __init__ that sets registry and dispatch_cache to empty dicts"""
    line = -1
    class_ir = builder.fn_info.callable_class.ir
    with builder.enter_method(class_ir, "__init__", bool_rprimitive):
        empty_dict = builder.call_c(dict_new_op, [], line)
        builder.add(SetAttr(builder.self(), "registry", empty_dict, line))
        cache_dict = builder.call_c(dict_new_op, [], line)
        dispatch_cache_str = builder.load_str("dispatch_cache")
        # use the py_setattr_op instead of SetAttr so that it also gets added to our __dict__
        builder.primitive_op(py_setattr_op, [builder.self(), dispatch_cache_str, cache_dict], line)
        # the generated C code seems to expect that __init__ returns a char, so just return 1
        builder.add(Return(Integer(1, bool_rprimitive, line), line))
def add_register_method_to_callable_class(builder: IRBuilder, fn_info: FuncInfo) -> None:
    """Add a `register(cls, func=None)` method to the dispatch callable class,
    delegating the actual registration to the C-level register_function."""
    line = -1
    with builder.enter_method(fn_info.callable_class.ir, "register", object_rprimitive):
        cls_arg = builder.add_argument("cls", object_rprimitive)
        # func is optional so `register` can be used as a plain decorator.
        func_arg = builder.add_argument("func", object_rprimitive, ArgKind.ARG_OPT)
        ret_val = builder.call_c(register_function, [builder.self(), cls_arg, func_arg], line)
        builder.add(Return(ret_val, line))
def load_singledispatch_registry(builder: IRBuilder, dispatch_func_obj: Value, line: int) -> Value:
    # Fetch the `registry` dict attribute off the dispatch callable object.
    return builder.builder.get_attr(dispatch_func_obj, "registry", dict_rprimitive, line)
def singledispatch_main_func_name(orig_name: str) -> str:
    """Return the mangled name used for the main singledispatch implementation."""
    return "__mypyc_singledispatch_main_function_" + orig_name + "__"
def maybe_insert_into_registry_dict(builder: IRBuilder, fitem: FuncDef) -> None:
    """If *fitem* is a singledispatch main function or a registered
    implementation, emit IR that updates the relevant registry dicts and
    clears the dispatch caches."""
    line = fitem.line
    is_singledispatch_main_func = fitem in builder.singledispatch_impls
    # dict of singledispatch_func to list of register_types (fitem is the function to register)
    to_register: defaultdict[FuncDef, list[TypeInfo]] = defaultdict(list)
    for main_func, impls in builder.singledispatch_impls.items():
        for dispatch_type, impl in impls:
            if fitem == impl:
                to_register[main_func].append(dispatch_type)

    if not to_register and not is_singledispatch_main_func:
        return

    if is_singledispatch_main_func:
        # Seed the registry with {object: main implementation}.
        main_func_name = singledispatch_main_func_name(fitem.name)
        main_func_obj = load_func(builder, main_func_name, fitem.fullname, line)

        loaded_object_type = builder.load_module_attr_by_fullname("builtins.object", line)
        registry_dict = builder.builder.make_dict([(loaded_object_type, main_func_obj)], line)

        dispatch_func_obj = builder.load_global_str(fitem.name, line)
        builder.primitive_op(
            py_setattr_op, [dispatch_func_obj, builder.load_str("registry"), registry_dict], line
        )

    for singledispatch_func, types in to_register.items():
        # TODO: avoid recomputing the native IDs for all the functions every time we find a new
        # function
        native_ids = get_native_impl_ids(builder, singledispatch_func)
        if fitem not in native_ids:
            # Non-native (e.g. decorated) implementation: store the function object.
            to_insert = load_func(builder, fitem.name, fitem.fullname, line)
        else:
            # Native implementation: store its small-int ID for fast dispatch.
            current_id = native_ids[fitem]
            load_literal = LoadLiteral(current_id, object_rprimitive)
            to_insert = builder.add(load_literal)
        # TODO: avoid reloading the registry here if we just created it
        dispatch_func_obj = load_func(
            builder, singledispatch_func.name, singledispatch_func.fullname, line
        )
        registry = load_singledispatch_registry(builder, dispatch_func_obj, line)
        for typ in types:
            loaded_type = load_type(builder, typ, None, line)
            builder.call_c(exact_dict_set_item_op, [registry, loaded_type, to_insert], line)
        # Stale cache entries could now resolve to the wrong impl.
        dispatch_cache = builder.builder.get_attr(
            dispatch_func_obj, "dispatch_cache", dict_rprimitive, line
        )
        builder.gen_method_call(dispatch_cache, "clear", [], None, line)
def get_native_impl_ids(builder: IRBuilder, singledispatch_func: FuncDef) -> dict[FuncDef, int]:
    """Return a dict of registered implementation to native implementation ID for all
    implementations

    IDs are the implementation's position in registration order; decorated
    implementations are excluded (they are called through the Python API).
    """
    impls = builder.singledispatch_impls[singledispatch_func]
    return {impl: i for i, (typ, impl) in enumerate(impls) if not is_decorated(builder, impl)}
def gen_property_getter_ir(
    builder: IRBuilder, func_decl: FuncDecl, cdef: ClassDef, is_trait: bool
) -> FuncIR:
    """Generate an implicit trivial property getter for an attribute.

    These are used if an attribute can also be accessed as a property.
    """
    name = func_decl.name
    builder.enter(name)
    self_reg = builder.add_argument("self", func_decl.sig.args[0].type)
    if not is_trait:
        value = builder.builder.get_attr(self_reg, name, func_decl.sig.ret_type, -1)
        builder.add(Return(value))
    else:
        # Trait version is a placeholder: its body is never executed.
        builder.add(Unreachable())

    args, _, blocks, ret_type, fn_info = builder.leave()
    return FuncIR(func_decl, args, blocks)
def gen_property_setter_ir(
    builder: IRBuilder, func_decl: FuncDecl, cdef: ClassDef, is_trait: bool
) -> FuncIR:
    """Generate an implicit trivial property setter for an attribute.

    These are used if an attribute can also be accessed as a property.
    """
    name = func_decl.name
    builder.enter(name)
    self_reg = builder.add_argument("self", func_decl.sig.args[0].type)
    value_reg = builder.add_argument("value", func_decl.sig.args[1].type)
    # Setter names are mangled; recover the underlying attribute name.
    assert name.startswith(PROPSET_PREFIX)
    attr_name = name[len(PROPSET_PREFIX) :]
    if not is_trait:
        builder.add(SetAttr(self_reg, attr_name, value_reg, -1))
    # Setters return None regardless of branch (trait bodies are never run).
    builder.add(Return(builder.none()))

    args, _, blocks, ret_type, fn_info = builder.leave()
    return FuncIR(func_decl, args, blocks)
| ArgInfo |
python | ethereum__web3.py | web3/contract/async_contract.py | {
"start": 11965,
"end": 12311
} | class ____(BaseContractFunctions[AsyncContractFunction]):
def __init__(
    self,
    abi: ABI,
    w3: "AsyncWeb3[Any]",
    address: ChecksumAddress | None = None,
    decode_tuples: bool | None = False,
) -> None:
    """Build the async function container, binding AsyncContractFunction
    as the per-function factory class for the base implementation."""
    super().__init__(abi, w3, AsyncContractFunction, address, decode_tuples)
| AsyncContractFunctions |
python | getsentry__sentry | src/sentry/apidocs/parameters.py | {
"start": 15074,
"end": 16099
} | class ____:
# Path parameter identifying a single workflow.
WORKFLOW_ID = OpenApiParameter(
    name="workflow_id",
    location="path",
    required=True,
    type=int,
    description="The ID of the workflow you'd like to query.",
)

# Optional free-text search filter.
QUERY = OpenApiParameter(
    name="query",
    location="query",
    required=False,
    type=str,
    description="An optional search query for filtering workflows.",
)

# Optional sort field; `-` prefix means descending (see description).
SORT_BY = OpenApiParameter(
    name="sortBy",
    location="query",
    required=False,
    type=str,
    description="""The field to sort results by. If not specified, the results are sorted by id.
Available fields are:
- `name`
- `id`
- `dateCreated`
- `dateUpdated`
- `connectedDetectors`
- `actions`
Prefix with `-` to sort in descending order.
""",
)

# Repeatable query parameter for filtering by explicit workflow IDs.
ID = OpenApiParameter(
    name="id",
    location="query",
    required=False,
    type=int,
    description="The ID of the workflow you'd like to query.",
    many=True,
)
| WorkflowParams |
python | sympy__sympy | sympy/codegen/cfunctions.py | {
"start": 8539,
"end": 9676
} | class ____(Function): # 'sqrt' already defined in sympy.functions.elementary.miscellaneous
"""
Represents the square root function.
Explanation
===========
The reason why one would use ``Sqrt(x)`` over ``sqrt(x)``
is that the latter is internally represented as ``Pow(x, S.Half)`` which
may not be what one wants when doing code-generation.
Examples
========
>>> from sympy.abc import x
>>> from sympy.codegen.cfunctions import Sqrt
>>> Sqrt(x)
Sqrt(x)
>>> Sqrt(x).diff(x)
1/(2*sqrt(x))
See Also
========
Cbrt
"""
nargs = 1
def fdiff(self, argindex=1):
    """
    Returns the first derivative of this function.

    d/dx sqrt(x) = 1/(2*sqrt(x)), expressed here as x**(-1/2)/2.
    """
    if argindex == 1:
        return Pow(self.args[0], Rational(-1, 2))/_Two
    else:
        # Unary function: any other argindex is invalid.
        raise ArgumentIndexError(self, argindex)
def _eval_expand_func(self, **hints):
    # Expand into the canonical Pow form x**(1/2).
    return _Sqrt(*self.args)

def _eval_rewrite_as_Pow(self, arg, **kwargs):
    return _Sqrt(arg)

# Rewriting to a "tractable" form is the same as rewriting to Pow.
_eval_rewrite_as_tractable = _eval_rewrite_as_Pow
def _Cbrt(x):
    # Plain-Pow form of the cube root, used when expanding/rewriting Cbrt.
    return Pow(x, Rational(1, 3))
| Sqrt |
python | tqdm__tqdm | tqdm/std.py | {
"start": 6939,
"end": 7811
} | class ____(object):
"""
Exponential moving average: smoothing to give progressively lower
weights to older values.
Parameters
----------
smoothing : float, optional
Smoothing factor in range [0, 1], [default: 0.3].
Increase to give more weight to recent values.
Ranges from 0 (yields old value) to 1 (yields new value).
"""
def __init__(self, smoothing=0.3):
    # Weight applied to the newest sample; (1 - alpha) decays the history.
    self.alpha = smoothing
    # Running (biased) average and update count, used for bias correction.
    self.last = 0
    self.calls = 0
def __call__(self, x=None):
"""
Parameters
----------
x : float
New value to include in EMA.
"""
beta = 1 - self.alpha
if x is not None:
self.last = self.alpha * x + beta * self.last
self.calls += 1
return self.last / (1 - beta ** self.calls) if self.calls else self.last
| EMA |
python | keon__algorithms | tests/test_map.py | {
"start": 2722,
"end": 4327
} | class ____(unittest.TestCase):
def test_one_entry(self):
    # Basic put/get round trip.
    m = SeparateChainingHashTable(10)
    m.put(1, '1')
    self.assertEqual('1', m.get(1))

def test_two_entries_with_same_hash(self):
    # 1 and 11 collide in a table of size 10; both must survive chaining.
    m = SeparateChainingHashTable(10)
    m.put(1, '1')
    m.put(11, '11')
    self.assertEqual('1', m.get(1))
    self.assertEqual('11', m.get(11))

def test_len_trivial(self):
    # len grows by one per distinct key inserted.
    m = SeparateChainingHashTable(10)
    self.assertEqual(0, len(m))
    for i in range(10):
        m.put(i, i)
        self.assertEqual(i + 1, len(m))

def test_len_after_deletions(self):
    # len shrinks on delete and grows again on re-insert.
    m = SeparateChainingHashTable(10)
    m.put(1, 1)
    self.assertEqual(1, len(m))
    m.del_(1)
    self.assertEqual(0, len(m))
    m.put(11, 42)
    self.assertEqual(1, len(m))

def test_delete_key(self):
    # Deleting one key must not disturb its chain neighbours.
    m = SeparateChainingHashTable(10)
    for i in range(5):
        m.put(i, i**2)
    m.del_(1)
    self.assertEqual(None, m.get(1))
    self.assertEqual(4, m.get(2))

def test_delete_key_and_reassign(self):
    # A deleted slot can be reused for the same key.
    m = SeparateChainingHashTable(10)
    m.put(1, 1)
    del m[1]
    m.put(1, 2)
    self.assertEqual(2, m.get(1))

def test_add_entry_bigger_than_table_size(self):
    # Keys larger than the table size must wrap into a bucket.
    m = SeparateChainingHashTable(10)
    m.put(11, '1')
    self.assertEqual('1', m.get(11))

def test_get_none_if_key_missing_and_hash_collision(self):
    # A colliding but absent key must not alias the stored entry.
    m = SeparateChainingHashTable(10)
    m.put(1, '1')
    self.assertEqual(None, m.get(11))
python | django__django | django/contrib/gis/management/commands/ogrinspect.py | {
"start": 558,
"end": 1015
} | class ____(argparse.Action):
"""
Custom argparse action for `ogrinspect` keywords that require
a string list. If the string is 'True'/'true' then the option
value will be a boolean instead.
"""
def __call__(self, parser, namespace, value, option_string=None):
if value.lower() == "true":
setattr(namespace, self.dest, True)
else:
setattr(namespace, self.dest, value.split(","))
| ListOptionAction |
python | spyder-ide__spyder | spyder/plugins/completion/providers/snippets/trie.py | {
"start": 2009,
"end": 2539
} | class ____(TrieNode):
def __init__(self):
    # Root node carries the empty prefix; sequences collects stored keys.
    super().__init__('')
    self.sequences = []

def __getitem__(self, sequence):
    """Look up *sequence* starting at the root.

    NOTE(review): the child lookup delegates to ``TrieNode.__getitem__``
    with the *untrimmed* sequence, and a missing first element returns
    ``self`` (the root) rather than None — presumably "closest node"
    semantics; confirm against TrieNode. An empty sequence returns None.
    """
    if sequence:
        elem = sequence[0]
        if elem in self.children:
            node = self.children[elem]
            return node[sequence]
        else:
            return self
    return None

def __setitem__(self, sequence, value):
    # An empty sequence assigns to the root node's own value.
    if sequence:
        super().__setitem__(sequence, value)
    else:
        self.value = value
| Trie |
python | pytorch__pytorch | test/test_python_dispatch.py | {
"start": 1689,
"end": 1961
} | class ____(TestCase):
def test_call_boxed(self) -> None:
    """aten::sin resolved via the dispatcher and invoked boxed must match Tensor.sin()."""
    sin = torch._C._dispatch_find_schema_or_throw("aten::sin", "")
    x = torch.randn(3)
    y = torch._C._dispatch_call_boxed(sin, x)
    self.assertEqual(y, x.sin())
python | kamyu104__LeetCode-Solutions | Python/grid-teleportation-traversal.py | {
"start": 68,
"end": 1407
} | class ____(object):
def minMoves(self, matrix):
    """
    0-1 BFS over the grid: stepping to a neighbour costs 1, teleporting
    between cells holding the same letter costs 0.

    :type matrix: List[str]  rows of '.', '#', or 'A'-'Z' portal letters
    :rtype: int  minimum moves from (0, 0) to (m-1, n-1), or -1 if unreachable

    Fixed: replaced Python-2-only ``xrange`` with ``range`` (NameError on
    Python 3); behavior is otherwise unchanged. Time O(m*n), space O(m*n).
    """
    DIRECTIONS = [(0, -1), (0, 1), (-1, 0), (1, 0)]

    m, n = len(matrix), len(matrix[0])
    # Portal cells grouped by letter; a bucket is emptied after use so each
    # teleport group is expanded at most once.
    lookup = [[] for _ in range(26)]
    for i in range(m):
        for j in range(n):
            if matrix[i][j] in ".#":
                continue
            lookup[ord(matrix[i][j]) - ord('A')].append((i, j))
    visited = [[False] * n for _ in range(m)]
    dq = collections.deque([(0, 0, 0)])  # (steps, row, col)
    while dq:
        step, i, j = dq.popleft()
        if visited[i][j]:
            continue
        visited[i][j] = True
        if (i, j) == (m - 1, n - 1):
            return step
        # Cost-1 edges: the four in-bounds, non-wall neighbours.
        for di, dj in DIRECTIONS:
            ni, nj = i + di, j + dj
            if not (0 <= ni < m and 0 <= nj < n and matrix[ni][nj] != '#' and not visited[ni][nj]):
                continue
            dq.append((step + 1, ni, nj))
        if matrix[i][j] == '.':
            continue
        # Cost-0 edges: teleport to every unvisited same-letter cell;
        # appendleft keeps the deque ordered for 0-1 BFS.
        for ni, nj in lookup[ord(matrix[i][j]) - ord('A')]:
            if visited[ni][nj]:
                continue
            dq.appendleft((step, ni, nj))
        lookup[ord(matrix[i][j]) - ord('A')] = []
    return -1
| Solution |
python | pandas-dev__pandas | pandas/io/formats/style_render.py | {
"start": 1272,
"end": 1416
} | class ____(TypedDict):
# CSS selector the properties apply to, e.g. "th.col_heading".
selector: str
# CSS property payload (project alias; list of pairs or a props string).
props: CSSProperties

# A full stylesheet: list of {selector, props} dicts.
CSSStyles: TypeAlias = list[CSSDict]
# Row/column subsetting argument accepted by Styler methods.
Subset = slice | Sequence | Index
| CSSDict |
python | weaviate__weaviate-python-client | weaviate/aliases/async_.py | {
"start": 165,
"end": 226
} | class ____(_AliasExecutor[ConnectionAsync]):
pass
| _AliasAsync |
python | huggingface__transformers | src/transformers/models/blt/modeling_blt.py | {
"start": 20109,
"end": 25270
} | class ____(BltPreTrainedModel):
# Config class associated with this model.
config: BltLocalEncoderConfig
# Routes output index 1 of BltSelfAttention (in the local encoder) into
# the recorded `encoder_attentions`.
_can_record_outputs = {
    "encoder_attentions": OutputRecorder(BltSelfAttention, index=1, layer_name="local_encoder"),
}
def __init__(self, config: BltLocalEncoderConfig):
    """Build the byte-level local encoder: transformer layers, rotary
    embeddings, token embeddings, and the patch cross-attention stack."""
    super().__init__(config)
    self.gradient_checkpointing = False
    self.config = config
    self.layers = nn.ModuleList(
        [BltTransformerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
    )
    self.rotary_emb = BltRotaryEmbedding(config=config)
    # Projects a patch embedding into cross_attn_k slots of hidden_size each
    # (reshaped to k positions per patch in forward()).
    self.patch_embedding_projection = nn.Linear(
        in_features=config.hidden_size,
        out_features=config.hidden_size * config.cross_attn_k,
        bias=False,
    )
    self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size)
    # One cross-attention block per layer, or a single shared one.
    self.cross_attn_layers = nn.ModuleList()
    layers_to_add = config.num_hidden_layers if config.cross_attn_all_layers else 1
    for layer_idx in range(layers_to_add):
        self.cross_attn_layers.append(
            BltCrossAttention(config=config, layer_idx=layer_idx, hidden_size=config.hidden_size)
        )
    self.post_init()
def forward(
    self,
    input_ids: Optional[torch.LongTensor] = None,
    inputs_embeds: Optional[torch.Tensor] = None,
    patch_embeds: Optional[torch.Tensor] = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_values: Optional[Cache] = None,
    cache_position: Optional[torch.LongTensor] = None,
    encoder_attention_mask: Optional[torch.Tensor] = None,
    num_patches: Optional[int] = None,
    patch_ids: Optional[torch.Tensor] = None,
    **kwargs: Unpack[TransformersKwargs],
):
    """Run byte-level self-attention layers and pool tokens into patch
    embeddings via cross-attention.

    Returns ``(hidden_states, encoder_cross_states)`` where the second item
    holds the final per-patch cross-attended embeddings.
    """
    if inputs_embeds is None:
        inputs_embeds = self.embed_tokens(input_ids)
    batch_size = inputs_embeds.shape[0]
    hidden_states = F.dropout(inputs_embeds, p=self.config.dropout, training=self.training)
    if position_ids is None:
        position_ids = (
            torch.arange(inputs_embeds.shape[1], device=inputs_embeds.device).unsqueeze(0).expand(batch_size, -1)
        )
    position_embeddings = self.rotary_emb(hidden_states, position_ids)
    # NOTE(review): dropout is applied twice before the first layer
    # (once on inputs_embeds, once here) — confirm intended.
    hidden_states = F.dropout(hidden_states, p=self.config.dropout, training=self.training)

    for idx, layer in enumerate(self.layers):
        hidden_states = layer(
            hidden_states,
            position_embeddings=position_embeddings,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            cache_position=cache_position,
            **kwargs,
        )
        # Pool tokens into patches after the last layer, or after every
        # layer when cross_attn_all_layers is set.
        if idx == len(self.layers) - 1 or self.config.cross_attn_all_layers:
            patch_embeds = self.patch_reduce(hidden_states, num_patches, patch_ids)
            patch_embeds = self.patch_embedding_projection(patch_embeds)
            # Expand each patch into cross_attn_k query positions.
            patch_embeds = patch_embeds.reshape(
                batch_size, patch_embeds.shape[1] * self.config.cross_attn_k, self.config.hidden_size
            )
            layer_idx = idx if self.config.cross_attn_all_layers else 0
            # Patches attend over the byte-level hidden states; residual add.
            cross_attention_output, _ = self.cross_attn_layers[layer_idx](
                hidden_states=patch_embeds,
                cross_attention_states=hidden_states,
                attention_mask=encoder_attention_mask,
                **kwargs,
            )
            patch_embeds = patch_embeds + cross_attention_output

    encoder_cross_states = patch_embeds
    return hidden_states, encoder_cross_states
def patch_reduce(self, hidden_states, max_num_patches, patch_ids):
    """
    Reduce variable length patches to single embedding per patch via an
    element-wise max (amax) over each patch's token embeddings.

    Note: this works with variable number of patches for different sequences
    in the batch. It handles variable length patches by assuming that
    patch_lengths will be 0 for any extra patches on the *right*; tokens not
    allocated to a real patch are expected to be routed by the caller via
    patch_ids. Returns a (batch, max_num_patches, dim) tensor.
    """
    batch_size = hidden_states.shape[0]
    embedding_dim = hidden_states.shape[-1]

    # Broadcast each token's patch id across the embedding dimension so it
    # can serve as a scatter index.
    patch_ids = patch_ids.unsqueeze(-1).expand(-1, -1, hidden_states.shape[-1])

    reduced_embeddings = torch.zeros(
        (batch_size, max_num_patches, embedding_dim), dtype=hidden_states.dtype, device=hidden_states.device
    )
    # include_self=False makes the zero init irrelevant to the max.
    reduced_embeddings = reduced_embeddings.scatter_reduce(
        src=hidden_states,
        dim=1,
        index=patch_ids,
        reduce="amax",
        include_self=False,
    )
    # Defensive trim; the tensor is already max_num_patches wide.
    reduced_embeddings = reduced_embeddings[:, :max_num_patches, :]

    return reduced_embeddings
| BltLocalEncoder |
python | ansible__ansible | test/units/module_utils/urls/test_fetch_url.py | {
"start": 849,
"end": 7346
} | class ____:
def __init__(self):
    # Minimal AnsibleModule stand-in: only the attributes fetch_url reads.
    self.params = {}
    self.tmpdir = None

def exit_json(self, *args, **kwargs):
    # Raise instead of exiting the process so tests can capture the call.
    raise ExitJson(*args, **kwargs)

def fail_json(self, *args, **kwargs):
    # Raise so tests can assert on the failure kwargs.
    raise FailJson(*args, **kwargs)
def test_fetch_url(open_url_mock, fake_ansible_module):
    """With no module params set, fetch_url forwards documented defaults to open_url."""
    r, info = fetch_url(fake_ansible_module, BASE_URL)

    # The cookie jar is created inside fetch_url; read it back off the call.
    dummy, kwargs = open_url_mock.call_args

    open_url_mock.assert_called_once_with(BASE_URL, client_cert=None, client_key=None, cookies=kwargs['cookies'], data=None,
                                          follow_redirects='urllib2', force=False, force_basic_auth='', headers=None,
                                          http_agent='ansible-httpget', last_mod_time=None, method=None, timeout=10, url_password='', url_username='',
                                          use_proxy=True, validate_certs=True, use_gssapi=False, unix_socket=None, ca_path=None, unredirected_headers=None,
                                          decompress=True, ciphers=None, use_netrc=True)
def test_fetch_url_params(open_url_mock, fake_ansible_module):
    """Module params override the defaults passed through to open_url."""
    fake_ansible_module.params = {
        'validate_certs': False,
        'url_username': 'user',
        'url_password': 'passwd',
        'http_agent': 'ansible-test',
        'force_basic_auth': True,
        'follow_redirects': 'all',
        'client_cert': 'client.pem',
        'client_key': 'client.key',
    }

    r, info = fetch_url(fake_ansible_module, BASE_URL)

    # The cookie jar is created inside fetch_url; read it back off the call.
    dummy, kwargs = open_url_mock.call_args

    open_url_mock.assert_called_once_with(BASE_URL, client_cert='client.pem', client_key='client.key', cookies=kwargs['cookies'], data=None,
                                          follow_redirects='all', force=False, force_basic_auth=True, headers=None,
                                          http_agent='ansible-test', last_mod_time=None, method=None, timeout=10, url_password='passwd', url_username='user',
                                          use_proxy=True, validate_certs=False, use_gssapi=False, unix_socket=None, ca_path=None, unredirected_headers=None,
                                          decompress=True, ciphers=None, use_netrc=True)
def test_fetch_url_cookies(mocker, fake_ansible_module):
def make_cookies(*args, **kwargs):
cookies = kwargs['cookies']
r = MagicMock()
r.headers = http.client.HTTPMessage()
add_header = r.headers.add_header
r.info.return_value = r.headers
for name, value in (('Foo', 'bar'), ('Baz', 'qux')):
cookie = Cookie(
version=0,
name=name,
value=value,
port=None,
port_specified=False,
domain="ansible.com",
domain_specified=True,
domain_initial_dot=False,
path="/",
path_specified=True,
secure=False,
expires=None,
discard=False,
comment=None,
comment_url=None,
rest=None
)
cookies.set_cookie(cookie)
add_header('Set-Cookie', '%s=%s' % (name, value))
return r
mocker = mocker.patch('ansible.module_utils.urls.open_url', new=make_cookies)
r, info = fetch_url(fake_ansible_module, BASE_URL)
assert info['cookies'] == {'Baz': 'qux', 'Foo': 'bar'}
if sys.version_info < (3, 11):
# Python sorts cookies in order of most specific (ie. longest) path first
# items with the same path are reversed from response order
assert info['cookies_string'] == 'Baz=qux; Foo=bar'
else:
# Python 3.11 and later preserve the Set-Cookie order.
# See: https://github.com/python/cpython/pull/22745/
assert info['cookies_string'] == 'Foo=bar; Baz=qux'
# The key here has a `-` as opposed to what we see in the `uri` module that converts to `_`
# Note: this is response order, which differs from cookies_string
assert info['set-cookie'] == 'Foo=bar, Baz=qux'
def test_fetch_url_connectionerror(open_url_mock, fake_ansible_module):
open_url_mock.side_effect = ConnectionError('TESTS')
with pytest.raises(FailJson) as excinfo:
fetch_url(fake_ansible_module, BASE_URL)
assert excinfo.value.kwargs['msg'] == 'TESTS'
assert BASE_URL == excinfo.value.kwargs['url']
assert excinfo.value.kwargs['status'] == -1
open_url_mock.side_effect = ValueError('TESTS')
with pytest.raises(FailJson) as excinfo:
fetch_url(fake_ansible_module, BASE_URL)
assert excinfo.value.kwargs['msg'] == 'TESTS'
assert BASE_URL == excinfo.value.kwargs['url']
assert excinfo.value.kwargs['status'] == -1
def test_fetch_url_httperror(open_url_mock, fake_ansible_module):
open_url_mock.side_effect = urllib.error.HTTPError(
BASE_URL,
500,
'Internal Server Error',
{'Content-Type': 'application/json'},
io.StringIO('TESTS')
)
r, info = fetch_url(fake_ansible_module, BASE_URL)
assert info == {'msg': 'HTTP Error 500: Internal Server Error', 'body': 'TESTS',
'status': 500, 'url': BASE_URL, 'content-type': 'application/json'}
def test_fetch_url_urlerror(open_url_mock, fake_ansible_module):
open_url_mock.side_effect = urllib.error.URLError('TESTS')
r, info = fetch_url(fake_ansible_module, BASE_URL)
assert info == {'msg': 'Request failed: <urlopen error TESTS>', 'status': -1, 'url': BASE_URL}
def test_fetch_url_socketerror(open_url_mock, fake_ansible_module):
open_url_mock.side_effect = OSError('TESTS')
r, info = fetch_url(fake_ansible_module, BASE_URL)
assert info == {'msg': 'Connection failure: TESTS', 'status': -1, 'url': BASE_URL}
def test_fetch_url_exception(open_url_mock, fake_ansible_module):
open_url_mock.side_effect = Exception('TESTS')
r, info = fetch_url(fake_ansible_module, BASE_URL)
exception = info.pop('exception')
assert info == {'msg': 'An unknown error occurred: TESTS', 'status': -1, 'url': BASE_URL}
assert "Exception: TESTS" in exception
def test_fetch_url_badstatusline(open_url_mock, fake_ansible_module):
open_url_mock.side_effect = http.client.BadStatusLine('TESTS')
r, info = fetch_url(fake_ansible_module, BASE_URL)
assert info == {'msg': 'Connection failure: connection was closed before a valid response was received: TESTS', 'status': -1, 'url': BASE_URL}
| FakeAnsibleModule |
python | python-openxml__python-docx | tests/styles/test_style.py | {
"start": 16990,
"end": 20196
} | class ____:
def it_knows_its_next_paragraph_style(self, next_get_fixture):
style, expected_value = next_get_fixture
assert style.next_paragraph_style == expected_value
def it_can_change_its_next_paragraph_style(self, next_set_fixture):
style, next_style, expected_xml = next_set_fixture
style.next_paragraph_style = next_style
assert style.element.xml == expected_xml
def it_provides_access_to_its_paragraph_format(self, parfmt_fixture):
style, ParagraphFormat_, paragraph_format_ = parfmt_fixture
paragraph_format = style.paragraph_format
ParagraphFormat_.assert_called_once_with(style._element)
assert paragraph_format is paragraph_format_
# fixtures -------------------------------------------------------
@pytest.fixture(
params=[
("H1", "Body"),
("H2", "H2"),
("Body", "Body"),
("Foo", "Foo"),
]
)
def next_get_fixture(self, request):
style_name, next_style_name = request.param
styles = element(
"w:styles/("
"w:style{w:type=paragraph,w:styleId=H1}/w:next{w:val=Body},"
"w:style{w:type=paragraph,w:styleId=H2}/w:next{w:val=Char},"
"w:style{w:type=paragraph,w:styleId=Body},"
"w:style{w:type=paragraph,w:styleId=Foo}/w:next{w:val=Bar},"
"w:style{w:type=character,w:styleId=Char})"
)
style_names = ["H1", "H2", "Body", "Foo", "Char"]
style_elm = styles[style_names.index(style_name)]
next_style_elm = styles[style_names.index(next_style_name)]
style = ParagraphStyle(style_elm)
next_style = ParagraphStyle(next_style_elm) if style_name == "H1" else style
return style, next_style
@pytest.fixture(
params=[
("H", "B", "w:style{w:type=paragraph,w:styleId=H}/w:next{w:val=B}"),
("H", None, "w:style{w:type=paragraph,w:styleId=H}"),
("H", "H", "w:style{w:type=paragraph,w:styleId=H}"),
]
)
def next_set_fixture(self, request):
style_name, next_style_name, style_cxml = request.param
styles = element(
"w:styles/(w:style{w:type=paragraph,w:styleId=H},w:style{w:type=paragraph,w:styleId=B})"
)
style_elms = {"H": styles[0], "B": styles[1]}
style = ParagraphStyle(style_elms[style_name])
next_style = ParagraphStyle(style_elms[next_style_name]) if next_style_name else None
expected_xml = xml(style_cxml)
return style, next_style, expected_xml
@pytest.fixture
def parfmt_fixture(self, ParagraphFormat_, paragraph_format_):
style = ParagraphStyle(element("w:style"))
return style, ParagraphFormat_, paragraph_format_
# fixture components ---------------------------------------------
@pytest.fixture
def ParagraphFormat_(self, request, paragraph_format_):
return class_mock(
request, "docx.styles.style.ParagraphFormat", return_value=paragraph_format_
)
@pytest.fixture
def paragraph_format_(self, request):
return instance_mock(request, ParagraphFormat)
| DescribeParagraphStyle |
python | ApeWorX__ape | src/ape/pytest/contextmanagers.py | {
"start": 571,
"end": 6985
} | class ____(ManagerAccessMixin):
def __init__(
self,
expected_message: Optional[Union[_RevertMessage, type[CustomError], ErrorABI]] = None,
dev_message: Optional[_RevertMessage] = None,
**error_inputs,
):
self.expected_message = expected_message
self.dev_message = dev_message
self.error_inputs = error_inputs
self.revert_info: Optional[RevertInfo] = None
def _check_dev_message(self, exception: ContractLogicError):
"""
Attempts to extract a dev-message from the contract source code by inspecting what
instruction(s) led to a transaction revert.
Raises:
AssertionError: When the trace or source can not be retrieved, the dev message cannot
be found, or the found dev message does not match the expected dev message.
"""
try:
dev_message = exception.dev_message
except ValueError as err:
raise AssertionError(str(err)) from err
if dev_message is None:
err_message = "Could not find the source of the revert."
# Attempt to show source traceback so the user can see the real failure.
if (info := self.revert_info) and (ex := info.value):
if tb := ex.source_traceback:
err_message = f"{err_message}\n{tb}"
raise AssertionError(err_message)
if not (
(self.dev_message.match(dev_message) is not None)
if isinstance(self.dev_message, re.Pattern)
else (dev_message == self.dev_message)
):
assertion_error_message = (
self.dev_message.pattern
if isinstance(self.dev_message, re.Pattern)
else self.dev_message
)
assertion_error_prefix = f"Expected dev revert message '{assertion_error_message}'"
raise AssertionError(f"{assertion_error_prefix} but got '{dev_message}'.")
def _check_expected_message(self, exception: ContractLogicError):
"""
Compares the revert message given by the exception to the expected message.
Raises:
AssertionError: When the exception message is ``None`` or if the message does not match
the expected message.
"""
actual = exception.revert_message
assertion_error_message = (
self.expected_message.pattern
if isinstance(self.expected_message, re.Pattern)
else self.expected_message
)
assertion_error_prefix = f"Expected revert message '{assertion_error_message}'"
message_matches = (
(self.expected_message.match(actual) is not None)
if isinstance(self.expected_message, re.Pattern)
else (actual == self.expected_message)
)
if not message_matches:
if actual == TransactionError.DEFAULT_MESSAGE:
# The transaction failed without a revert message
# but the user is expecting one.
raise AssertionError(f"{assertion_error_prefix} but there was none.")
raise AssertionError(f"{assertion_error_prefix} but got '{actual}'.")
def _check_custom_error(self, exception: Union[CustomError]):
# perf: avoid loading from contracts namespace until needed.
from ape.contracts import ContractInstance
expected_error_cls = self.expected_message
if not isinstance(expected_error_cls, ErrorABI) and not isinstance(
expected_error_cls, type
):
# Not expecting a custom error type.
return
elif (
isinstance(expected_error_cls, type)
and issubclass(expected_error_cls, CustomError)
and not isinstance(exception, expected_error_cls)
and isinstance(getattr(expected_error_cls, "contract", None), ContractInstance)
):
# NOTE: This is the check that ensures the error class is coming from
# the expected contract instance (e.g. from the same address).
# If not address is being compared, this check is skipped.
raise AssertionError(
f"Expected error '{expected_error_cls.__name__}' "
f"but was '{type(exception).__name__}'"
)
if not self.error_inputs:
return
# Making assertions on inputs to error.
incorrect_values = []
actual_error_inputs = exception.inputs
for ipt_name, expected_ipt in self.error_inputs.items():
if ipt_name not in actual_error_inputs:
# Assertion is not being made on this input.
continue
actual_ipt = actual_error_inputs[ipt_name]
if actual_ipt != expected_ipt:
incorrect_values.append(
f"Expected input '{ipt_name}' to be '{expected_ipt}' but was '{actual_ipt}'."
)
if incorrect_values:
raise AssertionError("\n".join(incorrect_values))
def __enter__(self, *args, **kwargs):
info = RevertInfo()
self.revert_info = info
return info
def __exit__(self, exc_type: type, exc_value: Exception, traceback) -> bool:
if exc_type is None:
raise AssertionError("Transaction did not revert.")
if not isinstance(exc_value, ContractLogicError):
raise AssertionError(
f"Transaction did not revert.\n"
f"However, an exception of type {type(exc_value)} occurred: {exc_value}."
) from exc_value
# Set the exception on the returned info.
# This allows the user to make further assertions on the exception.
if self.revert_info is not None:
self.revert_info.value = exc_value
if self.dev_message is not None:
self._check_dev_message(exc_value)
if self.expected_message is not None and isinstance(self.expected_message, (str, Pattern)):
self._check_expected_message(exc_value)
elif self.expected_message is not None and isinstance(exc_value, CustomError):
# Is a custom error type.
self._check_custom_error(exc_value)
# Returning True causes the expected exception not to get raised
# and the test to pass
return True
| RevertsContextManager |
python | modin-project__modin | modin/core/computation/engines.py | {
"start": 1768,
"end": 3385
} | class ____(metaclass=abc.ABCMeta):
"""Object serving as a base class for all engines."""
has_neg_frac = False
def __init__(self, expr) -> None:
self.expr = expr
self.aligned_axes = None
self.result_type = None
def convert(self) -> str:
"""
Convert an expression for evaluation.
Defaults to return the expression as a string.
"""
return printing.pprint_thing(self.expr)
def evaluate(self) -> object:
"""
Run the engine on the expression.
This method performs alignment which is necessary no matter what engine
is being used, thus its implementation is in the base class.
Returns
-------
object
The result of the passed expression.
"""
if not self._is_aligned:
self.result_type, self.aligned_axes = align_terms(self.expr.terms)
# make sure no names in resolvers and locals/globals clash
res = self._evaluate()
return reconstruct_object(
self.result_type, res, self.aligned_axes, self.expr.terms.return_type
)
@property
def _is_aligned(self) -> bool:
return self.aligned_axes is not None and self.result_type is not None
@abc.abstractmethod
def _evaluate(self):
"""
Return an evaluated expression.
Parameters
----------
env : Scope
The local and global environment in which to evaluate an
expression.
Notes
-----
Must be implemented by subclasses.
"""
| AbstractEngine |
python | sympy__sympy | sympy/printing/pretty/stringpict.py | {
"start": 697,
"end": 12132
} | class ____:
"""An ASCII picture.
The pictures are represented as a list of equal length strings.
"""
#special value for stringPict.below
LINE = 'line'
def __init__(self, s, baseline=0):
"""Initialize from string.
Multiline strings are centered.
"""
self.s = s
#picture is a string that just can be printed
self.picture = stringPict.equalLengths(s.splitlines())
#baseline is the line number of the "base line"
self.baseline = baseline
self.binding = None
@staticmethod
def equalLengths(lines):
# empty lines
if not lines:
return ['']
width = max(line_width(line) for line in lines)
return [center(line, width) for line in lines]
def height(self):
"""The height of the picture in characters."""
return len(self.picture)
def width(self):
"""The width of the picture in characters."""
return line_width(self.picture[0])
@staticmethod
def next(*args):
"""Put a string of stringPicts next to each other.
Returns string, baseline arguments for stringPict.
"""
#convert everything to stringPicts
objects = []
for arg in args:
if isinstance(arg, str):
arg = stringPict(arg)
objects.append(arg)
#make a list of pictures, with equal height and baseline
newBaseline = max(obj.baseline for obj in objects)
newHeightBelowBaseline = max(
obj.height() - obj.baseline
for obj in objects)
newHeight = newBaseline + newHeightBelowBaseline
pictures = []
for obj in objects:
oneEmptyLine = [' '*obj.width()]
basePadding = newBaseline - obj.baseline
totalPadding = newHeight - obj.height()
pictures.append(
oneEmptyLine * basePadding +
obj.picture +
oneEmptyLine * (totalPadding - basePadding))
result = [''.join(lines) for lines in zip(*pictures)]
return '\n'.join(result), newBaseline
def right(self, *args):
r"""Put pictures next to this one.
Returns string, baseline arguments for stringPict.
(Multiline) strings are allowed, and are given a baseline of 0.
Examples
========
>>> from sympy.printing.pretty.stringpict import stringPict
>>> print(stringPict("10").right(" + ",stringPict("1\r-\r2",1))[0])
1
10 + -
2
"""
return stringPict.next(self, *args)
def left(self, *args):
"""Put pictures (left to right) at left.
Returns string, baseline arguments for stringPict.
"""
return stringPict.next(*(args + (self,)))
@staticmethod
def stack(*args):
"""Put pictures on top of each other,
from top to bottom.
Returns string, baseline arguments for stringPict.
The baseline is the baseline of the second picture.
Everything is centered.
Baseline is the baseline of the second picture.
Strings are allowed.
The special value stringPict.LINE is a row of '-' extended to the width.
"""
#convert everything to stringPicts; keep LINE
objects = []
for arg in args:
if arg is not stringPict.LINE and isinstance(arg, str):
arg = stringPict(arg)
objects.append(arg)
#compute new width
newWidth = max(
obj.width()
for obj in objects
if obj is not stringPict.LINE)
lineObj = stringPict(hobj('-', newWidth))
#replace LINE with proper lines
for i, obj in enumerate(objects):
if obj is stringPict.LINE:
objects[i] = lineObj
#stack the pictures, and center the result
newPicture = [center(line, newWidth) for obj in objects for line in obj.picture]
newBaseline = objects[0].height() + objects[1].baseline
return '\n'.join(newPicture), newBaseline
def below(self, *args):
"""Put pictures under this picture.
Returns string, baseline arguments for stringPict.
Baseline is baseline of top picture
Examples
========
>>> from sympy.printing.pretty.stringpict import stringPict
>>> print(stringPict("x+3").below(
... stringPict.LINE, '3')[0]) #doctest: +NORMALIZE_WHITESPACE
x+3
---
3
"""
s, baseline = stringPict.stack(self, *args)
return s, self.baseline
def above(self, *args):
"""Put pictures above this picture.
Returns string, baseline arguments for stringPict.
Baseline is baseline of bottom picture.
"""
string, baseline = stringPict.stack(*(args + (self,)))
baseline = len(string.splitlines()) - self.height() + self.baseline
return string, baseline
def parens(self, left='(', right=')', ifascii_nougly=False):
"""Put parentheses around self.
Returns string, baseline arguments for stringPict.
left or right can be None or empty string which means 'no paren from
that side'
"""
h = self.height()
b = self.baseline
# XXX this is a hack -- ascii parens are ugly!
if ifascii_nougly and not pretty_use_unicode():
h = 1
b = 0
res = self
if left:
lparen = stringPict(vobj(left, h), baseline=b)
res = stringPict(*lparen.right(self))
if right:
rparen = stringPict(vobj(right, h), baseline=b)
res = stringPict(*res.right(rparen))
return ('\n'.join(res.picture), res.baseline)
def leftslash(self):
"""Precede object by a slash of the proper size.
"""
# XXX not used anywhere ?
height = max(
self.baseline,
self.height() - 1 - self.baseline)*2 + 1
slash = '\n'.join(
' '*(height - i - 1) + xobj('/', 1) + ' '*i
for i in range(height)
)
return self.left(stringPict(slash, height//2))
def root(self, n=None):
"""Produce a nice root symbol.
Produces ugly results for big n inserts.
"""
# XXX not used anywhere
# XXX duplicate of root drawing in pretty.py
#put line over expression
result = self.above('_'*self.width())
#construct right half of root symbol
height = self.height()
slash = '\n'.join(
' ' * (height - i - 1) + '/' + ' ' * i
for i in range(height)
)
slash = stringPict(slash, height - 1)
#left half of root symbol
if height > 2:
downline = stringPict('\\ \n \\', 1)
else:
downline = stringPict('\\')
#put n on top, as low as possible
if n is not None and n.width() > downline.width():
downline = downline.left(' '*(n.width() - downline.width()))
downline = downline.above(n)
#build root symbol
root = downline.right(slash)
#glue it on at the proper height
#normally, the root symbel is as high as self
#which is one less than result
#this moves the root symbol one down
#if the root became higher, the baseline has to grow too
root.baseline = result.baseline - result.height() + root.height()
return result.left(root)
def render(self, * args, **kwargs):
"""Return the string form of self.
Unless the argument line_break is set to False, it will
break the expression in a form that can be printed
on the terminal without being broken up.
"""
if _GLOBAL_WRAP_LINE is not None:
kwargs["wrap_line"] = _GLOBAL_WRAP_LINE
if kwargs["wrap_line"] is False:
return "\n".join(self.picture)
if kwargs["num_columns"] is not None:
# Read the argument num_columns if it is not None
ncols = kwargs["num_columns"]
else:
# Attempt to get a terminal width
ncols = self.terminal_width()
if ncols <= 0:
ncols = 80
# If smaller than the terminal width, no need to correct
if self.width() <= ncols:
return type(self.picture[0])(self)
"""
Break long-lines in a visually pleasing format.
without overflow indicators | with overflow indicators
| 2 2 3 | | 2 2 3 ↪|
|6*x *y + 4*x*y + | |6*x *y + 4*x*y + ↪|
| | | |
| 3 4 4 | |↪ 3 4 4 |
|4*y*x + x + y | |↪ 4*y*x + x + y |
|a*c*e + a*c*f + a*d | |a*c*e + a*c*f + a*d ↪|
|*e + a*d*f + b*c*e | | |
|+ b*c*f + b*d*e + b | |↪ *e + a*d*f + b*c* ↪|
|*d*f | | |
| | |↪ e + b*c*f + b*d*e ↪|
| | | |
| | |↪ + b*d*f |
"""
overflow_first = ""
if kwargs["use_unicode"] or pretty_use_unicode():
overflow_start = "\N{RIGHTWARDS ARROW WITH HOOK} "
overflow_end = " \N{RIGHTWARDS ARROW WITH HOOK}"
else:
overflow_start = "> "
overflow_end = " >"
def chunks(line):
"""Yields consecutive chunks of line_width ncols"""
prefix = overflow_first
width, start = line_width(prefix + overflow_end), 0
for i, x in enumerate(line):
wx = line_width(x)
# Only flush the screen when the current character overflows.
# This way, combining marks can be appended even when width == ncols.
if width + wx > ncols:
yield prefix + line[start:i] + overflow_end
prefix = overflow_start
width, start = line_width(prefix + overflow_end), i
width += wx
yield prefix + line[start:]
# Concurrently assemble chunks of all lines into individual screens
pictures = zip(*map(chunks, self.picture))
# Join lines of each screen into sub-pictures
pictures = ["\n".join(picture) for picture in pictures]
# Add spacers between sub-pictures
return "\n\n".join(pictures)
def terminal_width(self):
"""Return the terminal width if possible, otherwise return 0.
"""
size = shutil.get_terminal_size(fallback=(0, 0))
return size.columns
def __eq__(self, o):
if isinstance(o, str):
return '\n'.join(self.picture) == o
elif isinstance(o, stringPict):
return o.picture == self.picture
return False
def __hash__(self):
return super().__hash__()
def __str__(self):
return '\n'.join(self.picture)
def __repr__(self):
return "stringPict(%r,%d)" % ('\n'.join(self.picture), self.baseline)
def __getitem__(self, index):
return self.picture[index]
def __len__(self):
return len(self.s)
| stringPict |
python | gevent__gevent | src/gevent/tests/known_failures.py | {
"start": 1598,
"end": 2663
} | class ____(ConstantCondition):
__slots__ = (
)
def __init__(self, name):
ConstantCondition.__init__(self, getattr(sysinfo, name), name)
PYPY = _AttrCondition('PYPY')
PYPY3 = _AttrCondition('PYPY3')
PY3 = _AttrCondition('PY3')
PY2 = _AttrCondition('PY2')
OSX = _AttrCondition('OSX')
LIBUV = _AttrCondition('LIBUV')
WIN = _AttrCondition('WIN')
APPVEYOR = _AttrCondition('RUNNING_ON_APPVEYOR')
TRAVIS = _AttrCondition('RUNNING_ON_TRAVIS')
CI = _AttrCondition('RUNNING_ON_CI')
LEAKTEST = _AttrCondition('RUN_LEAKCHECKS')
COVERAGE = _AttrCondition('RUN_COVERAGE')
RESOLVER_NOT_SYSTEM = _AttrCondition('RESOLVER_NOT_SYSTEM')
BIT_64 = ConstantCondition(struct.calcsize('P') * 8 == 64, 'BIT_64')
PY380_EXACTLY = ConstantCondition(sys.version_info[:3] == (3, 8, 0), 'PY380_EXACTLY')
PY312B3_EXACTLY = ConstantCondition(sys.version_info == (3, 12, 0, 'beta', 3))
PY312B4_EXACTLY = ConstantCondition(sys.version_info == (3, 12, 0, 'beta', 4))
PY313LT5 = ConstantCondition(
sys.version_info[:2] == (3, 13)
and sys.version_info[2] < 5
)
| _AttrCondition |
python | redis__redis-py | redis/commands/cluster.py | {
"start": 11809,
"end": 24955
} | class ____(ManagementCommands):
"""
A class for Redis Cluster management commands
The class inherits from Redis's core ManagementCommands class and do the
required adjustments to work with cluster mode
"""
def slaveof(self, *args, **kwargs) -> NoReturn:
"""
Make the server a replica of another instance, or promote it as master.
For more information see https://redis.io/commands/slaveof
"""
raise RedisClusterException("SLAVEOF is not supported in cluster mode")
def replicaof(self, *args, **kwargs) -> NoReturn:
"""
Make the server a replica of another instance, or promote it as master.
For more information see https://redis.io/commands/replicaof
"""
raise RedisClusterException("REPLICAOF is not supported in cluster mode")
def swapdb(self, *args, **kwargs) -> NoReturn:
"""
Swaps two Redis databases.
For more information see https://redis.io/commands/swapdb
"""
raise RedisClusterException("SWAPDB is not supported in cluster mode")
def cluster_myid(self, target_node: "TargetNodesT") -> ResponseT:
"""
Returns the node's id.
:target_node: 'ClusterNode'
The node to execute the command on
For more information check https://redis.io/commands/cluster-myid/
"""
return self.execute_command("CLUSTER MYID", target_nodes=target_node)
def cluster_addslots(
self, target_node: "TargetNodesT", *slots: EncodableT
) -> ResponseT:
"""
Assign new hash slots to receiving node. Sends to specified node.
:target_node: 'ClusterNode'
The node to execute the command on
For more information see https://redis.io/commands/cluster-addslots
"""
return self.execute_command(
"CLUSTER ADDSLOTS", *slots, target_nodes=target_node
)
def cluster_addslotsrange(
self, target_node: "TargetNodesT", *slots: EncodableT
) -> ResponseT:
"""
Similar to the CLUSTER ADDSLOTS command.
The difference between the two commands is that ADDSLOTS takes a list of slots
to assign to the node, while ADDSLOTSRANGE takes a list of slot ranges
(specified by start and end slots) to assign to the node.
:target_node: 'ClusterNode'
The node to execute the command on
For more information see https://redis.io/commands/cluster-addslotsrange
"""
return self.execute_command(
"CLUSTER ADDSLOTSRANGE", *slots, target_nodes=target_node
)
def cluster_countkeysinslot(self, slot_id: int) -> ResponseT:
"""
Return the number of local keys in the specified hash slot
Send to node based on specified slot_id
For more information see https://redis.io/commands/cluster-countkeysinslot
"""
return self.execute_command("CLUSTER COUNTKEYSINSLOT", slot_id)
def cluster_count_failure_report(self, node_id: str) -> ResponseT:
"""
Return the number of failure reports active for a given node
Sends to a random node
For more information see https://redis.io/commands/cluster-count-failure-reports
"""
return self.execute_command("CLUSTER COUNT-FAILURE-REPORTS", node_id)
def cluster_delslots(self, *slots: EncodableT) -> List[bool]:
"""
Set hash slots as unbound in the cluster.
It determines by it self what node the slot is in and sends it there
Returns a list of the results for each processed slot.
For more information see https://redis.io/commands/cluster-delslots
"""
return [self.execute_command("CLUSTER DELSLOTS", slot) for slot in slots]
def cluster_delslotsrange(self, *slots: EncodableT) -> ResponseT:
"""
Similar to the CLUSTER DELSLOTS command.
The difference is that CLUSTER DELSLOTS takes a list of hash slots to remove
from the node, while CLUSTER DELSLOTSRANGE takes a list of slot ranges to remove
from the node.
For more information see https://redis.io/commands/cluster-delslotsrange
"""
return self.execute_command("CLUSTER DELSLOTSRANGE", *slots)
def cluster_failover(
self, target_node: "TargetNodesT", option: Optional[str] = None
) -> ResponseT:
"""
Forces a slave to perform a manual failover of its master
Sends to specified node
:target_node: 'ClusterNode'
The node to execute the command on
For more information see https://redis.io/commands/cluster-failover
"""
if option:
if option.upper() not in ["FORCE", "TAKEOVER"]:
raise RedisError(
f"Invalid option for CLUSTER FAILOVER command: {option}"
)
else:
return self.execute_command(
"CLUSTER FAILOVER", option, target_nodes=target_node
)
else:
return self.execute_command("CLUSTER FAILOVER", target_nodes=target_node)
def cluster_info(self, target_nodes: Optional["TargetNodesT"] = None) -> ResponseT:
"""
Provides info about Redis Cluster node state.
The command will be sent to a random node in the cluster if no target
node is specified.
For more information see https://redis.io/commands/cluster-info
"""
return self.execute_command("CLUSTER INFO", target_nodes=target_nodes)
def cluster_keyslot(self, key: str) -> ResponseT:
"""
Returns the hash slot of the specified key
Sends to random node in the cluster
For more information see https://redis.io/commands/cluster-keyslot
"""
return self.execute_command("CLUSTER KEYSLOT", key)
def cluster_meet(
self, host: str, port: int, target_nodes: Optional["TargetNodesT"] = None
) -> ResponseT:
"""
Force a node cluster to handshake with another node.
Sends to specified node.
For more information see https://redis.io/commands/cluster-meet
"""
return self.execute_command(
"CLUSTER MEET", host, port, target_nodes=target_nodes
)
def cluster_nodes(self) -> ResponseT:
"""
Get Cluster config for the node.
Sends to random node in the cluster
For more information see https://redis.io/commands/cluster-nodes
"""
return self.execute_command("CLUSTER NODES")
def cluster_replicate(
self, target_nodes: "TargetNodesT", node_id: str
) -> ResponseT:
"""
Reconfigure a node as a slave of the specified master node
For more information see https://redis.io/commands/cluster-replicate
"""
return self.execute_command(
"CLUSTER REPLICATE", node_id, target_nodes=target_nodes
)
def cluster_reset(
self, soft: bool = True, target_nodes: Optional["TargetNodesT"] = None
) -> ResponseT:
"""
Reset a Redis Cluster node
If 'soft' is True then it will send 'SOFT' argument
If 'soft' is False then it will send 'HARD' argument
For more information see https://redis.io/commands/cluster-reset
"""
return self.execute_command(
"CLUSTER RESET", b"SOFT" if soft else b"HARD", target_nodes=target_nodes
)
def cluster_save_config(
self, target_nodes: Optional["TargetNodesT"] = None
) -> ResponseT:
"""
Forces the node to save cluster state on disk
For more information see https://redis.io/commands/cluster-saveconfig
"""
return self.execute_command("CLUSTER SAVECONFIG", target_nodes=target_nodes)
def cluster_get_keys_in_slot(self, slot: int, num_keys: int) -> ResponseT:
"""
Returns the number of keys in the specified cluster slot
For more information see https://redis.io/commands/cluster-getkeysinslot
"""
return self.execute_command("CLUSTER GETKEYSINSLOT", slot, num_keys)
def cluster_set_config_epoch(
self, epoch: int, target_nodes: Optional["TargetNodesT"] = None
) -> ResponseT:
"""
Set the configuration epoch in a new node
For more information see https://redis.io/commands/cluster-set-config-epoch
"""
return self.execute_command(
"CLUSTER SET-CONFIG-EPOCH", epoch, target_nodes=target_nodes
)
def cluster_setslot(
self, target_node: "TargetNodesT", node_id: str, slot_id: int, state: str
) -> ResponseT:
"""
Bind an hash slot to a specific node
:target_node: 'ClusterNode'
The node to execute the command on
For more information see https://redis.io/commands/cluster-setslot
"""
if state.upper() in ("IMPORTING", "NODE", "MIGRATING"):
return self.execute_command(
"CLUSTER SETSLOT", slot_id, state, node_id, target_nodes=target_node
)
elif state.upper() == "STABLE":
raise RedisError('For "stable" state please use cluster_setslot_stable')
else:
raise RedisError(f"Invalid slot state: {state}")
def cluster_setslot_stable(self, slot_id: int) -> ResponseT:
"""
Clears migrating / importing state from the slot.
It determines by it self what node the slot is in and sends it there.
For more information see https://redis.io/commands/cluster-setslot
"""
return self.execute_command("CLUSTER SETSLOT", slot_id, "STABLE")
def cluster_replicas(
self, node_id: str, target_nodes: Optional["TargetNodesT"] = None
) -> ResponseT:
"""
Provides a list of replica nodes replicating from the specified primary
target node.
For more information see https://redis.io/commands/cluster-replicas
"""
return self.execute_command(
"CLUSTER REPLICAS", node_id, target_nodes=target_nodes
)
def cluster_slots(self, target_nodes: Optional["TargetNodesT"] = None) -> ResponseT:
"""
Get array of Cluster slot to node mappings
For more information see https://redis.io/commands/cluster-slots
"""
return self.execute_command("CLUSTER SLOTS", target_nodes=target_nodes)
def cluster_shards(self, target_nodes=None):
"""
Returns details about the shards of the cluster.
For more information see https://redis.io/commands/cluster-shards
"""
return self.execute_command("CLUSTER SHARDS", target_nodes=target_nodes)
def cluster_myshardid(self, target_nodes=None):
"""
Returns the shard ID of the node.
For more information see https://redis.io/commands/cluster-myshardid/
"""
return self.execute_command("CLUSTER MYSHARDID", target_nodes=target_nodes)
def cluster_links(self, target_node: "TargetNodesT") -> ResponseT:
"""
Each node in a Redis Cluster maintains a pair of long-lived TCP link with each
peer in the cluster: One for sending outbound messages towards the peer and one
for receiving inbound messages from the peer.
This command outputs information of all such peer links as an array.
For more information see https://redis.io/commands/cluster-links
"""
return self.execute_command("CLUSTER LINKS", target_nodes=target_node)
def cluster_flushslots(self, target_nodes: Optional["TargetNodesT"] = None) -> None:
raise NotImplementedError(
"CLUSTER FLUSHSLOTS is intentionally not implemented in the client."
)
def cluster_bumpepoch(self, target_nodes: Optional["TargetNodesT"] = None) -> None:
raise NotImplementedError(
"CLUSTER BUMPEPOCH is intentionally not implemented in the client."
)
def readonly(self, target_nodes: Optional["TargetNodesT"] = None) -> ResponseT:
"""
Enables read queries.
The command will be sent to the default cluster node if target_nodes is
not specified.
For more information see https://redis.io/commands/readonly
"""
if target_nodes == "replicas" or target_nodes == "all":
# read_from_replicas will only be enabled if the READONLY command
# is sent to all replicas
self.read_from_replicas = True
return self.execute_command("READONLY", target_nodes=target_nodes)
def readwrite(self, target_nodes: Optional["TargetNodesT"] = None) -> ResponseT:
"""
Disables read queries.
The command will be sent to the default cluster node if target_nodes is
not specified.
For more information see https://redis.io/commands/readwrite
"""
# Reset read from replicas flag
self.read_from_replicas = False
return self.execute_command("READWRITE", target_nodes=target_nodes)
| ClusterManagementCommands |
python | doocs__leetcode | solution/0500-0599/0543.Diameter of Binary Tree/Solution.py | {
"start": 192,
"end": 578
} | class ____:
def diameterOfBinaryTree(self, root: Optional[TreeNode]) -> int:
def dfs(root: Optional[TreeNode]) -> int:
if root is None:
return 0
l, r = dfs(root.left), dfs(root.right)
nonlocal ans
ans = max(ans, l + r)
return 1 + max(l, r)
ans = 0
dfs(root)
return ans
| Solution |
python | ansible__ansible | test/lib/ansible_test/_internal/dev/container_probe.py | {
"start": 921,
"end": 7820
} | class ____:
"""Details on a cgroup mount point that is expected to be present in the container."""
path: str
type: t.Optional[str]
writable: t.Optional[bool]
state: t.Optional[CGroupState]
def __post_init__(self):
assert pathlib.PurePosixPath(self.path).is_relative_to(CGroupPath.ROOT)
if self.type is None:
assert self.state is None
elif self.type == MountType.TMPFS:
assert self.writable is True
assert self.state is None
else:
assert self.type in (MountType.CGROUP_V1, MountType.CGROUP_V2)
assert self.state is not None
def check_container_cgroup_status(args: EnvironmentConfig, config: DockerConfig, container_name: str, expected_mounts: tuple[CGroupMount, ...]) -> None:
"""Check the running container to examine the state of the cgroup hierarchies."""
cmd = ['sh', '-c', 'cat /proc/1/cgroup && echo && cat /proc/1/mountinfo']
stdout = docker_exec(args, container_name, cmd, capture=True)[0]
cgroups_stdout, mounts_stdout = stdout.split('\n\n')
cgroups = CGroupEntry.loads(cgroups_stdout)
mounts = MountEntry.loads(mounts_stdout)
mounts = tuple(mount for mount in mounts if mount.path.is_relative_to(CGroupPath.ROOT))
mount_cgroups: dict[MountEntry, CGroupEntry] = {}
probe_paths: dict[pathlib.PurePosixPath, t.Optional[str]] = {}
for cgroup in cgroups:
if cgroup.subsystem:
mount = ([mount for mount in mounts if
mount.type == MountType.CGROUP_V1 and
mount.path.is_relative_to(cgroup.root_path) and
cgroup.full_path.is_relative_to(mount.path)
] or [None])[-1]
else:
mount = ([mount for mount in mounts if
mount.type == MountType.CGROUP_V2 and
mount.path == cgroup.root_path
] or [None])[-1]
if mount:
mount_cgroups[mount] = cgroup
for mount in mounts:
probe_paths[mount.path] = None
if (cgroup := mount_cgroups.get(mount)) and cgroup.full_path != mount.path: # child of mount.path
probe_paths[cgroup.full_path] = None
probe_script = read_text_file(os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'setup', 'probe_cgroups.py'))
probe_command = [config.python.path, '-', f'{container_name}-probe'] + [str(path) for path in probe_paths]
probe_results = json.loads(docker_exec(args, container_name, probe_command, capture=True, data=probe_script)[0])
for path in probe_paths:
probe_paths[path] = probe_results[str(path)]
remaining_mounts: dict[pathlib.PurePosixPath, MountEntry] = {mount.path: mount for mount in mounts}
results: dict[pathlib.PurePosixPath, tuple[bool, str]] = {}
for expected_mount in expected_mounts:
expected_path = pathlib.PurePosixPath(expected_mount.path)
if not (actual_mount := remaining_mounts.pop(expected_path, None)):
results[expected_path] = (False, 'not mounted')
continue
actual_mount_write_error = probe_paths[actual_mount.path]
actual_mount_errors = []
if cgroup := mount_cgroups.get(actual_mount):
if expected_mount.state == CGroupState.SHADOWED:
actual_mount_errors.append('unexpected cgroup association')
if cgroup.root_path == cgroup.full_path and expected_mount.state == CGroupState.HOST:
results[cgroup.root_path.joinpath('???')] = (False, 'missing cgroup')
if cgroup.full_path == actual_mount.path:
if cgroup.root_path != cgroup.full_path and expected_mount.state == CGroupState.PRIVATE:
actual_mount_errors.append('unexpected mount')
else:
cgroup_write_error = probe_paths[cgroup.full_path]
cgroup_errors = []
if expected_mount.state == CGroupState.SHADOWED:
cgroup_errors.append('unexpected cgroup association')
if cgroup.root_path != cgroup.full_path and expected_mount.state == CGroupState.PRIVATE:
cgroup_errors.append('unexpected cgroup')
if cgroup_write_error:
cgroup_errors.append(cgroup_write_error)
if cgroup_errors:
results[cgroup.full_path] = (False, f'directory errors: {", ".join(cgroup_errors)}')
else:
results[cgroup.full_path] = (True, 'directory (writable)')
elif expected_mount.state not in (None, CGroupState.SHADOWED):
actual_mount_errors.append('missing cgroup association')
if actual_mount.type != expected_mount.type and expected_mount.type is not None:
actual_mount_errors.append(f'type not {expected_mount.type}')
if bool(actual_mount_write_error) == expected_mount.writable:
actual_mount_errors.append(f'{actual_mount_write_error or "writable"}')
if actual_mount_errors:
results[actual_mount.path] = (False, f'{actual_mount.type} errors: {", ".join(actual_mount_errors)}')
else:
results[actual_mount.path] = (True, f'{actual_mount.type} ({actual_mount_write_error or "writable"})')
for remaining_mount in remaining_mounts.values():
remaining_mount_write_error = probe_paths[remaining_mount.path]
results[remaining_mount.path] = (False, f'unexpected {remaining_mount.type} mount ({remaining_mount_write_error or "writable"})')
identity = get_identity(args, config, container_name)
messages: list[tuple[pathlib.PurePosixPath, bool, str]] = [(path, result[0], result[1]) for path, result in sorted(results.items())]
message = '\n'.join(f'{"PASS" if result else "FAIL"}: {path} -> {message}' for path, result, message in messages)
display.info(f'>>> Container: {identity}\n{message.rstrip()}')
if args.dev_probe_cgroups:
write_text_file(os.path.join(args.dev_probe_cgroups, f'{identity}.log'), message)
def get_identity(args: EnvironmentConfig, config: DockerConfig, container_name: str) -> str:
"""Generate and return an identity string to use when logging test results."""
engine = require_docker().command
try:
loginuid = int(read_text_file('/proc/self/loginuid'))
except FileNotFoundError:
loginuid = LOGINUID_NOT_SET
user = pwd.getpwuid(os.getuid()).pw_name
login_user = user if loginuid == LOGINUID_NOT_SET else pwd.getpwuid(loginuid).pw_name
remote = engine == 'podman' and get_podman_remote()
tags = (
config.name,
engine,
f'cgroup={config.cgroup.value}@{get_docker_info(args).cgroup_version}',
f'remote={remote}',
f'user={user}',
f'loginuid={login_user}',
container_name,
)
return '|'.join(tags)
| CGroupMount |
python | getsentry__sentry | tests/sentry/templatetags/test_sentry_features.py | {
"start": 83,
"end": 806
} | class ____(TestCase):
# get a backend-dependent Template, just like get_template in >= Django 1.8
TEMPLATE = engines["django"].from_string(
"""
{% load sentry_features %}
{% feature auth:register %}
<span>register</span>
{% else %}
<span>nope</span>
{% endfeature %}
"""
)
def test_enabled(self) -> None:
with self.feature("auth:register"):
result = self.TEMPLATE.render()
assert "<span>register</span>" in result
def test_disabled(self) -> None:
with self.feature({"auth:register": False}):
result = self.TEMPLATE.render()
assert "<span>nope</span>" in result
| FeaturesTest |
python | mwaskom__seaborn | seaborn/_base.py | {
"start": 10211,
"end": 18062
} | class ____(SemanticMapping):
"""Mapping that sets artist sizes according to data values."""
# An object that normalizes data values to [0, 1] range
norm = None
def __init__(
self, plotter, sizes=None, order=None, norm=None,
):
"""Map the levels of the `size` variable to distinct values.
Parameters
----------
# TODO add generic parameters
"""
super().__init__(plotter)
data = plotter.plot_data.get("size", pd.Series(dtype=float))
if data.notna().any():
map_type = self.infer_map_type(
norm, sizes, plotter.var_types["size"]
)
# --- Option 1: numeric mapping
if map_type == "numeric":
levels, lookup_table, norm, size_range = self.numeric_mapping(
data, sizes, norm,
)
# --- Option 2: categorical mapping
elif map_type == "categorical":
levels, lookup_table = self.categorical_mapping(
data, sizes, order,
)
size_range = None
# --- Option 3: datetime mapping
# TODO this needs an actual implementation
else:
levels, lookup_table = self.categorical_mapping(
# Casting data to list to handle differences in the way
# pandas and numpy represent datetime64 data
list(data), sizes, order,
)
size_range = None
self.map_type = map_type
self.levels = levels
self.norm = norm
self.sizes = sizes
self.size_range = size_range
self.lookup_table = lookup_table
def infer_map_type(self, norm, sizes, var_type):
if norm is not None:
map_type = "numeric"
elif isinstance(sizes, (dict, list)):
map_type = "categorical"
else:
map_type = var_type
return map_type
def _lookup_single(self, key):
try:
value = self.lookup_table[key]
except KeyError:
normed = self.norm(key)
if np.ma.is_masked(normed):
normed = np.nan
value = self.size_range[0] + normed * np.ptp(self.size_range)
return value
def categorical_mapping(self, data, sizes, order):
levels = categorical_order(data, order)
if isinstance(sizes, dict):
# Dict inputs map existing data values to the size attribute
missing = set(levels) - set(sizes)
if any(missing):
err = f"Missing sizes for the following levels: {missing}"
raise ValueError(err)
lookup_table = sizes.copy()
elif isinstance(sizes, list):
# List inputs give size values in the same order as the levels
sizes = self._check_list_length(levels, sizes, "sizes")
lookup_table = dict(zip(levels, sizes))
else:
if isinstance(sizes, tuple):
# Tuple input sets the min, max size values
if len(sizes) != 2:
err = "A `sizes` tuple must have only 2 values"
raise ValueError(err)
elif sizes is not None:
err = f"Value for `sizes` not understood: {sizes}"
raise ValueError(err)
else:
# Otherwise, we need to get the min, max size values from
# the plotter object we are attached to.
# TODO this is going to cause us trouble later, because we
# want to restructure things so that the plotter is generic
# across the visual representation of the data. But at this
# point, we don't know the visual representation. Likely we
# want to change the logic of this Mapping so that it gives
# points on a normalized range that then gets un-normalized
# when we know what we're drawing. But given the way the
# package works now, this way is cleanest.
sizes = self.plotter._default_size_range
# For categorical sizes, use regularly-spaced linear steps
# between the minimum and maximum sizes. Then reverse the
# ramp so that the largest value is used for the first entry
# in size_order, etc. This is because "ordered" categories
# are often though to go in decreasing priority.
sizes = np.linspace(*sizes, len(levels))[::-1]
lookup_table = dict(zip(levels, sizes))
return levels, lookup_table
def numeric_mapping(self, data, sizes, norm):
if isinstance(sizes, dict):
# The presence of a norm object overrides a dictionary of sizes
# in specifying a numeric mapping, so we need to process it
# dictionary here
levels = list(np.sort(list(sizes)))
size_values = sizes.values()
size_range = min(size_values), max(size_values)
else:
# The levels here will be the unique values in the data
levels = list(np.sort(remove_na(data.unique())))
if isinstance(sizes, tuple):
# For numeric inputs, the size can be parametrized by
# the minimum and maximum artist values to map to. The
# norm object that gets set up next specifies how to
# do the mapping.
if len(sizes) != 2:
err = "A `sizes` tuple must have only 2 values"
raise ValueError(err)
size_range = sizes
elif sizes is not None:
err = f"Value for `sizes` not understood: {sizes}"
raise ValueError(err)
else:
# When not provided, we get the size range from the plotter
# object we are attached to. See the note in the categorical
# method about how this is suboptimal for future development.
size_range = self.plotter._default_size_range
# Now that we know the minimum and maximum sizes that will get drawn,
# we need to map the data values that we have into that range. We will
# use a matplotlib Normalize class, which is typically used for numeric
# color mapping but works fine here too. It takes data values and maps
# them into a [0, 1] interval, potentially nonlinear-ly.
if norm is None:
# Default is a linear function between the min and max data values
norm = mpl.colors.Normalize()
elif isinstance(norm, tuple):
# It is also possible to give different limits in data space
norm = mpl.colors.Normalize(*norm)
elif not isinstance(norm, mpl.colors.Normalize):
err = f"Value for size `norm` parameter not understood: {norm}"
raise ValueError(err)
else:
# If provided with Normalize object, copy it so we can modify
norm = copy(norm)
# Set the mapping so all output values are in [0, 1]
norm.clip = True
# If the input range is not set, use the full range of the data
if not norm.scaled():
norm(levels)
# Map from data values to [0, 1] range
sizes_scaled = norm(levels)
# Now map from the scaled range into the artist units
if isinstance(sizes, dict):
lookup_table = sizes
else:
lo, hi = size_range
sizes = lo + sizes_scaled * (hi - lo)
lookup_table = dict(zip(levels, sizes))
return levels, lookup_table, norm, size_range
| SizeMapping |
python | pytorch__pytorch | torch/_inductor/cache.py | {
"start": 8571,
"end": 14046
} | class ____(AsyncCache[Key, Value]):
"""
On-disk cache implementation using files and file locks.
Stores cache data in files on disk, with atomic operations and versioning.
Supports custom cache directory names.
Attributes:
version (int): The version used for cache versioning.
name (str): The name of the cache directory.
"""
version: int = 0
def __init__(self: Self, name: str | None = None) -> None:
"""
Initialize an on-disk cache instance.
Args:
name (str | None, optional): The name of the cache directory. If None,
defaults to "on_disk_cache".
"""
self.name = name or "on_disk_cache"
@cached_property
def base_dir(self: Self) -> Path:
"""
Get the base directory for the cache.
Returns:
Path: The base directory path for storing cache files.
"""
return Path(gettempdir()) / "cache" / self.name
def _fpath_from_key(self: Self, key: Key) -> Path:
"""
Get the file path for a given key.
Args:
key (Key): The key to convert to a file path.
Returns:
Path: The file path for the key.
Raises:
CacheError: If the key is not pickle-able.
"""
try:
return self.base_dir / sha256(pickle.dumps(key)).hexdigest()[:32]
except (AttributeError, pickle.PicklingError) as err:
raise CacheError(
f"Failed to get fpath for key {key!r}, key is not pickle-able."
) from err
# pyrefly: ignore [bad-argument-type]
assert_never(key)
def _flock_from_fpath(self: Self, fpath: Path) -> FileLock:
"""
Get a file lock for a given file path.
Args:
fpath (Path): The file path.
Returns:
FileLock: The file lock for the path.
"""
# fpath.name is a hex digest, meaning there are 16^4 potential values
# for fpath.name[:4]; this is more than enough unique locks to not
# cause additional overhead from shared locks and it also saves our
# cache dir from becoming 50 percent locks
# pyrefly: ignore [bad-return]
return FileLock(str(fpath.parent / "locks" / fpath.name[:4]) + ".lock")
@property
def version_prefix(self: Self) -> bytes:
"""
Get the version prefix for the cache.
Returns:
bytes: The version prefix as bytes, derived from the cache version string.
"""
return sha256(str(OnDiskCache.version).encode()).digest()[:4]
@override
def get(self: Self, key: Key) -> Value | None:
"""
Retrieve a value from the cache.
Args:
key (Key): The key to look up.
Returns:
Value | None: The cached value if present and version matches, else None.
Raises:
CacheError: If the value is corrupted or cannot be unpickled.
Side Effects:
Removes stale cache files if the version prefix does not match.
"""
fpath = self._fpath_from_key(key)
flock = self._flock_from_fpath(fpath)
with flock:
if not fpath.is_file():
return None
value_bytes = None
prefix_length = len(self.version_prefix)
with open(fpath, "rb") as fp:
if fp.read(prefix_length) == self.version_prefix:
value_bytes = fp.read()
if value_bytes is None:
# version_prefix did not match, so we can't read the stale
# cached value; we should also remove the stale cached value,
# so that key can be re-cached by the newer version
fpath.unlink()
return None
try:
value = pickle.loads(value_bytes)
except pickle.UnpicklingError as err:
raise CacheError(
f"Failed to get key {key!r}, value is potentially corrupted (value is not un-pickle-able)."
) from err
return value
@override
def insert(self: Self, key: Key, value: Value) -> bool:
"""
Insert a value into the cache.
Args:
key (Key): The key to insert.
value (Value): The value to associate with the key.
Returns:
bool: True if the value was inserted, False if the key already exists.
Raises:
CacheError: If the value is not pickle-able.
Side Effects:
Creates the cache directory if it does not exist.
"""
fpath = self._fpath_from_key(key)
flock = self._flock_from_fpath(fpath)
fpath.parent.mkdir(parents=True, exist_ok=True)
try:
# "x" mode is exclusive creation, meaning the file will be created
# iff the file does not already exist (atomic w/o overwrite); use
# flock for added atomicity guarantee and to prevent partial writes
with flock as _, open(fpath, "xb") as fp:
fp.write(self.version_prefix)
pickle.dump(value, fp)
except pickle.PicklingError as err:
raise CacheError(
f"Failed to insert key {key!r} with value {value!r}, value is not pickle-able."
) from err
except FileExistsError:
return False
return True
| OnDiskCache |
python | sqlalchemy__sqlalchemy | test/orm/test_dynamic.py | {
"start": 41744,
"end": 45631
} | class ____(
_DynamicFixture,
_UOWTests,
_fixtures.FixtureTest,
testing.AssertsExecutionResults,
):
run_inserts = None
@testing.combinations(
"empty", "persistent", "transient", argnames="merge_type"
)
def test_merge_persistent(self, merge_type, user_address_fixture):
addresses = self.tables.addresses
User, Address = user_address_fixture(
addresses_args={"order_by": addresses.c.email_address}
)
sess = fixture_session(autoflush=False)
a1 = Address(email_address="a1")
a2 = Address(email_address="a2")
a3 = Address(email_address="a3")
u1 = User(name="jack", addresses=[a2, a3])
if merge_type == "transient":
# merge transient. no collection iteration is implied by this.
u1 = sess.merge(u1)
sess.add(a1)
else:
sess.add_all([u1, a1])
sess.flush()
if merge_type == "persistent":
u1 = User(id=u1.id, name="jane", addresses=[a1, a3])
# for Dynamic, the list is iterated. it's been this way the
# whole time, which is clearly not very useful for a
# "collection that's too large to load". however we maintain
# legacy behavior here
u1 = sess.merge(u1)
eq_(attributes.get_history(u1, "addresses"), ([a1], [a3], [a2]))
sess.flush()
if self.lazy == "dynamic":
stmt = u1.addresses.statement
else:
stmt = u1.addresses.select()
eq_(sess.scalars(stmt).all(), [a1, a3])
elif merge_type == "empty":
# merge while omitting the "too large to load" collection
# works fine.
u1 = User(id=u1.id, name="jane")
u1 = sess.merge(u1)
eq_(attributes.get_history(u1, "addresses"), ([], [a2, a3], []))
sess.flush()
if self.lazy == "dynamic":
stmt = u1.addresses.statement
else:
stmt = u1.addresses.select()
eq_(sess.scalars(stmt).all(), [a2, a3])
@testing.combinations(True, False, argnames="delete_cascade_configured")
def test_delete_cascade(
self, delete_cascade_configured, user_address_fixture
):
addresses = self.tables.addresses
User, Address = user_address_fixture(
addresses_args={
"order_by": addresses.c.id,
"backref": "user",
"cascade": (
"save-update"
if not delete_cascade_configured
else "all, delete"
),
}
)
sess = fixture_session(
autoflush=True,
)
u = User(name="ed")
u.addresses.add_all(
[Address(email_address=letter) for letter in "abcdef"]
)
sess.add(u)
sess.commit()
from sqlalchemy import case
# the byzantine syntax here is so the query works on MSSQL
isnull_stmt = select(
case((addresses.c.user_id == None, True), else_=False),
func.count("*"),
).group_by(
case((addresses.c.user_id == None, True), else_=False),
addresses.c.user_id,
)
eq_(
{isnull: count for isnull, count in sess.execute(isnull_stmt)},
{False: 6},
)
sess.delete(u)
sess.commit()
if not delete_cascade_configured:
eq_(
{isnull: count for isnull, count in sess.execute(isnull_stmt)},
{True: 6},
)
else:
eq_(
sess.connection()
.execute(select(func.count("*")).select_from(addresses))
.scalar(),
0,
)
| DynamicUOWTest |
python | ansible__ansible | test/lib/ansible_test/_internal/ci/local.py | {
"start": 4477,
"end": 6690
} | class ____:
"""Change information for local work."""
def __init__(self, args: TestConfig) -> None:
self.args = args
self.git = Git()
self.current_branch = self.git.get_branch()
if self.is_official_branch(self.current_branch):
raise InvalidBranch(branch=self.current_branch,
reason='Current branch is not a feature branch.')
self.fork_branch = None
self.fork_point = None
self.local_branches = sorted(self.git.get_branches())
self.official_branches = sorted([b for b in self.local_branches if self.is_official_branch(b)])
for self.fork_branch in self.official_branches:
try:
self.fork_point = self.git.get_branch_fork_point(self.fork_branch)
break
except SubprocessError:
pass
if self.fork_point is None:
raise ApplicationError('Unable to auto-detect fork branch and fork point.')
# tracked files (including unchanged)
self.tracked = sorted(self.git.get_file_names(['--cached']))
# untracked files (except ignored)
self.untracked = sorted(self.git.get_file_names(['--others', '--exclude-standard']))
# tracked changes (including deletions) committed since the branch was forked
self.committed = sorted(self.git.get_diff_names([self.fork_point, 'HEAD']))
# tracked changes (including deletions) which are staged
self.staged = sorted(self.git.get_diff_names(['--cached']))
# tracked changes (including deletions) which are not staged
self.unstaged = sorted(self.git.get_diff_names([]))
# diff of all tracked files from fork point to working copy
self.diff = self.git.get_diff([self.fork_point])
def is_official_branch(self, name: str) -> bool:
"""Return True if the given branch name an official branch for development or releases."""
if self.args.base_branch:
return name == self.args.base_branch
if name == 'devel':
return True
if re.match(r'^stable-[0-9]+\.[0-9]+$', name):
return True
return False
| LocalChanges |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_high_low_lines02.py | {
"start": 315,
"end": 1607
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_high_low_lines02.xlsx")
def test_create_file(self):
"""Test the creation of an XlsxWriter file with high-low lines."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "line"})
chart.axis_ids = [61180928, 63898368]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.set_high_low_lines({"line": {"color": "red", "dash_type": "square_dot"}})
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | jschneier__django-storages | tests/test_gcloud.py | {
"start": 22752,
"end": 25400
} | class ____(GCloudTestCase):
def setUp(self):
super().setUp()
self.storage.gzip = True
@mock.patch("google.cloud.storage.blob.Blob._do_upload")
def test_storage_save_gzipped(self, *args):
"""
Test saving a gzipped file
"""
name = "test_storage_save.css.gz"
content = ContentFile("I am gzip'd", name=name)
blob = Blob("x", None)
blob.upload_from_file = mock.MagicMock(side_effect=blob.upload_from_file)
patcher = mock.patch("google.cloud.storage.Bucket.get_blob", return_value=blob)
try:
patcher.start()
self.storage.save(name, content)
obj = self.storage._bucket.get_blob()
obj.upload_from_file.assert_called_with(
mock.ANY,
rewind=True,
retry=DEFAULT_RETRY,
size=11,
predefined_acl=None,
content_type="text/css",
)
finally:
patcher.stop()
@mock.patch("google.cloud.storage.blob.Blob._do_upload")
def test_storage_save_gzip(self, *args):
"""
Test saving a file with gzip enabled.
"""
name = "test_storage_save.css"
content = ContentFile("I should be gzip'd")
blob = Blob("x", None)
blob.upload_from_file = mock.MagicMock(side_effect=blob.upload_from_file)
patcher = mock.patch("google.cloud.storage.Bucket.get_blob", return_value=blob)
try:
patcher.start()
self.storage.save(name, content)
obj = self.storage._bucket.get_blob()
obj.upload_from_file.assert_called_with(
mock.ANY,
rewind=True,
retry=DEFAULT_RETRY,
size=None,
predefined_acl=None,
content_type="text/css",
)
args, kwargs = obj.upload_from_file.call_args
content = args[0]
zfile = gzip.GzipFile(mode="rb", fileobj=content)
self.assertEqual(zfile.read(), b"I should be gzip'd")
finally:
patcher.stop()
def test_storage_read_gzip(self, *args):
"""
Test reading a gzipped file decompresses content only once.
"""
name = "test_storage_save.css"
file = GoogleCloudFile(name, "rb", self.storage)
blob = mock.MagicMock()
file.blob = blob
blob.download_to_file = lambda f, checksum=None: f.write(b"No gzip")
blob.content_encoding = "gzip"
f = file._get_file()
f.read() # This should not fail
| GoogleCloudGzipClientTests |
python | networkx__networkx | networkx/algorithms/tests/test_clique.py | {
"start": 7547,
"end": 9772
} | class ____:
def test_paper_figure_4(self):
# Same graph as given in Fig. 4 of paper enumerate_all_cliques is
# based on.
# http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1559964&isnumber=33129
G = nx.Graph()
edges_fig_4 = [
("a", "b"),
("a", "c"),
("a", "d"),
("a", "e"),
("b", "c"),
("b", "d"),
("b", "e"),
("c", "d"),
("c", "e"),
("d", "e"),
("f", "b"),
("f", "c"),
("f", "g"),
("g", "f"),
("g", "c"),
("g", "d"),
("g", "e"),
]
G.add_edges_from(edges_fig_4)
cliques = list(nx.enumerate_all_cliques(G))
clique_sizes = list(map(len, cliques))
assert sorted(clique_sizes) == clique_sizes
expected_cliques = [
["a"],
["b"],
["c"],
["d"],
["e"],
["f"],
["g"],
["a", "b"],
["a", "b", "d"],
["a", "b", "d", "e"],
["a", "b", "e"],
["a", "c"],
["a", "c", "d"],
["a", "c", "d", "e"],
["a", "c", "e"],
["a", "d"],
["a", "d", "e"],
["a", "e"],
["b", "c"],
["b", "c", "d"],
["b", "c", "d", "e"],
["b", "c", "e"],
["b", "c", "f"],
["b", "d"],
["b", "d", "e"],
["b", "e"],
["b", "f"],
["c", "d"],
["c", "d", "e"],
["c", "d", "e", "g"],
["c", "d", "g"],
["c", "e"],
["c", "e", "g"],
["c", "f"],
["c", "f", "g"],
["c", "g"],
["d", "e"],
["d", "e", "g"],
["d", "g"],
["e", "g"],
["f", "g"],
["a", "b", "c"],
["a", "b", "c", "d"],
["a", "b", "c", "d", "e"],
["a", "b", "c", "e"],
]
assert sorted(map(sorted, cliques)) == sorted(map(sorted, expected_cliques))
| TestEnumerateAllCliques |
python | PrefectHQ__prefect | tests/client/test_prefect_client.py | {
"start": 64033,
"end": 72543
} | class ____:
@pytest.fixture
async def deployment(self, prefect_client: PrefectClient):
foo = flow(lambda: None, name="foo")
flow_id = await prefect_client.create_flow(foo)
schedule = IntervalSchedule(
interval=timedelta(days=1), anchor_date=DateTime(2020, 1, 1)
)
deployment_id = await prefect_client.create_deployment(
flow_id=flow_id,
name="test-deployment",
schedules=[DeploymentScheduleCreate(schedule=schedule)],
parameters={"foo": "bar"},
work_queue_name="wq",
)
return deployment_id
async def test_create_then_read_work_queue(self, prefect_client: PrefectClient):
queue = await prefect_client.create_work_queue(name="foo")
assert isinstance(queue.id, UUID)
lookup = await prefect_client.read_work_queue(queue.id)
assert isinstance(lookup, WorkQueue)
assert lookup.name == "foo"
async def test_create_and_read_includes_status(self, prefect_client: PrefectClient):
queue = await prefect_client.create_work_queue(name="foo")
assert hasattr(queue, "status")
assert queue.status == "NOT_READY"
lookup = await prefect_client.read_work_queue(queue.id)
assert hasattr(lookup, "status")
assert lookup.status == "NOT_READY"
async def test_create_then_read_work_queue_by_name(
self, prefect_client: PrefectClient
):
queue = await prefect_client.create_work_queue(name="foo")
assert isinstance(queue.id, UUID)
lookup = await prefect_client.read_work_queue_by_name("foo")
assert lookup.name == "foo"
async def test_create_queue_with_settings(self, prefect_client: PrefectClient):
queue = await prefect_client.create_work_queue(
name="foo",
concurrency_limit=1,
is_paused=True,
priority=2,
description="such queue",
)
assert queue.concurrency_limit == 1
assert queue.is_paused is True
assert queue.priority == 2
assert queue.description == "such queue"
async def test_create_then_match_work_queues(self, prefect_client):
await prefect_client.create_work_queue(
name="one of these things is not like the other"
)
await prefect_client.create_work_queue(
name="one of these things just doesn't belong"
)
await prefect_client.create_work_queue(
name="can you tell which thing is not like the others"
)
matched_queues = await prefect_client.match_work_queues(["one of these things"])
assert len(matched_queues) == 2
async def test_read_nonexistant_work_queue(self, prefect_client):
with pytest.raises(prefect.exceptions.ObjectNotFound):
await prefect_client.read_work_queue_by_name("foo")
async def test_get_runs_from_queue_includes(self, prefect_client, deployment):
wq_1 = await prefect_client.read_work_queue_by_name(name="wq")
wq_2 = await prefect_client.create_work_queue(name="wq2")
run = await prefect_client.create_flow_run_from_deployment(deployment)
assert run.id
runs_1 = await prefect_client.get_runs_in_work_queue(wq_1.id)
assert runs_1[0].id == run.id
runs_2 = await prefect_client.get_runs_in_work_queue(wq_2.id)
assert runs_2 == []
async def test_get_runs_from_queue_respects_limit(self, prefect_client, deployment):
queue = await prefect_client.read_work_queue_by_name(name="wq")
runs = []
for _ in range(10):
run = await prefect_client.create_flow_run_from_deployment(deployment)
runs.append(run)
output = await prefect_client.get_runs_in_work_queue(queue.id, limit=1)
assert len(output) == 1
assert output[0].id in [r.id for r in runs]
output = await prefect_client.get_runs_in_work_queue(queue.id, limit=8)
assert len(output) == 8
assert {o.id for o in output} < {r.id for r in runs}
output = await prefect_client.get_runs_in_work_queue(queue.id, limit=20)
assert len(output) == 10
assert {o.id for o in output} == {r.id for r in runs}
async def test_delete_flow_run(prefect_client, flow_run):
    """Deleting a flow run removes it; deleting it again raises ObjectNotFound."""
    # Note - the flow_run provided by the fixture is not of type `FlowRun`.
    # (A leftover debug print of the fixture's type was removed here.)
    # Make sure our flow exists (the read flow is of type `s.c.FlowRun`)
    lookup = await prefect_client.read_flow_run(flow_run.id)
    assert isinstance(lookup, client_schemas.FlowRun)
    # Check delete works
    await prefect_client.delete_flow_run(flow_run.id)
    with pytest.raises(prefect.exceptions.ObjectNotFound):
        await prefect_client.read_flow_run(flow_run.id)
    # Check that trying to delete the deleted flow run raises an error
    with pytest.raises(prefect.exceptions.ObjectNotFound):
        await prefect_client.delete_flow_run(flow_run.id)
def test_server_type_ephemeral(enable_ephemeral_server):
    # With the ephemeral server enabled, a default client reports EPHEMERAL.
    prefect_client = get_client()
    assert prefect_client.server_type == ServerType.EPHEMERAL
async def test_server_type_server(hosted_api_server):
    # A client pointed at a hosted API URL reports SERVER.
    async with PrefectClient(hosted_api_server) as prefect_client:
        assert prefect_client.server_type == ServerType.SERVER
async def test_server_type_cloud():
    # A client pointed at the Cloud API URL reports CLOUD.
    async with PrefectClient(PREFECT_CLOUD_API_URL.value()) as prefect_client:
        assert prefect_client.server_type == ServerType.CLOUD
@pytest.mark.parametrize(
    "on_create, expected_value", [(True, True), (False, False), (None, False)]
)
async def test_update_deployment_does_not_overwrite_paused_when_not_provided(
    prefect_client, flow_run, on_create, expected_value
):
    """An update that omits `paused` must leave the stored value untouched."""
    deployment_id = await prefect_client.create_deployment(
        flow_id=flow_run.flow_id,
        name="test-deployment",
        parameters={"foo": "bar"},
        work_queue_name="wq",
        paused=on_create,
    )
    # Check that paused is created as expected (None defaults to False)
    deployment = await prefect_client.read_deployment(deployment_id)
    assert deployment.paused == expected_value
    # Only updating tags should not affect paused
    await prefect_client.update_deployment(
        deployment_id, client_schemas.actions.DeploymentUpdate(tags=["new-tag"])
    )
    deployment = await prefect_client.read_deployment(deployment_id)
    assert deployment.paused == expected_value
@pytest.mark.parametrize(
    "on_create, after_create, on_update, after_update",
    [
        (False, False, True, True),
        (True, True, False, False),
        (None, False, True, True),
    ],
)
async def test_update_deployment_paused(
    prefect_client,
    flow_run,
    on_create,
    after_create,
    on_update,
    after_update,
):
    """Explicitly setting `paused` on update overwrites the stored value."""
    deployment_id = await prefect_client.create_deployment(
        flow_id=flow_run.flow_id,
        name="test-deployment",
        parameters={"foo": "bar"},
        work_queue_name="wq",
        paused=on_create,
    )
    # Value after creation (None defaults to unpaused).
    deployment = await prefect_client.read_deployment(deployment_id)
    assert deployment.paused == after_create
    # An update carrying `paused` must take effect.
    await prefect_client.update_deployment(
        deployment_id, client_schemas.actions.DeploymentUpdate(paused=on_update)
    )
    deployment = await prefect_client.read_deployment(deployment_id)
    assert deployment.paused == after_update
async def test_pause_and_resume_deployment(prefect_client, flow_run):
    """pause/resume accept both UUID and string ids and reject invalid ones."""
    # Create deployment in unpaused state
    deployment_id = await prefect_client.create_deployment(
        flow_id=flow_run.flow_id,
        name="test-deployment",
        paused=False,
    )
    deployment = await prefect_client.read_deployment(deployment_id)
    assert deployment.paused is False
    # Test pause with UUID
    await prefect_client.pause_deployment(deployment_id)
    deployment = await prefect_client.read_deployment(deployment_id)
    assert deployment.paused is True
    # Test resume with string ID
    await prefect_client.resume_deployment(str(deployment_id))
    deployment = await prefect_client.read_deployment(deployment_id)
    assert deployment.paused is False
    # Test error cases: malformed id, then well-formed but unknown id
    with pytest.raises(ValueError, match="Invalid deployment ID"):
        await prefect_client.pause_deployment("not-a-uuid")
    fake_id = "00000000-0000-0000-0000-000000000000"
    with pytest.raises(prefect.exceptions.ObjectNotFound):
        await prefect_client.pause_deployment(fake_id)
| TestClientWorkQueues |
python | PyCQA__pylint | doc/data/messages/b/bad-mcs-classmethod-argument/bad.py | {
"start": 0,
"end": 101
} | class ____(type):
@classmethod
def foo(some): # [bad-mcs-classmethod-argument]
pass
| Meta |
python | kamyu104__LeetCode-Solutions | Python/shortest-path-to-get-food.py | {
"start": 37,
"end": 1018
} | class ____(object):
    def getFood(self, grid):
        """
        BFS from the start cell '*' to the nearest food cell '#'.
        Python 2 code (uses ``xrange``). The grid is mutated in place:
        visited cells are overwritten with 'X' so they double as walls.
        :type grid: List[List[str]]
        :rtype: int  # steps to nearest food, or -1 if unreachable
        """
        directions = [(0, 1), (1, 0), (0, -1), (-1, 0)]
        # Locate the starting position.
        # NOTE(review): the inner `break` only exits the column loop; assumes
        # exactly one '*' in the grid -- confirm against the problem statement.
        q = []
        for r in xrange(len(grid)):
            for c in xrange(len(grid[0])):
                if grid[r][c] == '*':
                    q.append((r, c))
                    break
        result = 0
        # Level-by-level BFS; `result` counts the current distance.
        while q:
            result += 1
            new_q = []
            for r, c in q:
                for dr, dc in directions:
                    nr, nc = r+dr, c+dc
                    # Skip out-of-bounds, walls, and already-visited cells.
                    if not (0 <= nr < len(grid) and
                            0 <= nc < len(grid[0]) and
                            grid[nr][nc] != 'X'):
                        continue
                    if grid[nr][nc] == '#':
                        return result
                    # Mark visited before enqueueing to avoid duplicates.
                    grid[nr][nc] = 'X'
                    new_q.append((nr, nc))
            q = new_q
        return -1
| Solution |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/ranges.py | {
"start": 22238,
"end": 27511
} | class ____(sqltypes.TypeEngine[_T]):
"""Base class for single and multi Range SQL types."""
    # Emit an explicit bind cast so the driver sees the concrete range type.
    render_bind_cast = True
    # Default GiST/SP-GiST operator class; subclasses may override.
    operator_classes = OperatorClass.NUMERIC
    __abstract__ = True
    @overload
    def adapt(self, cls: Type[_TE], **kw: Any) -> _TE: ...
    @overload
    def adapt(
        self, cls: Type[TypeEngineMixin], **kw: Any
    ) -> TypeEngine[Any]: ...
    def adapt(
        self,
        cls: Type[Union[TypeEngine[Any], TypeEngineMixin]],
        **kw: Any,
    ) -> TypeEngine[Any]:
        """Dynamically adapt a range type to an abstract impl.
        For example ``INT4RANGE().adapt(_Psycopg2NumericRange)`` should
        produce a type that will have ``_Psycopg2NumericRange`` behaviors
        and also render as ``INT4RANGE`` in SQL and DDL.
        """
        if (
            issubclass(cls, (AbstractSingleRangeImpl, AbstractMultiRangeImpl))
            and cls is not self.__class__
        ):
            # two ways to do this are: 1. create a new type on the fly
            # or 2. have AbstractRangeImpl(visit_name) constructor and a
            # visit_abstract_range_impl() method in the PG compiler.
            # I'm choosing #1 as the resulting type object
            # will then make use of the same mechanics
            # as if we had made all these sub-types explicitly, and will
            # also look more obvious under pdb etc.
            # The adapt() operation here is cached per type-class-per-dialect,
            # so is not much of a performance concern
            visit_name = self.__visit_name__
            return type(  # type: ignore
                f"{visit_name}RangeImpl",
                (cls, self.__class__),
                {"__visit_name__": visit_name},
            )()
        else:
            # Not an abstract impl (or already this class): default adaptation.
            return super().adapt(cls)
    class comparator_factory(TypeEngine.Comparator[Range[Any]]):
        """Define comparison operations for range types."""
        def contains(self, other: Any, **kw: Any) -> ColumnElement[bool]:
            """Boolean expression. Returns true if the right hand operand,
            which can be an element or a range, is contained within the
            column.
            kwargs may be ignored by this operator but are required for API
            conformance.
            """
            return self.expr.operate(CONTAINS, other)
        def contained_by(self, other: Any) -> ColumnElement[bool]:
            """Boolean expression. Returns true if the column is contained
            within the right hand operand.
            """
            return self.expr.operate(CONTAINED_BY, other)
        def overlaps(self, other: Any) -> ColumnElement[bool]:
            """Boolean expression. Returns true if the column overlaps
            (has points in common with) the right hand operand.
            """
            return self.expr.operate(OVERLAP, other)
        def strictly_left_of(self, other: Any) -> ColumnElement[bool]:
            """Boolean expression. Returns true if the column is strictly
            left of the right hand operand.
            """
            return self.expr.operate(STRICTLY_LEFT_OF, other)
        __lshift__ = strictly_left_of
        def strictly_right_of(self, other: Any) -> ColumnElement[bool]:
            """Boolean expression. Returns true if the column is strictly
            right of the right hand operand.
            """
            return self.expr.operate(STRICTLY_RIGHT_OF, other)
        __rshift__ = strictly_right_of
        def not_extend_right_of(self, other: Any) -> ColumnElement[bool]:
            """Boolean expression. Returns true if the range in the column
            does not extend right of the range in the operand.
            """
            return self.expr.operate(NOT_EXTEND_RIGHT_OF, other)
        def not_extend_left_of(self, other: Any) -> ColumnElement[bool]:
            """Boolean expression. Returns true if the range in the column
            does not extend left of the range in the operand.
            """
            return self.expr.operate(NOT_EXTEND_LEFT_OF, other)
        def adjacent_to(self, other: Any) -> ColumnElement[bool]:
            """Boolean expression. Returns true if the range in the column
            is adjacent to the range in the operand.
            """
            return self.expr.operate(ADJACENT_TO, other)
        def union(self, other: Any) -> ColumnElement[Range[_T]]:
            """Range expression. Returns the union of the two ranges.
            Will raise an exception if the resulting range is not
            contiguous.
            """
            return self.expr.operate(operators.add, other)
        def difference(self, other: Any) -> ColumnElement[Range[_T]]:
            """Range expression. Returns the difference of the two ranges.
            Will raise an exception if the resulting range is not
            contiguous.
            """
            return self.expr.operate(operators.sub, other)
        def intersection(self, other: Any) -> ColumnElement[Range[_T]]:
            """Range expression. Returns the intersection of the two
            ranges.
            """
            return self.expr.operate(operators.mul, other)
| AbstractRange |
python | spyder-ide__spyder | spyder/plugins/plots/widgets/main_widget.py | {
"start": 873,
"end": 1338
} | class ____:
# Triggers
Save = 'save'
SaveAll = 'save all'
Copy = 'copy'
Close = 'close'
CloseAll = 'close all'
MoveToPreviousFigure = 'previous figure'
MoveToNextFigure = 'next figure'
ZoomIn = 'zoom in'
ZoomOut = 'zoom out'
# Toggles
ToggleMuteInlinePlotting = 'toggle_mute_inline_plotting_action'
ToggleShowPlotOutline = 'toggle_show_plot_outline_action'
ToggleAutoFitPlotting = 'auto fit'
| PlotsWidgetActions |
python | ApeWorX__ape | src/ape_ethereum/multicall/exceptions.py | {
"start": 93,
"end": 245
} | class ____(MulticallException):
    def __init__(self, option_name: str) -> None:
        # Surface the offending option name in the error message.
        super().__init__(f"Option '{option_name}' not supported.")
| InvalidOption |
python | astropy__astropy | astropy/coordinates/representation/geodetic.py | {
"start": 3836,
"end": 6353
} | class ____(BaseRepresentation):
"""Representation of points in bodycentric 3D coordinates.
Subclasses need to set attributes ``_equatorial_radius`` and ``_flattening``
to quantities holding correct values (with units of length and dimensionless,
respectively). the bodycentric latitude and longitude are spherical latitude
and longitude relative to the barycenter of the body.
"""
attr_classes = {"lon": Longitude, "lat": Latitude, "height": u.Quantity}
    def __init_subclass__(cls, **kwargs):
        """Require concrete subclasses to define the spheroid parameters."""
        # The check uses cls.__dict__, so the attributes must be set on the
        # subclass itself, not merely inherited.
        if (
            "_equatorial_radius" not in cls.__dict__
            or "_flattening" not in cls.__dict__
        ):
            raise AttributeError(
                f"{cls.__name__} requires '_equatorial_radius' and '_flattening'."
            )
        super().__init_subclass__(**kwargs)
    def __init__(self, lon, lat=None, height=None, copy=True):
        # Default the height to 0 m, except when copy-constructing from an
        # instance of the same class (which already carries a height).
        if height is None and not isinstance(lon, self.__class__):
            height = 0 << u.m
        super().__init__(lon, lat, height, copy=copy)
        # Height must be a length; reject e.g. dimensionless or angular units.
        if not self.height.unit.is_equivalent(u.m):
            raise u.UnitTypeError(
                f"{self.__class__.__name__} requires height with units of length."
            )
def to_cartesian(self):
"""
Converts bodycentric coordinates to 3D rectangular (geocentric)
cartesian coordinates.
"""
coslat = np.cos(self.lat)
sinlat = np.sin(self.lat)
coslon = np.cos(self.lon)
sinlon = np.sin(self.lon)
r = (
self._equatorial_radius * np.hypot(coslat, (1 - self._flattening) * sinlat)
+ self.height
)
x = r * coslon * coslat
y = r * sinlon * coslat
z = r * sinlat
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
    @classmethod
    def from_cartesian(cls, cart):
        """
        Converts 3D rectangular cartesian coordinates (assumed geocentric) to
        bodycentric coordinates.
        """
        # Compute bodycentric latitude
        p = np.hypot(cart.x, cart.y)      # distance from the rotation (z) axis
        d = np.hypot(p, cart.z)           # distance from the body center
        lat = np.arctan2(cart.z, p)
        # Point on the reference spheroid at the same latitude.
        p_spheroid = cls._equatorial_radius * np.cos(lat)
        z_spheroid = cls._equatorial_radius * (1 - cls._flattening) * np.sin(lat)
        r_spheroid = np.hypot(p_spheroid, z_spheroid)
        # Height is the radial distance above the spheroid surface.
        height = d - r_spheroid
        lon = np.arctan2(cart.y, cart.x)
        return cls(lon, lat, height, copy=False)
@format_doc(geodetic_base_doc)
| BaseBodycentricRepresentation |
python | doocs__leetcode | solution/0700-0799/0788.Rotated Digits/Solution.py | {
"start": 0,
"end": 445
} | class ____:
def rotatedDigits(self, n: int) -> int:
def check(x):
y, t = 0, x
k = 1
while t:
v = t % 10
if d[v] == -1:
return False
y = d[v] * k + y
k *= 10
t //= 10
return x != y
d = [0, 1, 5, -1, -1, 2, 9, -1, 8, 6]
return sum(check(i) for i in range(1, n + 1))
| Solution |
python | spack__spack | lib/spack/spack/llnl/util/lock.py | {
"start": 29712,
"end": 29949
} | class ____(LockPermissionError):
"""Tried to take an exclusive lock on a read-only file."""
    def __init__(self, path):
        # Include the offending path so the failure is actionable.
        msg = "Can't take write lock on read-only file: %s" % path
        super().__init__(msg)
| LockROFileError |
python | google__jax | tests/clear_backends_test.py | {
"start": 766,
"end": 1165
} | class ____(jtu.JaxTestCase):
  def test_clear_backends(self):
    # Compiled executables are cached on the live backend ...
    g = jax.jit(lambda x, y: x * y)
    self.assertEqual(g(1, 2), 2)
    self.assertNotEmpty(xb.get_backend().live_executables())
    # ... clear_backends drops them ...
    api.clear_backends()
    self.assertEmpty(xb.get_backend().live_executables())
    # ... and the jitted function transparently recompiles on next call.
    self.assertEqual(g(1, 2), 2)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| ClearBackendsTest |
python | pytorch__pytorch | torch/ao/quantization/observer.py | {
"start": 4642,
"end": 5680
} | class ____(ABC, nn.Module):
r"""Base observer Module.
Any observer implementation should derive from this class.
Concrete observers should follow the same API. In forward, they will update
the statistics of the observed Tensor. And they should provide a
`calculate_qparams` function that computes the quantization parameters given
the collected statistics.
Args:
dtype: dtype argument to the `quantize` node needed to implement the
reference model spec.
is_dynamic: indicator for whether the observer is a placeholder for dynamic quantization
or static quantization
"""
    def __init__(self, dtype, is_dynamic: bool = False):
        super().__init__()
        # Target dtype for the `quantize` node in the reference model spec.
        self.dtype = dtype
        # True when the observer is a placeholder for dynamic quantization.
        self.is_dynamic = is_dynamic
    @abstractmethod
    def forward(self, x):
        """Update the observer's statistics from the observed tensor ``x``."""
        pass
    @abstractmethod
    def calculate_qparams(self, **kwargs):
        """Compute quantization parameters from the collected statistics."""
        pass
    # Factory helpers that bind constructor arguments ahead of instantiation.
    with_args = classmethod(_with_args)
    with_callable_args = classmethod(_with_callable_args)
| ObserverBase |
python | django__django | django/dispatch/dispatcher.py | {
"start": 1017,
"end": 19189
} | class ____:
"""
Base class for all signals
Internal attributes:
receivers:
[
(
(id(receiver), id(sender)),
ref(receiver),
ref(sender),
is_async,
)
]
sender_receivers_cache:
WeakKeyDictionary[sender, list[receiver]]
"""
    def __init__(self, use_caching=False):
        """
        Create a new signal.
        """
        # List of (lookup_key, receiver_ref, sender_ref, is_async) tuples.
        self.receivers = []
        # Guards all mutation of self.receivers.
        self.lock = threading.Lock()
        self.use_caching = use_caching
        # For convenience we create empty caches even if they are not used.
        # A note about caching: if use_caching is defined, then for each
        # distinct sender we cache the receivers that sender has in
        # 'sender_receivers_cache'. The cache is cleaned when .connect() or
        # .disconnect() is called and populated on send().
        self.sender_receivers_cache = weakref.WeakKeyDictionary() if use_caching else {}
        # Set from weakref finalizers; triggers lazy cleanup under the lock.
        self._dead_receivers = False
    def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
        """
        Connect receiver to sender for signal.
        Arguments:
            receiver
                A function or an instance method which is to receive signals.
                Receivers must be hashable objects. Receivers can be
                asynchronous.
                If weak is True, then receiver must be weak referenceable.
                Receivers must be able to accept keyword arguments.
                If a receiver is connected with a dispatch_uid argument, it
                will not be added if another receiver was already connected
                with that dispatch_uid.
            sender
                The sender to which the receiver should respond. Must either be
                a Python object, or None to receive events from any sender.
            weak
                Whether to use weak references to the receiver. By default, the
                module will attempt to use weak references to the receiver
                objects. If this parameter is false, then strong references
                will be used.
            dispatch_uid
                An identifier used to uniquely identify a particular instance
                of a receiver. This will usually be a string, though it may be
                anything hashable.
        """
        from django.conf import settings
        # If DEBUG is on, check that we got a good receiver
        if settings.configured and settings.DEBUG:
            if not callable(receiver):
                raise TypeError("Signal receivers must be callable.")
            # Check for **kwargs
            if not func_accepts_kwargs(receiver):
                raise ValueError(
                    "Signal receivers must accept keyword arguments (**kwargs)."
                )
        # The lookup key deduplicates connections (per dispatch_uid if given).
        if dispatch_uid:
            lookup_key = (dispatch_uid, _make_id(sender))
        else:
            lookup_key = (_make_id(receiver), _make_id(sender))
        is_async = iscoroutinefunction(receiver)
        if weak:
            ref = weakref.ref
            receiver_object = receiver
            # Check for bound methods
            if hasattr(receiver, "__self__") and hasattr(receiver, "__func__"):
                # WeakMethod keeps the method alive only while its instance is.
                ref = weakref.WeakMethod
                receiver_object = receiver.__self__
            receiver = ref(receiver)
            weakref.finalize(receiver_object, self._flag_dead_receivers)
        # Keep a weakref to sender if possible to ensure associated receivers
        # are cleared if it gets garbage collected. This ensures there is no
        # id(sender) collisions for distinct senders with non-overlapping
        # lifetimes.
        sender_ref = None
        if sender is not None:
            try:
                sender_ref = weakref.ref(sender, self._flag_dead_receivers)
            except TypeError:
                # sender is not weak-referenceable (e.g. an int or str);
                # fall back to keying on id(sender) only.
                pass
        with self.lock:
            self._clear_dead_receivers()
            if not any(r_key == lookup_key for r_key, _, _, _ in self.receivers):
                self.receivers.append((lookup_key, receiver, sender_ref, is_async))
            self.sender_receivers_cache.clear()
    def disconnect(self, receiver=None, sender=None, dispatch_uid=None):
        """
        Disconnect receiver from sender for signal.
        If weak references are used, disconnect need not be called. The
        receiver will be removed from dispatch automatically.
        Arguments:
            receiver
                The registered receiver to disconnect. May be none if
                dispatch_uid is specified.
            sender
                The registered sender to disconnect
            dispatch_uid
                the unique identifier of the receiver to disconnect
        Return True if a receiver was removed, False otherwise.
        """
        # Mirror the key construction used by connect() so lookups match.
        if dispatch_uid:
            lookup_key = (dispatch_uid, _make_id(sender))
        else:
            lookup_key = (_make_id(receiver), _make_id(sender))
        disconnected = False
        with self.lock:
            self._clear_dead_receivers()
            for index in range(len(self.receivers)):
                r_key, *_ = self.receivers[index]
                if r_key == lookup_key:
                    disconnected = True
                    del self.receivers[index]
                    break
            # Any cached per-sender receiver list may now be stale.
            self.sender_receivers_cache.clear()
        return disconnected
def has_listeners(self, sender=None):
sync_receivers, async_receivers = self._live_receivers(sender)
return bool(sync_receivers) or bool(async_receivers)
    def send(self, sender, **named):
        """
        Send signal from sender to all connected receivers.
        If any receiver raises an error, the error propagates back through
        send, terminating the dispatch loop. So it's possible that all
        receivers won't be called if an error is raised.
        If any receivers are asynchronous, they are called after all the
        synchronous receivers via a single call to async_to_sync(). They are
        also executed concurrently with asyncio.TaskGroup().
        Arguments:
            sender
                The sender of the signal. Either a specific object or None.
            named
                Named arguments which will be passed to receivers.
        Return a list of tuple pairs [(receiver, response), ... ].
        """
        # Fast path: nothing connected, or this sender is cached as having
        # no receivers.
        if (
            not self.receivers
            or self.sender_receivers_cache.get(sender) is NO_RECEIVERS
        ):
            return []
        responses = []
        sync_receivers, async_receivers = self._live_receivers(sender)
        for receiver in sync_receivers:
            response = receiver(signal=self, sender=sender, **named)
            responses.append((receiver, response))
        if async_receivers:
            # Run all async receivers concurrently inside one event loop.
            async def asend():
                async_responses = await _gather(
                    *(
                        receiver(signal=self, sender=sender, **named)
                        for receiver in async_receivers
                    )
                )
                return zip(async_receivers, async_responses)
            responses.extend(async_to_sync(asend)())
        return responses
    async def asend(self, sender, **named):
        """
        Send signal from sender to all connected receivers in async mode.
        All sync receivers will be wrapped by sync_to_async()
        If any receiver raises an error, the error propagates back through
        send, terminating the dispatch loop. So it's possible that all
        receivers won't be called if an error is raised.
        If any receivers are synchronous, they are grouped and called behind a
        sync_to_async() adaption before executing any asynchronous receivers.
        If any receivers are asynchronous, they are grouped and executed
        concurrently with asyncio.TaskGroup().
        Arguments:
            sender
                The sender of the signal. Either a specific object or None.
            named
                Named arguments which will be passed to receivers.
        Return a list of tuple pairs [(receiver, response), ...].
        """
        # Fast path: nothing connected, or sender cached as receiver-less.
        if (
            not self.receivers
            or self.sender_receivers_cache.get(sender) is NO_RECEIVERS
        ):
            return []
        sync_receivers, async_receivers = self._live_receivers(sender)
        if sync_receivers:
            # All sync receivers run sequentially inside one thread hop.
            @sync_to_async
            def sync_send():
                responses = []
                for receiver in sync_receivers:
                    response = receiver(signal=self, sender=sender, **named)
                    responses.append((receiver, response))
                return responses
        else:
            async def sync_send():
                return []
        # Sync batch and all async receivers are awaited concurrently.
        responses, async_responses = await _gather(
            sync_send(),
            _gather(
                *(
                    receiver(signal=self, sender=sender, **named)
                    for receiver in async_receivers
                )
            ),
        )
        responses.extend(zip(async_receivers, async_responses))
        return responses
    def _log_robust_failure(self, receiver, err):
        """Log (with traceback) a receiver failure from a *_robust dispatch."""
        logger.error(
            "Error calling %s in Signal.send_robust() (%s)",
            receiver.__qualname__,
            err,
            exc_info=err,
        )
    def send_robust(self, sender, **named):
        """
        Send signal from sender to all connected receivers catching errors.
        If any receivers are asynchronous, they are called after all the
        synchronous receivers via a single call to async_to_sync(). They are
        also executed concurrently with asyncio.TaskGroup().
        Arguments:
            sender
                The sender of the signal. Can be any Python object (normally
                one registered with a connect if you actually want something to
                occur).
            named
                Named arguments which will be passed to receivers.
        Return a list of tuple pairs [(receiver, response), ... ].
        If any receiver raises an error (specifically any subclass of
        Exception), return the error instance as the result for that receiver.
        """
        # Fast path: nothing connected, or sender cached as receiver-less.
        if (
            not self.receivers
            or self.sender_receivers_cache.get(sender) is NO_RECEIVERS
        ):
            return []
        # Call each receiver with whatever arguments it can accept.
        # Return a list of tuple pairs [(receiver, response), ... ].
        responses = []
        sync_receivers, async_receivers = self._live_receivers(sender)
        for receiver in sync_receivers:
            try:
                response = receiver(signal=self, sender=sender, **named)
            except Exception as err:
                # Robust mode: record the exception instead of propagating.
                self._log_robust_failure(receiver, err)
                responses.append((receiver, err))
            else:
                responses.append((receiver, response))
        if async_receivers:
            async def asend_and_wrap_exception(receiver):
                try:
                    response = await receiver(signal=self, sender=sender, **named)
                except Exception as err:
                    self._log_robust_failure(receiver, err)
                    return err
                return response
            async def asend():
                async_responses = await _gather(
                    *(
                        asend_and_wrap_exception(receiver)
                        for receiver in async_receivers
                    )
                )
                return zip(async_receivers, async_responses)
            responses.extend(async_to_sync(asend)())
        return responses
    async def asend_robust(self, sender, **named):
        """
        Send signal from sender to all connected receivers catching errors.
        If any receivers are synchronous, they are grouped and called behind a
        sync_to_async() adaption before executing any asynchronous receivers.
        If any receivers are asynchronous, they are grouped and executed
        concurrently with asyncio.TaskGroup.
        Arguments:
            sender
                The sender of the signal. Can be any Python object (normally
                one registered with a connect if you actually want something to
                occur).
            named
                Named arguments which will be passed to receivers.
        Return a list of tuple pairs [(receiver, response), ... ].
        If any receiver raises an error (specifically any subclass of
        Exception), return the error instance as the result for that receiver.
        """
        # Fast path: nothing connected, or sender cached as receiver-less.
        if (
            not self.receivers
            or self.sender_receivers_cache.get(sender) is NO_RECEIVERS
        ):
            return []
        # Call each receiver with whatever arguments it can accept.
        # Return a list of tuple pairs [(receiver, response), ... ].
        sync_receivers, async_receivers = self._live_receivers(sender)
        if sync_receivers:
            # Sync receivers run sequentially in one thread hop; exceptions
            # are captured per receiver instead of propagating.
            @sync_to_async
            def sync_send():
                responses = []
                for receiver in sync_receivers:
                    try:
                        response = receiver(signal=self, sender=sender, **named)
                    except Exception as err:
                        self._log_robust_failure(receiver, err)
                        responses.append((receiver, err))
                    else:
                        responses.append((receiver, response))
                return responses
        else:
            async def sync_send():
                return []
        async def asend_and_wrap_exception(receiver):
            try:
                response = await receiver(signal=self, sender=sender, **named)
            except Exception as err:
                self._log_robust_failure(receiver, err)
                return err
            return response
        responses, async_responses = await _gather(
            sync_send(),
            _gather(
                *(asend_and_wrap_exception(receiver) for receiver in async_receivers),
            ),
        )
        responses.extend(zip(async_receivers, async_responses))
        return responses
    def _clear_dead_receivers(self):
        # Note: caller is assumed to hold self.lock.
        if self._dead_receivers:
            self._dead_receivers = False
            # r[1] is the (possibly weak) receiver ref; r[2] is the optional
            # sender weakref. Drop entries where either has been collected.
            self.receivers = [
                r
                for r in self.receivers
                if (
                    not (isinstance(r[1], weakref.ReferenceType) and r[1]() is None)
                    and not (r[2] is not None and r[2]() is None)
                )
            ]
    def _live_receivers(self, sender):
        """
        Filter sequence of receivers to get resolved, live receivers.
        This checks for weak references and resolves them, then returning only
        live receivers.
        Return a (sync_receivers, async_receivers) pair of lists.
        """
        receivers = None
        if self.use_caching and not self._dead_receivers:
            receivers = self.sender_receivers_cache.get(sender)
            # We could end up here with NO_RECEIVERS even if we do check this
            # case in .send() prior to calling _live_receivers() due to
            # concurrent .send() call.
            if receivers is NO_RECEIVERS:
                return [], []
        if receivers is None:
            # Cache miss (or caching disabled): scan under the lock.
            with self.lock:
                self._clear_dead_receivers()
                senderkey = _make_id(sender)
                receivers = []
                for (
                    (_receiverkey, r_senderkey),
                    receiver,
                    sender_ref,
                    is_async,
                ) in self.receivers:
                    # NONE_ID entries listen to every sender.
                    if r_senderkey == NONE_ID or r_senderkey == senderkey:
                        receivers.append((receiver, sender_ref, is_async))
                if self.use_caching:
                    if not receivers:
                        self.sender_receivers_cache[sender] = NO_RECEIVERS
                    else:
                        # Note, we must cache the weakref versions.
                        self.sender_receivers_cache[sender] = receivers
        non_weak_sync_receivers = []
        non_weak_async_receivers = []
        for receiver, sender_ref, is_async in receivers:
            # Skip if the receiver/sender is a dead weakref
            if isinstance(receiver, weakref.ReferenceType):
                receiver = receiver()
                if receiver is None:
                    continue
            if sender_ref is not None and sender_ref() is None:
                continue
            if is_async:
                non_weak_async_receivers.append(receiver)
            else:
                non_weak_sync_receivers.append(receiver)
        return non_weak_sync_receivers, non_weak_async_receivers
    def _flag_dead_receivers(self, reference=None):
        """Weakref callback: flag that some receiver/sender has been collected."""
        # Mark that the self.receivers list has dead weakrefs. If so, we will
        # clean those up in connect, disconnect and _live_receivers while
        # holding self.lock. Note that doing the cleanup here isn't a good
        # idea, _flag_dead_receivers() will be called as side effect of garbage
        # collection, and so the call can happen while we are already holding
        # self.lock.
        self._dead_receivers = True
def receiver(signal, **kwargs):
    """
    A decorator for connecting receivers to signals. Used by passing in the
    signal (or list of signals) and keyword arguments to connect::
        @receiver(post_save, sender=MyModel)
        def signal_receiver(sender, **kwargs):
            ...
        @receiver([post_save, post_delete], sender=MyModel)
        def signals_receiver(sender, **kwargs):
            ...
    """
    def _decorator(func):
        # Normalize so single signals and signal lists share one code path.
        signals = signal if isinstance(signal, (list, tuple)) else [signal]
        for sig in signals:
            sig.connect(func, **kwargs)
        return func
    return _decorator
| Signal |
python | pyqtgraph__pyqtgraph | pyqtgraph/exporters/ImageExporter.py | {
"start": 250,
"end": 5153
} | class ____(Exporter):
Name = "Image File (PNG, TIF, JPG, ...)"
allowCopy = True
    def __init__(self, item):
        """Build the exporter for *item* (a GraphicsItem or a scene) and set
        up its parameter tree (size, antialias, background, value inversion).
        """
        Exporter.__init__(self, item)
        tr = self.getTargetRect()
        # Resolve the scene whether we were given an item or the scene itself.
        if isinstance(item, QtWidgets.QGraphicsItem):
            scene = item.scene()
        else:
            scene = item
        # Default export background mirrors the first view's background;
        # an unset (NoBrush) background exports as fully transparent.
        bgbrush = scene.views()[0].backgroundBrush()
        bg = bgbrush.color()
        if bgbrush.style() == QtCore.Qt.BrushStyle.NoBrush:
            bg.setAlpha(0)
        self.params = Parameter.create(name='params', type='group', children=[
            {
                'name': 'width',
                'title': translate("Exporter", 'width'),
                'type': 'int',
                'value': int(tr.width()),
                'limits': (0, None)
            },
            {
                'name': 'height',
                'title': translate("Exporter", 'height'),
                'type': 'int',
                'value': int(tr.height()),
                'limits': (0, None)
            },
            {
                'name': 'antialias',
                'title': translate("Exporter", 'antialias'),
                'type': 'bool',
                'value': True
            },
            {
                'name': 'background',
                'title': translate("Exporter", 'background'),
                'type': 'color',
                'value': bg
            },
            {
                'name': 'invertValue',
                'title': translate("Exporter", 'invertValue'),
                'type': 'bool',
                'value': False
            }
        ])
        # Keep width/height locked to the source aspect ratio.
        self.params.param('width').sigValueChanged.connect(self.widthChanged)
        self.params.param('height').sigValueChanged.connect(self.heightChanged)
    def widthChanged(self):
        """Recompute height from the new width to preserve the aspect ratio."""
        sr = self.getSourceRect()
        ar = float(sr.height()) / sr.width()
        # blockSignal prevents heightChanged from firing back recursively.
        self.params.param('height').setValue(int(self.params['width'] * ar), blockSignal=self.heightChanged)
    def heightChanged(self):
        """Recompute width from the new height to preserve the aspect ratio."""
        sr = self.getSourceRect()
        ar = float(sr.width()) / sr.height()
        # blockSignal prevents widthChanged from firing back recursively.
        self.params.param('width').setValue(int(self.params['height'] * ar), blockSignal=self.widthChanged)
    def parameters(self):
        """Return the exporter's Parameter tree (shown in the export dialog)."""
        return self.params
@staticmethod
def getSupportedImageFormats():
filter = ["*."+f.data().decode('utf-8') for f in QtGui.QImageWriter.supportedImageFormats()]
preferred = ['*.png', '*.tif', '*.jpg']
for p in preferred[::-1]:
if p in filter:
filter.remove(p)
filter.insert(0, p)
return filter
    def export(self, fileName=None, toBytes=False, copy=False):
        """Render the scene region to a QImage.
        With no arguments, open a file-save dialog (which re-invokes export
        with a fileName). Otherwise save to *fileName*, return the QImage
        (``toBytes=True``), or place it on the clipboard (``copy=True``).
        """
        if fileName is None and not toBytes and not copy:
            # Interactive path: ask the user where to save.
            filter = self.getSupportedImageFormats()
            self.fileSaveDialog(filter=filter)
            return
        w = int(self.params['width'])
        h = int(self.params['height'])
        if w == 0 or h == 0:
            raise Exception("Cannot export image with size=0 (requested "
                            "export size is %dx%d)" % (w, h))
        targetRect = QtCore.QRect(0, 0, w, h)
        sourceRect = self.getSourceRect()
        self.png = QtGui.QImage(w, h, QtGui.QImage.Format.Format_ARGB32)
        self.png.fill(self.params['background'])
        ## set resolution of image:
        origTargetRect = self.getTargetRect()
        resolutionScale = targetRect.width() / origTargetRect.width()
        #self.png.setDotsPerMeterX(self.png.dotsPerMeterX() * resolutionScale)
        #self.png.setDotsPerMeterY(self.png.dotsPerMeterY() * resolutionScale)
        painter = QtGui.QPainter(self.png)
        #dtr = painter.deviceTransform()
        try:
            # Let items adjust themselves for export, then render the scene.
            self.setExportMode(True, {
                'antialias': self.params['antialias'],
                'background': self.params['background'],
                'painter': painter,
                'resolutionScale': resolutionScale})
            painter.setRenderHint(QtGui.QPainter.RenderHint.Antialiasing, self.params['antialias'])
            self.getScene().render(painter, QtCore.QRectF(targetRect), QtCore.QRectF(sourceRect))
        finally:
            # Always restore items, even if rendering raised.
            self.setExportMode(False)
        painter.end()
        if self.params['invertValue']:
            bg = fn.ndarray_from_qimage(self.png)
            # Select the three color channels; the alpha byte's position in
            # the ARGB32 word depends on host endianness.
            if sys.byteorder == 'little':
                cv = slice(0, 3)
            else:
                cv = slice(1, 4)
            # Invert "value" while preserving hue: shift each pixel's channel
            # range so that min/max swap around the 0..255 midpoint.
            mn = bg[...,cv].min(axis=2)
            mx = bg[...,cv].max(axis=2)
            d = (255 - mx) - mn
            bg[...,cv] += d[...,np.newaxis]
        if copy:
            QtWidgets.QApplication.clipboard().setImage(self.png)
        elif toBytes:
            return self.png
        else:
            return self.png.save(fileName)
ImageExporter.register()
| ImageExporter |
python | huggingface__transformers | src/transformers/models/align/configuration_align.py | {
"start": 5667,
"end": 11855
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`AlignVisionModel`]. It is used to instantiate a
ALIGN vision encoder according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the vision encoder of the ALIGN
[kakaobrain/align-base](https://huggingface.co/kakaobrain/align-base) architecture. The default values are copied
from EfficientNet (efficientnet-b7)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
image_size (`int`, *optional*, defaults to 600):
The input image size.
width_coefficient (`float`, *optional*, defaults to 2.0):
Scaling coefficient for network width at each stage.
depth_coefficient (`float`, *optional*, defaults to 3.1):
Scaling coefficient for network depth at each stage.
depth_divisor `int`, *optional*, defaults to 8):
A unit of network width.
kernel_sizes (`list[int]`, *optional*, defaults to `[3, 3, 5, 3, 5, 5, 3]`):
List of kernel sizes to be used in each block.
in_channels (`list[int]`, *optional*, defaults to `[32, 16, 24, 40, 80, 112, 192]`):
List of input channel sizes to be used in each block for convolutional layers.
out_channels (`list[int]`, *optional*, defaults to `[16, 24, 40, 80, 112, 192, 320]`):
List of output channel sizes to be used in each block for convolutional layers.
depthwise_padding (`list[int]`, *optional*, defaults to `[]`):
List of block indices with square padding.
strides (`list[int]`, *optional*, defaults to `[1, 2, 2, 2, 1, 2, 1]`):
List of stride sizes to be used in each block for convolutional layers.
num_block_repeats (`list[int]`, *optional*, defaults to `[1, 2, 2, 3, 3, 4, 1]`):
List of the number of times each block is to repeated.
expand_ratios (`list[int]`, *optional*, defaults to `[1, 6, 6, 6, 6, 6, 6]`):
List of scaling coefficient of each block.
squeeze_expansion_ratio (`float`, *optional*, defaults to 0.25):
Squeeze expansion ratio.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`,
`"selu", `"gelu_new"`, `"silu"` and `"mish"` are supported.
hidden_dim (`int`, *optional*, defaults to 1280):
The hidden dimension of the layer before the classification head.
pooling_type (`str` or `function`, *optional*, defaults to `"mean"`):
Type of final pooling to be applied before the dense classification head. Available options are [`"mean"`,
`"max"`]
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
batch_norm_eps (`float`, *optional*, defaults to 1e-3):
The epsilon used by the batch normalization layers.
batch_norm_momentum (`float`, *optional*, defaults to 0.99):
The momentum used by the batch normalization layers.
drop_connect_rate (`float`, *optional*, defaults to 0.2):
The drop rate for skip connections.
Example:
```python
>>> from transformers import AlignVisionConfig, AlignVisionModel
>>> # Initializing a AlignVisionConfig with kakaobrain/align-base style configuration
>>> configuration = AlignVisionConfig()
>>> # Initializing a AlignVisionModel (with random weights) from the kakaobrain/align-base style configuration
>>> model = AlignVisionModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "align_vision_model"
base_config_key = "vision_config"
def __init__(
self,
num_channels: int = 3,
image_size: int = 600,
width_coefficient: float = 2.0,
depth_coefficient: float = 3.1,
depth_divisor: int = 8,
kernel_sizes: list[int] = [3, 3, 5, 3, 5, 5, 3],
in_channels: list[int] = [32, 16, 24, 40, 80, 112, 192],
out_channels: list[int] = [16, 24, 40, 80, 112, 192, 320],
depthwise_padding: list[int] = [],
strides: list[int] = [1, 2, 2, 2, 1, 2, 1],
num_block_repeats: list[int] = [1, 2, 2, 3, 3, 4, 1],
expand_ratios: list[int] = [1, 6, 6, 6, 6, 6, 6],
squeeze_expansion_ratio: float = 0.25,
hidden_act: str = "swish",
hidden_dim: int = 2560,
pooling_type: str = "mean",
initializer_range: float = 0.02,
batch_norm_eps: float = 0.001,
batch_norm_momentum: float = 0.99,
drop_connect_rate: float = 0.2,
**kwargs,
):
super().__init__(**kwargs)
self.num_channels = num_channels
self.image_size = image_size
self.width_coefficient = width_coefficient
self.depth_coefficient = depth_coefficient
self.depth_divisor = depth_divisor
self.kernel_sizes = kernel_sizes
self.in_channels = in_channels
self.out_channels = out_channels
self.depthwise_padding = depthwise_padding
self.strides = strides
self.num_block_repeats = num_block_repeats
self.expand_ratios = expand_ratios
self.squeeze_expansion_ratio = squeeze_expansion_ratio
self.hidden_act = hidden_act
self.hidden_dim = hidden_dim
self.pooling_type = pooling_type
self.initializer_range = initializer_range
self.batch_norm_eps = batch_norm_eps
self.batch_norm_momentum = batch_norm_momentum
self.drop_connect_rate = drop_connect_rate
self.num_hidden_layers = sum(num_block_repeats) * 4
| AlignVisionConfig |
python | openai__openai-python | src/openai/resources/audio/audio.py | {
"start": 905,
"end": 2013
} | class ____(SyncAPIResource):
@cached_property
def transcriptions(self) -> Transcriptions:
return Transcriptions(self._client)
@cached_property
def translations(self) -> Translations:
return Translations(self._client)
@cached_property
def speech(self) -> Speech:
return Speech(self._client)
@cached_property
def with_raw_response(self) -> AudioWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AudioWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AudioWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AudioWithStreamingResponse(self)
| Audio |
python | huggingface__transformers | src/transformers/models/speecht5/modeling_speecht5.py | {
"start": 72296,
"end": 73905
} | class ____(SpeechT5PreTrainedModel):
"""
This wrapper class is a helper class to correctly load pretrained checkpoints when used in combination with
[`SpeechT5Model`].
"""
def __init__(self, config: SpeechT5Config):
super().__init__(config)
self.wrapped_decoder = SpeechT5Decoder(config)
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
input_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]:
outputs = self.wrapped_decoder(
hidden_states=input_values,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
return outputs
| SpeechT5DecoderWithoutPrenet |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/destinations.py | {
"start": 51465,
"end": 55933
} | class ____(GeneratedAirbyteDestination):
class StandardInserts:
@public
def __init__(
self,
):
self.method = "Standard"
class HMACKey:
@public
def __init__(self, hmac_key_access_id: str, hmac_key_secret: str):
self.credential_type = "HMAC_KEY"
self.hmac_key_access_id = check.str_param(hmac_key_access_id, "hmac_key_access_id")
self.hmac_key_secret = check.str_param(hmac_key_secret, "hmac_key_secret")
class GCSStaging:
@public
def __init__(
self,
credential: "BigqueryDenormalizedDestination.HMACKey",
gcs_bucket_name: str,
gcs_bucket_path: str,
keep_files_in_gcs_bucket: Optional[str] = None,
):
self.method = "GCS Staging"
self.credential = check.inst_param(
credential, "credential", BigqueryDenormalizedDestination.HMACKey
)
self.gcs_bucket_name = check.str_param(gcs_bucket_name, "gcs_bucket_name")
self.gcs_bucket_path = check.str_param(gcs_bucket_path, "gcs_bucket_path")
self.keep_files_in_gcs_bucket = check.opt_str_param(
keep_files_in_gcs_bucket, "keep_files_in_gcs_bucket"
)
@public
def __init__(
self,
name: str,
project_id: str,
dataset_id: str,
loading_method: Union[
"BigqueryDenormalizedDestination.StandardInserts",
"BigqueryDenormalizedDestination.GCSStaging",
],
credentials_json: Optional[str] = None,
dataset_location: Optional[str] = None,
big_query_client_buffer_size_mb: Optional[int] = None,
):
"""Airbyte Destination for Bigquery Denormalized.
Documentation can be found at https://docs.airbyte.com/integrations/destinations/bigquery
Args:
name (str): The name of the destination.
project_id (str): The GCP project ID for the project containing the target BigQuery dataset. Read more here.
dataset_id (str): The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
loading_method (Union[BigqueryDenormalizedDestination.StandardInserts, BigqueryDenormalizedDestination.GCSStaging]): Loading method used to send select the way data will be uploaded to BigQuery. Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging. GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here.
credentials_json (Optional[str]): The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
dataset_location (Optional[str]): The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here.
big_query_client_buffer_size_mb (Optional[int]): Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
"""
self.project_id = check.str_param(project_id, "project_id")
self.dataset_id = check.str_param(dataset_id, "dataset_id")
self.loading_method = check.inst_param(
loading_method,
"loading_method",
(
BigqueryDenormalizedDestination.StandardInserts,
BigqueryDenormalizedDestination.GCSStaging,
),
)
self.credentials_json = check.opt_str_param(credentials_json, "credentials_json")
self.dataset_location = check.opt_str_param(dataset_location, "dataset_location")
self.big_query_client_buffer_size_mb = check.opt_int_param(
big_query_client_buffer_size_mb, "big_query_client_buffer_size_mb"
)
super().__init__("Bigquery Denormalized", name)
| BigqueryDenormalizedDestination |
python | h5py__h5py | h5py/tests/test_file2.py | {
"start": 9712,
"end": 11160
} | class ____(TestCase):
"""
Feature: The meta block size can be manipulated, changing how metadata
is aggregated and the offset of the first dataset.
"""
def test_file_create_with_meta_block_size_4096(self):
# Test a large meta block size of 4 kibibytes
meta_block_size = 4096
with File(
self.mktemp(), 'w',
meta_block_size=meta_block_size,
libver="latest"
) as f:
f["test"] = 5
self.assertEqual(f.meta_block_size, meta_block_size)
# Equality is expected for HDF5 1.10
self.assertGreaterEqual(f["test"].id.get_offset(), meta_block_size)
def test_file_create_with_meta_block_size_512(self):
# Test a small meta block size of 512 bytes
# The smallest verifiable meta_block_size is 463
meta_block_size = 512
libver = "latest"
with File(
self.mktemp(), 'w',
meta_block_size=meta_block_size,
libver=libver
) as f:
f["test"] = 3
self.assertEqual(f.meta_block_size, meta_block_size)
# Equality is expected for HDF5 1.10
self.assertGreaterEqual(f["test"].id.get_offset(), meta_block_size)
# Default meta_block_size is 2048. This should fail if meta_block_size is not set.
self.assertLess(f["test"].id.get_offset(), meta_block_size*2)
| TestFileMetaBlockSize |
python | PrefectHQ__prefect | tests/test_flow_engine.py | {
"start": 4209,
"end": 4870
} | class ____:
async def test_start_updates_empirical_policy_on_provided_flow_run(
self, prefect_client: PrefectClient
):
@flow(retries=3, retry_delay_seconds=10)
def flow_with_retries():
pass
flow_run = await prefect_client.create_flow_run(flow_with_retries)
engine = AsyncFlowRunEngine(flow=flow_with_retries, flow_run=flow_run)
async with engine.start():
assert engine.flow_run.empirical_policy.retries == 3
assert engine.flow_run.empirical_policy.retry_delay == 10
# avoid error on teardown
await engine.begin_run()
| TestStartAsyncFlowRunEngine |
python | numba__numba | numba/core/types/misc.py | {
"start": 3825,
"end": 4427
} | class ____(Type):
"""
Type class for pointers to other types.
Attributes
----------
dtype : The pointee type
addrspace : int
The address space pointee belongs to.
"""
mutable = True
def __init__(self, dtype, addrspace=None):
self.dtype = dtype
self.addrspace = addrspace
if addrspace is not None:
name = "%s_%s*" % (dtype, addrspace)
else:
name = "%s*" % dtype
super(CPointer, self).__init__(name)
@property
def key(self):
return self.dtype, self.addrspace
| CPointer |
python | pypa__warehouse | warehouse/forms.py | {
"start": 1078,
"end": 2913
} | class ____:
# From the zxcvbn documentation, a score of 2 is:
# somewhat guessable: protection from unthrottled online attacks.
# (guesses < 10^8)
# So we're going to require at least a score of 2 to be a valid password.
# That should (ideally) provide protection against all attacks that don't
# involve a lost database dump.
def __init__(self, *, user_input_fields=None, required_strength=2):
self.user_input_fields = user_input_fields or []
self.required_strength = required_strength
def __call__(self, form, field):
# Get all of our additional data to be used as user input to zxcvbn.
user_inputs = []
for fieldname in self.user_input_fields:
try:
user_inputs.append(form[fieldname].data)
except KeyError:
raise ValidationError(f"Invalid field name: {fieldname!r}")
# Actually ask zxcvbn to check the strength of the given field's data.
results = zxcvbn(
field.data, user_inputs=user_inputs, max_length=MAX_PASSWORD_SIZE
)
# Determine if the score is too low, and if it is produce a nice error
# message, *hopefully* with suggestions to make the password stronger.
if results["score"] < self.required_strength:
msg = (
results["feedback"]["warning"]
if results["feedback"]["warning"]
# Note: we can't localize this string because it will be mixed
# with other non-localizable strings from zxcvbn
else "Password is too easily guessed."
)
if results["feedback"]["suggestions"]:
msg += " " + " ".join(results["feedback"]["suggestions"])
raise ValidationError(msg)
| PasswordStrengthValidator |
python | great-expectations__great_expectations | tests/core/test__docs_decorators.py | {
"start": 12712,
"end": 18922
} | class ____:
@pytest.mark.unit
def test_new_argument_decorator_full_docstring_new_argument(self):
assert _func_full_docstring_new_argument.__doc__ == (
"My docstring.\n"
"\n"
"Longer description.\n"
"\n"
"Args:\n"
" some_arg:\n"
" describe some_arg\n"
" \n"
" .. versionadded:: 1.2.3\n"
" some msg\n"
" other_arg:\n"
" describe other_arg"
)
@pytest.mark.unit
def test_new_argument_decorator_full_docstring_two_new_arguments(self):
assert _func_full_docstring_two_new_arguments.__doc__ == (
"My docstring.\n"
"\n"
"Longer description.\n"
"\n"
"Args:\n"
" some_arg:\n"
" describe some_arg\n"
" \n"
" .. versionadded:: 1.2.3\n"
" some msg\n"
" other_arg:\n"
" describe other_arg\n"
" \n"
" .. versionadded:: 1.2.3\n"
" some other msg"
)
@pytest.mark.unit
def test_new_argument_full_docstring_new_argument_no_description(self):
assert _func_full_docstring_new_argument_no_description.__doc__ == (
"My docstring.\n"
"\n"
"Longer description.\n"
"\n"
"Args:\n"
" some_arg:\n"
" \n"
" \n"
" .. versionadded:: 1.2.3\n"
" some msg\n"
" other_arg:\n"
" describe other_arg"
)
@pytest.mark.unit
def test_new_argument_full_docstring_new_argument_missing(self):
with pytest.raises(ValueError) as e:
@new_argument(
argument_name="this_arg_doesnt_exist",
version="1.2.3",
message="some msg",
)
def _func_full_docstring_new_argument_missing(some_arg, other_arg):
"""My docstring.
Longer description.
Args:
some_arg: describe some_arg
other_arg: describe other_arg
"""
pass
assert "Please specify an existing argument, you specified this_arg_doesnt_exist." in str(
e.value
)
# All Decorators
@public_api
@new_method_or_class(version="1.2.3", message="Added in version 1.2.3")
@deprecated_method_or_class(version="1.2.3", message="This is deprecated!!")
@new_argument(argument_name="some_arg", version="1.2.3", message="some msg")
@deprecated_argument(argument_name="other_arg", version="1.2.3", message="some msg")
def _func_full_docstring_all_decorators(some_arg, other_arg):
"""My docstring.
Longer description.
Args:
some_arg: describe some_arg
other_arg: describe other_arg
"""
pass
@pytest.mark.unit
def test_all_decorators_full_docstring():
assert _func_full_docstring_all_decorators.__doc__ == (
"--Public API--My docstring.\n"
"\n"
".. versionadded:: 1.2.3\n"
" Added in version 1.2.3\n"
"\n"
"\n"
".. deprecated:: 1.2.3\n"
" This is deprecated!!\n"
"\n"
"\n"
"Longer description.\n"
"\n"
"Args:\n"
" some_arg:\n"
" describe some_arg\n"
"\n"
" .. versionadded:: 1.2.3\n"
" some msg\n"
" other_arg:\n"
" describe other_arg\n"
"\n"
" .. deprecated:: 1.2.3\n"
" some msg"
)
@public_api
@new_method_or_class(version="1.2.3", message="Added in version 1.2.3")
@deprecated_method_or_class(version="1.2.3", message="This is deprecated!!")
@new_argument(argument_name="some_arg", version="1.2.3", message="some msg")
@deprecated_argument(argument_name="other_arg", version="1.2.3", message="some msg")
def _func_full_docstring_all_decorators_all_sections(
arg_not_decorated, some_arg, other_arg, other_arg_not_decorated
):
"""My docstring.
Longer description.
Usage:
Some usage example.
Args:
arg_not_decorated: description.
some_arg: describe some_arg.
other_arg: describe other_arg.
other_arg_not_decorated: description.
Returns:
Some returns value.
Next line.
Raises:
SomeError: Some error text.
"""
pass
@pytest.mark.unit
def test_all_decorators_full_docstring_all_sections():
"""Makes sure that Returns and Raises are rendered correctly in the context of a full docstring.""" # noqa: E501 # FIXME CoP
assert _func_full_docstring_all_decorators_all_sections.__doc__ == (
"--Public API--My docstring.\n"
"\n"
".. versionadded:: 1.2.3\n"
" Added in version 1.2.3\n"
"\n"
"\n"
".. deprecated:: 1.2.3\n"
" This is deprecated!!\n"
"\n"
"\n"
"Longer description.\n"
"\n"
"Usage:\n"
" Some usage example.\n"
"\n"
"Args:\n"
" arg_not_decorated:\n"
" description.\n"
" some_arg:\n"
" describe some_arg.\n"
"\n"
" .. versionadded:: 1.2.3\n"
" some msg\n"
" other_arg:\n"
" describe other_arg.\n"
"\n"
" .. deprecated:: 1.2.3\n"
" some msg\n"
" other_arg_not_decorated:\n"
" description.\n"
"\n"
"Returns:\n"
" :\n"
" Some returns value.\n"
" Next line.\n"
"\n"
"Raises:\n"
" SomeError:\n"
" Some error text."
)
@pytest.mark.unit
def test_all_decorators_do_not_change_function_name():
assert _func_full_docstring_all_decorators.__name__ == "_func_full_docstring_all_decorators"
# Class level decorators
@public_api
| TestNewArgument |
python | getsentry__sentry | tests/sentry_plugins/twilio/test_plugin.py | {
"start": 327,
"end": 2617
} | class ____(TestCase):
def test_valid_split_sms_to(self) -> None:
to = "330-509-3095, (330)-509-3095, +13305093095, 4045550144"
expected = {"330-509-3095", "(330)-509-3095", "+13305093095", "4045550144"}
actual = split_sms_to(to)
assert expected == actual
def test_valid_split_sms_to_with_extra_spaces(self) -> None:
to = "330-509-3095 , (330)-509-3095, +13305093095, 4045550144"
expected = {"330-509-3095", "(330)-509-3095", "+13305093095", "4045550144"}
actual = split_sms_to(to)
assert expected == actual
def test_valid_split_sms_to_with_just_spaces(self) -> None:
to = "330-509-3095 (330)-509-3095 +13305093095 4045550144"
expected = {"330-509-3095", "(330)-509-3095", "+13305093095", "4045550144"}
actual = split_sms_to(to)
assert expected == actual
def test_valid_split_sms_to_with_no_whitespace(self) -> None:
to = "330-509-3095,(330)-509-3095,+13305093095,4045550144"
expected = {"330-509-3095", "(330)-509-3095", "+13305093095", "4045550144"}
actual = split_sms_to(to)
assert expected == actual
def test_split_sms_to_with_single_number(self) -> None:
to = "555-555-5555"
expected = {"555-555-5555"}
actual = split_sms_to(to)
assert expected == actual
def test_valid_split_sms_to_newline(self) -> None:
to = "330-509-3095,\n(330)-509-3095\n,+13305093095\n,\n4045550144"
expected = {"330-509-3095", "(330)-509-3095", "+13305093095", "4045550144"}
actual = split_sms_to(to)
assert expected == actual
def test_valid_split_sms_to_with_just_newlines(self) -> None:
to = "330-509-3095\n(330)-509-3095\n+13305093095\n\n4045550144"
expected = {"330-509-3095", "(330)-509-3095", "+13305093095", "4045550144"}
actual = split_sms_to(to)
assert expected == actual
def test_valid_split_sms_to_with_extra_newlines(self) -> None:
to = "330-509-3095\n\n\n\n\n,\n\n\n\n\n\n\n\n\n(330)-509-3095,\n\n\n\n+13305093095,\n\n4045550144"
expected = {"330-509-3095", "(330)-509-3095", "+13305093095", "4045550144"}
actual = split_sms_to(to)
assert expected == actual
| TwilioPluginSMSSplitTest |
python | huggingface__transformers | src/transformers/models/x_clip/modeling_x_clip.py | {
"start": 14415,
"end": 17139
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: XCLIPConfig):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = XCLIPAttention(config)
self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.mlp = XCLIPMLP(config)
self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
causal_attention_mask: torch.Tensor,
output_attentions: Optional[bool] = False,
) -> tuple[torch.FloatTensor]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
`(config.encoder_attention_heads,)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
causal_attention_mask=causal_attention_mask,
output_attentions=output_attentions,
)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
# Copied from transformers.models.beit.modeling_beit.drop_path
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
if drop_prob == 0.0 or not training:
return input
keep_prob = 1 - drop_prob
shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
random_tensor.floor_() # binarize
output = input.div(keep_prob) * random_tensor
return output
# Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->XCLIP
| XCLIPEncoderLayer |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.