language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pypa__pipenv | pipenv/exceptions.py | {
"start": 9231,
"end": 10036
} | class ____(PipenvException):
def __init__(self, package, command, return_values, return_code, **kwargs):
extra = [
"{} {}".format(
"[cyan]Attempting to run command: [/cyan]",
f"[bold yellow]$ {command!r}[/bold yellow]",
)
]
extra.extend(
[f"[cyan]{line.strip()}[/cyan]" for line in return_values.splitlines()]
)
if isinstance(package, (tuple, list, set)):
package = " ".join(package)
message = "{!s} {!s}...".format(
"Failed to uninstall package(s)",
f"[bold yellow]{package}!s[/bold yellow]",
)
self.exit_code = return_code
PipenvException.__init__(self, message=message, extra=extra)
self.extra = extra
| UninstallError |
python | doocs__leetcode | solution/3100-3199/3119.Maximum Number of Potholes That Can Be Fixed/Solution.py | {
"start": 0,
"end": 612
} | class ____:
def maxPotholes(self, road: str, budget: int) -> int:
road += "."
n = len(road)
cnt = [0] * n
k = 0
for c in road:
if c == "x":
k += 1
elif k:
cnt[k] += 1
k = 0
ans = 0
for k in range(n - 1, 0, -1):
if cnt[k] == 0:
continue
t = min(budget // (k + 1), cnt[k])
ans += t * k
budget -= t * (k + 1)
if budget == 0:
break
cnt[k - 1] += cnt[k] - t
return ans
| Solution |
python | wandb__wandb | wandb/sdk/lib/preinit.py | {
"start": 59,
"end": 1450
} | class ____:
def __init__(self, name: str, destination: Optional[Any] = None) -> None:
self._name = name
if destination is not None:
self.__doc__ = destination.__doc__
def __getitem__(self, key: str) -> None:
raise wandb.Error(f"You must call wandb.init() before {self._name}[{key!r}]")
def __setitem__(self, key: str, value: Any) -> Any:
raise wandb.Error(f"You must call wandb.init() before {self._name}[{key!r}]")
def __setattr__(self, key: str, value: Any) -> Any:
if not key.startswith("_"):
raise wandb.Error(f"You must call wandb.init() before {self._name}.{key}")
else:
return object.__setattr__(self, key, value)
def __getattr__(self, key: str) -> Any:
if not key.startswith("_"):
raise wandb.Error(f"You must call wandb.init() before {self._name}.{key}")
else:
raise AttributeError
def PreInitCallable( # noqa: N802
name: str, destination: Optional[Any] = None
) -> Callable:
def preinit_wrapper(*args: Any, **kwargs: Any) -> Any:
raise wandb.Error(f"You must call wandb.init() before {name}()")
preinit_wrapper.__name__ = str(name)
if destination:
preinit_wrapper.__wrapped__ = destination # type: ignore
preinit_wrapper.__doc__ = destination.__doc__
return preinit_wrapper
| PreInitObject |
python | lepture__authlib | authlib/oauth2/rfc6749/errors.py | {
"start": 6826,
"end": 6954
} | class ____(OAuth2Error):
error = "missing_token"
description = "Missing 'access_token' in response."
| MissingTokenException |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess4.py | {
"start": 205,
"end": 255
} | class ____(Protocol):
item: int
| HasItemProtocol1 |
python | sympy__sympy | sympy/physics/units/dimensions.py | {
"start": 10002,
"end": 21151
} | class ____(Basic, _QuantityMapper):
r"""
DimensionSystem represents a coherent set of dimensions.
The constructor takes three parameters:
- base dimensions;
- derived dimensions: these are defined in terms of the base dimensions
(for example velocity is defined from the division of length by time);
- dependency of dimensions: how the derived dimensions depend
on the base dimensions.
Optionally either the ``derived_dims`` or the ``dimensional_dependencies``
may be omitted.
"""
def __new__(cls, base_dims, derived_dims=(), dimensional_dependencies={}):
dimensional_dependencies = dict(dimensional_dependencies)
def parse_dim(dim):
if isinstance(dim, str):
dim = Dimension(Symbol(dim))
elif isinstance(dim, Dimension):
pass
elif isinstance(dim, Symbol):
dim = Dimension(dim)
else:
raise TypeError("%s wrong type" % dim)
return dim
base_dims = [parse_dim(i) for i in base_dims]
derived_dims = [parse_dim(i) for i in derived_dims]
for dim in base_dims:
if (dim in dimensional_dependencies
and (len(dimensional_dependencies[dim]) != 1 or
dimensional_dependencies[dim].get(dim, None) != 1)):
raise IndexError("Repeated value in base dimensions")
dimensional_dependencies[dim] = Dict({dim: 1})
def parse_dim_name(dim):
if isinstance(dim, Dimension):
return dim
elif isinstance(dim, str):
return Dimension(Symbol(dim))
elif isinstance(dim, Symbol):
return Dimension(dim)
else:
raise TypeError("unrecognized type %s for %s" % (type(dim), dim))
for dim in dimensional_dependencies.keys():
dim = parse_dim(dim)
if (dim not in derived_dims) and (dim not in base_dims):
derived_dims.append(dim)
def parse_dict(d):
return Dict({parse_dim_name(i): j for i, j in d.items()})
# Make sure everything is a SymPy type:
dimensional_dependencies = {parse_dim_name(i): parse_dict(j) for i, j in
dimensional_dependencies.items()}
for dim in derived_dims:
if dim in base_dims:
raise ValueError("Dimension %s both in base and derived" % dim)
if dim not in dimensional_dependencies:
# TODO: should this raise a warning?
dimensional_dependencies[dim] = Dict({dim: 1})
base_dims.sort(key=default_sort_key)
derived_dims.sort(key=default_sort_key)
base_dims = Tuple(*base_dims)
derived_dims = Tuple(*derived_dims)
dimensional_dependencies = Dict({i: Dict(j) for i, j in dimensional_dependencies.items()})
obj = Basic.__new__(cls, base_dims, derived_dims, dimensional_dependencies)
return obj
@property
def base_dims(self):
return self.args[0]
@property
def derived_dims(self):
return self.args[1]
@property
def dimensional_dependencies(self):
return self.args[2]
def _get_dimensional_dependencies_for_name(self, dimension):
if isinstance(dimension, str):
dimension = Dimension(Symbol(dimension))
elif not isinstance(dimension, Dimension):
dimension = Dimension(dimension)
if dimension.name.is_Symbol:
# Dimensions not included in the dependencies are considered
# as base dimensions:
return dict(self.dimensional_dependencies.get(dimension, {dimension: 1}))
if dimension.name.is_number or dimension.name.is_NumberSymbol:
return {}
get_for_name = self._get_dimensional_dependencies_for_name
if dimension.name.is_Mul:
ret = collections.defaultdict(int)
dicts = [get_for_name(i) for i in dimension.name.args]
for d in dicts:
for k, v in d.items():
ret[k] += v
return {k: v for (k, v) in ret.items() if v != 0}
if dimension.name.is_Add:
dicts = [get_for_name(i) for i in dimension.name.args]
if all(d == dicts[0] for d in dicts[1:]):
return dicts[0]
raise TypeError("Only equivalent dimensions can be added or subtracted.")
if dimension.name.is_Pow:
dim_base = get_for_name(dimension.name.base)
dim_exp = get_for_name(dimension.name.exp)
if dim_exp == {} or dimension.name.exp.is_Symbol:
return {k: v * dimension.name.exp for (k, v) in dim_base.items()}
else:
raise TypeError("The exponent for the power operator must be a Symbol or dimensionless.")
if dimension.name.is_Function:
args = (Dimension._from_dimensional_dependencies(
get_for_name(arg)) for arg in dimension.name.args)
result = dimension.name.func(*args)
dicts = [get_for_name(i) for i in dimension.name.args]
if isinstance(result, Dimension):
return self.get_dimensional_dependencies(result)
elif result.func == dimension.name.func:
if isinstance(dimension.name, TrigonometricFunction):
if dicts[0] in ({}, {Dimension('angle'): 1}):
return {}
else:
raise TypeError("The input argument for the function {} must be dimensionless or have dimensions of angle.".format(dimension.func))
else:
if all(item == {} for item in dicts):
return {}
else:
raise TypeError("The input arguments for the function {} must be dimensionless.".format(dimension.func))
else:
return get_for_name(result)
raise TypeError("Type {} not implemented for get_dimensional_dependencies".format(type(dimension.name)))
def get_dimensional_dependencies(self, name, mark_dimensionless=False):
dimdep = self._get_dimensional_dependencies_for_name(name)
if mark_dimensionless and dimdep == {}:
return {Dimension(1): 1}
return dict(dimdep.items())
def equivalent_dims(self, dim1, dim2):
deps1 = self.get_dimensional_dependencies(dim1)
deps2 = self.get_dimensional_dependencies(dim2)
return deps1 == deps2
def extend(self, new_base_dims, new_derived_dims=(), new_dim_deps=None):
deps = dict(self.dimensional_dependencies)
if new_dim_deps:
deps.update(new_dim_deps)
new_dim_sys = DimensionSystem(
tuple(self.base_dims) + tuple(new_base_dims),
tuple(self.derived_dims) + tuple(new_derived_dims),
deps
)
new_dim_sys._quantity_dimension_map.update(self._quantity_dimension_map)
new_dim_sys._quantity_scale_factors.update(self._quantity_scale_factors)
return new_dim_sys
def is_dimensionless(self, dimension):
"""
Check if the dimension object really has a dimension.
A dimension should have at least one component with non-zero power.
"""
if dimension.name == 1:
return True
return self.get_dimensional_dependencies(dimension) == {}
@property
def list_can_dims(self):
"""
Useless method, kept for compatibility with previous versions.
DO NOT USE.
List all canonical dimension names.
"""
dimset = set()
for i in self.base_dims:
dimset.update(set(self.get_dimensional_dependencies(i).keys()))
return tuple(sorted(dimset, key=str))
@property
def inv_can_transf_matrix(self):
"""
Useless method, kept for compatibility with previous versions.
DO NOT USE.
Compute the inverse transformation matrix from the base to the
canonical dimension basis.
It corresponds to the matrix where columns are the vector of base
dimensions in canonical basis.
This matrix will almost never be used because dimensions are always
defined with respect to the canonical basis, so no work has to be done
to get them in this basis. Nonetheless if this matrix is not square
(or not invertible) it means that we have chosen a bad basis.
"""
matrix = reduce(lambda x, y: x.row_join(y),
[self.dim_can_vector(d) for d in self.base_dims])
return matrix
@property
def can_transf_matrix(self):
"""
Useless method, kept for compatibility with previous versions.
DO NOT USE.
Return the canonical transformation matrix from the canonical to the
base dimension basis.
It is the inverse of the matrix computed with inv_can_transf_matrix().
"""
#TODO: the inversion will fail if the system is inconsistent, for
# example if the matrix is not a square
return reduce(lambda x, y: x.row_join(y),
[self.dim_can_vector(d) for d in sorted(self.base_dims, key=str)]
).inv()
def dim_can_vector(self, dim):
"""
Useless method, kept for compatibility with previous versions.
DO NOT USE.
Dimensional representation in terms of the canonical base dimensions.
"""
vec = []
for d in self.list_can_dims:
vec.append(self.get_dimensional_dependencies(dim).get(d, 0))
return Matrix(vec)
def dim_vector(self, dim):
"""
Useless method, kept for compatibility with previous versions.
DO NOT USE.
Vector representation in terms of the base dimensions.
"""
return self.can_transf_matrix * Matrix(self.dim_can_vector(dim))
def print_dim_base(self, dim):
"""
Give the string expression of a dimension in term of the basis symbols.
"""
dims = self.dim_vector(dim)
symbols = [i.symbol if i.symbol is not None else i.name for i in self.base_dims]
res = S.One
for (s, p) in zip(symbols, dims):
res *= s**p
return res
@property
def dim(self):
"""
Useless method, kept for compatibility with previous versions.
DO NOT USE.
Give the dimension of the system.
That is return the number of dimensions forming the basis.
"""
return len(self.base_dims)
@property
def is_consistent(self):
"""
Useless method, kept for compatibility with previous versions.
DO NOT USE.
Check if the system is well defined.
"""
# not enough or too many base dimensions compared to independent
# dimensions
# in vector language: the set of vectors do not form a basis
return self.inv_can_transf_matrix.is_square
| DimensionSystem |
python | sympy__sympy | sympy/core/add.py | {
"start": 2712,
"end": 43211
} | class ____(Expr, AssocOp):
"""
Expression representing addition operation for algebraic group.
.. deprecated:: 1.7
Using arguments that aren't subclasses of :class:`~.Expr` in core
operators (:class:`~.Mul`, :class:`~.Add`, and :class:`~.Pow`) is
deprecated. See :ref:`non-expr-args-deprecated` for details.
Every argument of ``Add()`` must be ``Expr``. Infix operator ``+``
on most scalar objects in SymPy calls this class.
Another use of ``Add()`` is to represent the structure of abstract
addition so that its arguments can be substituted to return different
class. Refer to examples section for this.
``Add()`` evaluates the argument unless ``evaluate=False`` is passed.
The evaluation logic includes:
1. Flattening
``Add(x, Add(y, z))`` -> ``Add(x, y, z)``
2. Identity removing
``Add(x, 0, y)`` -> ``Add(x, y)``
3. Coefficient collecting by ``.as_coeff_Mul()``
``Add(x, 2*x)`` -> ``Mul(3, x)``
4. Term sorting
``Add(y, x, 2)`` -> ``Add(2, x, y)``
If no argument is passed, identity element 0 is returned. If single
element is passed, that element is returned.
Note that ``Add(*args)`` is more efficient than ``sum(args)`` because
it flattens the arguments. ``sum(a, b, c, ...)`` recursively adds the
arguments as ``a + (b + (c + ...))``, which has quadratic complexity.
On the other hand, ``Add(a, b, c, d)`` does not assume nested
structure, making the complexity linear.
Since addition is group operation, every argument should have the
same :obj:`sympy.core.kind.Kind()`.
Examples
========
>>> from sympy import Add, I
>>> from sympy.abc import x, y
>>> Add(x, 1)
x + 1
>>> Add(x, x)
2*x
>>> 2*x**2 + 3*x + I*y + 2*y + 2*x/5 + 1.0*y + 1
2*x**2 + 17*x/5 + 3.0*y + I*y + 1
If ``evaluate=False`` is passed, result is not evaluated.
>>> Add(1, 2, evaluate=False)
1 + 2
>>> Add(x, x, evaluate=False)
x + x
``Add()`` also represents the general structure of addition operation.
>>> from sympy import MatrixSymbol
>>> A,B = MatrixSymbol('A', 2,2), MatrixSymbol('B', 2,2)
>>> expr = Add(x,y).subs({x:A, y:B})
>>> expr
A + B
>>> type(expr)
<class 'sympy.matrices.expressions.matadd.MatAdd'>
Note that the printers do not display in args order.
>>> Add(x, 1)
x + 1
>>> Add(x, 1).args
(1, x)
See Also
========
MatAdd
"""
__slots__ = ()
is_Add = True
_args_type = Expr
identity: ClassVar[Expr]
if TYPE_CHECKING:
def __new__(cls, *args: Expr | complex, evaluate: bool=True) -> Expr: # type: ignore
...
@property
def args(self) -> tuple[Expr, ...]:
...
@classmethod
def flatten(cls, seq: list[Expr]) -> tuple[list[Expr], list[Expr], None]:
"""
Takes the sequence "seq" of nested Adds and returns a flatten list.
Returns: (commutative_part, noncommutative_part, order_symbols)
Applies associativity, all terms are commutable with respect to
addition.
NB: the removal of 0 is already handled by AssocOp.__new__
See Also
========
sympy.core.mul.Mul.flatten
"""
from sympy.calculus.accumulationbounds import AccumBounds
from sympy.matrices.expressions import MatrixExpr
from sympy.tensor.tensor import TensExpr, TensAdd
if len(seq) == 2:
a, b = seq
if b.is_Rational:
a, b = b, a
if a.is_Rational and b.is_Mul:
if a.is_commutative and b.is_commutative:
return [a, b], [], None
else:
return [], [a, b], None
# term -> coeff
# e.g. x**2 -> 5 for ... + 5*x**2 + ...
terms: dict[Expr, Number] = {}
# coefficient (Number or zoo) to always be in slot 0
# e.g. 3 + ...
coeff: Expr = S.Zero
order_factors: list[Order] = []
extra: list[MatrixExpr] = []
for o in seq:
# O(x)
if o.is_Order:
if o.expr.is_zero: # type: ignore
continue
if any(o1.contains(o) for o1 in order_factors):
continue
order_factors = [o1 for o1 in order_factors if not o.contains(o1)] # type: ignore
order_factors = [o] + order_factors # type: ignore
continue
# 3 or NaN
elif o.is_Number:
if (o is S.NaN or coeff is S.ComplexInfinity and
o.is_finite is False) and not extra:
# we know for sure the result will be nan
return [S.NaN], [], None
if coeff.is_Number or isinstance(coeff, AccumBounds):
coeff += o
if coeff is S.NaN and not extra:
# we know for sure the result will be nan
return [S.NaN], [], None
continue
elif isinstance(o, AccumBounds):
coeff = o.__add__(coeff)
continue
elif isinstance(o, MatrixExpr):
# can't add 0 to Matrix so make sure coeff is not 0
extra.append(o)
continue
elif isinstance(o, TensExpr):
coeff = TensAdd(o, coeff).doit(deep=False)
continue
elif o is S.ComplexInfinity:
if coeff.is_finite is False and not extra:
# we know for sure the result will be nan
return [S.NaN], [], None
coeff = S.ComplexInfinity
continue
# Add([...])
elif o.is_Add:
# NB: here we assume Add is always commutative
o_args: tuple[Expr, ...] = o.args # type: ignore
seq.extend(o_args) # TODO zerocopy?
continue
# Mul([...])
elif o.is_Mul:
c, s = o.as_coeff_Mul()
# check for unevaluated Pow, e.g. 2**3 or 2**(-1/2)
elif o.is_Pow:
b, e = o.as_base_exp()
if b.is_Number and (e.is_Integer or
(e.is_Rational and e.is_negative)):
seq.append(b**e)
continue
c, s = S.One, o
else:
# everything else
c = S.One
s = o
# now we have:
# o = c*s, where
#
# c is a Number
# s is an expression with number factor extracted
# let's collect terms with the same s, so e.g.
# 2*x**2 + 3*x**2 -> 5*x**2
if s in terms:
terms[s] += c
if terms[s] is S.NaN and not extra:
# we know for sure the result will be nan
return [S.NaN], [], None
else:
terms[s] = c
# now let's construct new args:
# [2*x**2, x**3, 7*x**4, pi, ...]
newseq = []
noncommutative = False
for s, c in terms.items():
# 0*s
if c.is_zero:
continue
# 1*s
elif c is S.One:
newseq.append(s)
# c*s
else:
if s.is_Mul:
# Mul, already keeps its arguments in perfect order.
# so we can simply put c in slot0 and go the fast way.
#
# XXX: This breaks VectorMul unless it overrides
# _new_rawargs
cs = s._new_rawargs(*((c,) + s.args)) # type: ignore
newseq.append(cs)
elif s.is_Add:
# we just re-create the unevaluated Mul
newseq.append(Mul(c, s, evaluate=False))
else:
# alternatively we have to call all Mul's machinery (slow)
newseq.append(Mul(c, s))
noncommutative = noncommutative or not s.is_commutative
# oo, -oo
if coeff is S.Infinity:
newseq = [f for f in newseq if not (f.is_extended_nonnegative or f.is_real)]
elif coeff is S.NegativeInfinity:
newseq = [f for f in newseq if not (f.is_extended_nonpositive or f.is_real)]
if coeff is S.ComplexInfinity:
# zoo might be
# infinite_real + finite_im
# finite_real + infinite_im
# infinite_real + infinite_im
# addition of a finite real or imaginary number won't be able to
# change the zoo nature; adding an infinite qualtity would result
# in a NaN condition if it had sign opposite of the infinite
# portion of zoo, e.g., infinite_real - infinite_real.
newseq = [c for c in newseq if not (c.is_finite and
c.is_extended_real is not None)]
# process O(x)
if order_factors:
newseq2 = []
for t in newseq:
# x + O(x) -> O(x)
if not any(o.contains(t) for o in order_factors):
newseq2.append(t)
newseq = newseq2 + order_factors # type: ignore
# 1 + O(1) -> O(1)
for o in order_factors:
if o.contains(coeff):
coeff = S.Zero
break
# order args canonically
_addsort(newseq)
# current code expects coeff to be first
if coeff is not S.Zero:
newseq.insert(0, coeff)
if extra:
newseq += extra
noncommutative = True
# we are done
if noncommutative:
return [], newseq, None
else:
return newseq, [], None
@classmethod
def class_key(cls):
return 3, 1, cls.__name__
@property
def kind(self):
k = attrgetter('kind')
kinds = map(k, self.args)
kinds = frozenset(kinds)
if len(kinds) != 1:
# Since addition is group operator, kind must be same.
# We know that this is unexpected signature, so return this.
result = UndefinedKind
else:
result, = kinds
return result
def could_extract_minus_sign(self):
return _could_extract_minus_sign(self)
@cacheit
def as_coeff_add(self, *deps):
"""
Returns a tuple (coeff, args) where self is treated as an Add and coeff
is the Number term and args is a tuple of all other terms.
Examples
========
>>> from sympy.abc import x
>>> (7 + 3*x).as_coeff_add()
(7, (3*x,))
>>> (7*x).as_coeff_add()
(0, (7*x,))
"""
if deps:
l1, l2 = sift(self.args, lambda x: x.has_free(*deps), binary=True)
return self._new_rawargs(*l2), tuple(l1)
coeff, notrat = self.args[0].as_coeff_add()
if coeff is not S.Zero:
return coeff, notrat + self.args[1:]
return S.Zero, self.args
def as_coeff_Add(self, rational=False, deps=None) -> tuple[Number, Expr]:
"""
Efficiently extract the coefficient of a summation.
"""
coeff, args = self.args[0], self.args[1:]
if coeff.is_Number and not rational or coeff.is_Rational:
return coeff, self._new_rawargs(*args) # type: ignore
return S.Zero, self
# Note, we intentionally do not implement Add.as_coeff_mul(). Rather, we
# let Expr.as_coeff_mul() just always return (S.One, self) for an Add. See
# issue 5524.
def _eval_power(self, expt):
from .evalf import pure_complex
from .relational import is_eq
if len(self.args) == 2 and any(_.is_infinite for _ in self.args):
if expt.is_zero is False and is_eq(expt, S.One) is False:
# looking for literal a + I*b
a, b = self.args
if a.coeff(S.ImaginaryUnit):
a, b = b, a
ico = b.coeff(S.ImaginaryUnit)
if ico and ico.is_extended_real and a.is_extended_real:
if expt.is_extended_negative:
return S.Zero
if expt.is_extended_positive:
return S.ComplexInfinity
return
if expt.is_Rational and self.is_number:
ri = pure_complex(self)
if ri:
r, i = ri
if expt.q == 2:
from sympy.functions.elementary.miscellaneous import sqrt
D = sqrt(r**2 + i**2)
if D.is_Rational:
from .exprtools import factor_terms
from sympy.functions.elementary.complexes import sign
from .function import expand_multinomial
# (r, i, D) is a Pythagorean triple
root = sqrt(factor_terms((D - r)/2))**expt.p
return root*expand_multinomial((
# principle value
(D + r)/abs(i) + sign(i)*S.ImaginaryUnit)**expt.p)
elif expt == -1:
return _unevaluated_Mul(
r - i*S.ImaginaryUnit,
1/(r**2 + i**2))
@cacheit
def _eval_derivative(self, s):
return self.func(*[a.diff(s) for a in self.args])
def _eval_nseries(self, x, n, logx, cdir=0):
terms = [t.nseries(x, n=n, logx=logx, cdir=cdir) for t in self.args]
return self.func(*terms)
def _matches_simple(self, expr, repl_dict):
# handle (w+3).matches('x+5') -> {w: x+2}
coeff, terms = self.as_coeff_add()
if len(terms) == 1:
return terms[0].matches(expr - coeff, repl_dict)
return
def matches(self, expr, repl_dict=None, old=False):
return self._matches_commutative(expr, repl_dict, old)
@staticmethod
def _combine_inverse(lhs, rhs):
"""
Returns lhs - rhs, but treats oo like a symbol so oo - oo
returns 0, instead of a nan.
"""
from sympy.simplify.simplify import signsimp
inf = (S.Infinity, S.NegativeInfinity)
if lhs.has(*inf) or rhs.has(*inf):
from .symbol import Dummy
oo = Dummy('oo')
reps = {
S.Infinity: oo,
S.NegativeInfinity: -oo}
ireps = {v: k for k, v in reps.items()}
eq = lhs.xreplace(reps) - rhs.xreplace(reps)
if eq.has(oo):
eq = eq.replace(
lambda x: x.is_Pow and x.base is oo,
lambda x: x.base)
rv = eq.xreplace(ireps)
else:
rv = lhs - rhs
srv = signsimp(rv)
return srv if srv.is_Number else rv
@cacheit
def as_two_terms(self):
"""Return head and tail of self.
This is the most efficient way to get the head and tail of an
expression.
- if you want only the head, use self.args[0];
- if you want to process the arguments of the tail then use
self.as_coef_add() which gives the head and a tuple containing
the arguments of the tail when treated as an Add.
- if you want the coefficient when self is treated as a Mul
then use self.as_coeff_mul()[0]
>>> from sympy.abc import x, y
>>> (3*x - 2*y + 5).as_two_terms()
(5, 3*x - 2*y)
"""
return self.args[0], self._new_rawargs(*self.args[1:])
def as_numer_denom(self) -> tuple[Expr, Expr]:
"""
Decomposes an expression to its numerator part and its
denominator part.
Examples
========
>>> from sympy.abc import x, y, z
>>> (x*y/z).as_numer_denom()
(x*y, z)
>>> (x*(y + 1)/y**7).as_numer_denom()
(x*(y + 1), y**7)
See Also
========
sympy.core.expr.Expr.as_numer_denom
"""
# clear rational denominator
content, expr = self.primitive()
if not isinstance(expr, Add):
return Mul(content, expr, evaluate=False).as_numer_denom()
ncon, dcon = content.as_numer_denom()
# collect numerators and denominators of the terms
nd = defaultdict(list)
for f in expr.args:
ni, di = f.as_numer_denom()
nd[di].append(ni)
# check for quick exit
if len(nd) == 1:
d, n = nd.popitem()
return self.func(
*[_keep_coeff(ncon, ni) for ni in n]), _keep_coeff(dcon, d)
# sum up the terms having a common denominator
nd2 = {d: self.func(*n) if len(n) > 1 else n[0] for d, n in nd.items()}
# assemble single numerator and denominator
denoms, numers = [list(i) for i in zip(*iter(nd2.items()))]
n, d = self.func(*[Mul(*(denoms[:i] + [numers[i]] + denoms[i + 1:]))
for i in range(len(numers))]), Mul(*denoms)
return _keep_coeff(ncon, n), _keep_coeff(dcon, d)
def _eval_is_polynomial(self, syms):
return all(term._eval_is_polynomial(syms) for term in self.args)
def _eval_is_rational_function(self, syms):
return all(term._eval_is_rational_function(syms) for term in self.args)
def _eval_is_meromorphic(self, x, a):
return _fuzzy_group((arg.is_meromorphic(x, a) for arg in self.args),
quick_exit=True)
def _eval_is_algebraic_expr(self, syms):
return all(term._eval_is_algebraic_expr(syms) for term in self.args)
# assumption methods
_eval_is_real = lambda self: _fuzzy_group(
(a.is_real for a in self.args), quick_exit=True)
_eval_is_extended_real = lambda self: _fuzzy_group(
(a.is_extended_real for a in self.args), quick_exit=True)
_eval_is_complex = lambda self: _fuzzy_group(
(a.is_complex for a in self.args), quick_exit=True)
_eval_is_antihermitian = lambda self: _fuzzy_group(
(a.is_antihermitian for a in self.args), quick_exit=True)
_eval_is_finite = lambda self: _fuzzy_group(
(a.is_finite for a in self.args), quick_exit=True)
_eval_is_hermitian = lambda self: _fuzzy_group(
(a.is_hermitian for a in self.args), quick_exit=True)
_eval_is_integer = lambda self: _fuzzy_group(
(a.is_integer for a in self.args), quick_exit=True)
_eval_is_rational = lambda self: _fuzzy_group(
(a.is_rational for a in self.args), quick_exit=True)
_eval_is_algebraic = lambda self: _fuzzy_group(
(a.is_algebraic for a in self.args), quick_exit=True)
_eval_is_commutative = lambda self: _fuzzy_group(
a.is_commutative for a in self.args)
def _eval_is_infinite(self):
sawinf = False
for a in self.args:
ainf = a.is_infinite
if ainf is None:
return None
elif ainf is True:
# infinite+infinite might not be infinite
if sawinf is True:
return None
sawinf = True
return sawinf
def _eval_is_imaginary(self):
nz = []
im_I = []
for a in self.args:
if a.is_extended_real:
if a.is_zero:
pass
elif a.is_zero is False:
nz.append(a)
else:
return
elif a.is_imaginary:
im_I.append(a*S.ImaginaryUnit)
elif a.is_Mul and S.ImaginaryUnit in a.args:
coeff, ai = a.as_coeff_mul(S.ImaginaryUnit)
if ai == (S.ImaginaryUnit,) and coeff.is_extended_real:
im_I.append(-coeff)
else:
return
else:
return
b = self.func(*nz)
if b != self:
if b.is_zero:
return fuzzy_not(self.func(*im_I).is_zero)
elif b.is_zero is False:
return False
def _eval_is_zero(self):
if self.is_commutative is False:
# issue 10528: there is no way to know if a nc symbol
# is zero or not
return
nz = []
z = 0
im_or_z = False
im = 0
for a in self.args:
if a.is_extended_real:
if a.is_zero:
z += 1
elif a.is_zero is False:
nz.append(a)
else:
return
elif a.is_imaginary:
im += 1
elif a.is_Mul and S.ImaginaryUnit in a.args:
coeff, ai = a.as_coeff_mul(S.ImaginaryUnit)
if ai == (S.ImaginaryUnit,) and coeff.is_extended_real:
im_or_z = True
else:
return
else:
return
if z == len(self.args):
return True
if len(nz) in [0, len(self.args)]:
return None
b = self.func(*nz)
if b.is_zero:
if not im_or_z:
if im == 0:
return True
elif im == 1:
return False
if b.is_zero is False:
return False
def _eval_is_odd(self):
l = [f for f in self.args if not (f.is_even is True)]
if not l:
return False
if l[0].is_odd:
return self._new_rawargs(*l[1:]).is_even
def _eval_is_irrational(self):
for t in self.args:
a = t.is_irrational
if a:
others = list(self.args)
others.remove(t)
if all(x.is_rational is True for x in others):
return True
return None
if a is None:
return
return False
def _all_nonneg_or_nonppos(self):
nn = np = 0
for a in self.args:
if a.is_nonnegative:
if np:
return False
nn = 1
elif a.is_nonpositive:
if nn:
return False
np = 1
else:
break
else:
return True
def _eval_is_extended_positive(self):
if self.is_number:
return super()._eval_is_extended_positive()
c, a = self.as_coeff_Add()
if not c.is_zero:
from .exprtools import _monotonic_sign
v = _monotonic_sign(a)
if v is not None:
s = v + c
if s != self and s.is_extended_positive and a.is_extended_nonnegative:
return True
if len(self.free_symbols) == 1:
v = _monotonic_sign(self)
if v is not None and v != self and v.is_extended_positive:
return True
pos = nonneg = nonpos = unknown_sign = False
saw_INF = set()
args = [a for a in self.args if not a.is_zero]
if not args:
return False
for a in args:
ispos = a.is_extended_positive
infinite = a.is_infinite
if infinite:
saw_INF.add(fuzzy_or((ispos, a.is_extended_nonnegative)))
if True in saw_INF and False in saw_INF:
return
if ispos:
pos = True
continue
elif a.is_extended_nonnegative:
nonneg = True
continue
elif a.is_extended_nonpositive:
nonpos = True
continue
if infinite is None:
return
unknown_sign = True
if saw_INF:
if len(saw_INF) > 1:
return
return saw_INF.pop()
elif unknown_sign:
return
elif pos and not nonpos:
return True
elif not pos and not nonneg:
return False
def _eval_is_extended_nonnegative(self):
if not self.is_number:
c, a = self.as_coeff_Add()
if not c.is_zero and a.is_extended_nonnegative:
from .exprtools import _monotonic_sign
v = _monotonic_sign(a)
if v is not None:
s = v + c
if s != self and s.is_extended_nonnegative:
return True
if len(self.free_symbols) == 1:
v = _monotonic_sign(self)
if v is not None and v != self and v.is_extended_nonnegative:
return True
def _eval_is_extended_nonpositive(self):
if not self.is_number:
c, a = self.as_coeff_Add()
if not c.is_zero and a.is_extended_nonpositive:
from .exprtools import _monotonic_sign
v = _monotonic_sign(a)
if v is not None:
s = v + c
if s != self and s.is_extended_nonpositive:
return True
if len(self.free_symbols) == 1:
v = _monotonic_sign(self)
if v is not None and v != self and v.is_extended_nonpositive:
return True
def _eval_is_extended_negative(self):
if self.is_number:
return super()._eval_is_extended_negative()
c, a = self.as_coeff_Add()
if not c.is_zero:
from .exprtools import _monotonic_sign
v = _monotonic_sign(a)
if v is not None:
s = v + c
if s != self and s.is_extended_negative and a.is_extended_nonpositive:
return True
if len(self.free_symbols) == 1:
v = _monotonic_sign(self)
if v is not None and v != self and v.is_extended_negative:
return True
neg = nonpos = nonneg = unknown_sign = False
saw_INF = set()
args = [a for a in self.args if not a.is_zero]
if not args:
return False
for a in args:
isneg = a.is_extended_negative
infinite = a.is_infinite
if infinite:
saw_INF.add(fuzzy_or((isneg, a.is_extended_nonpositive)))
if True in saw_INF and False in saw_INF:
return
if isneg:
neg = True
continue
elif a.is_extended_nonpositive:
nonpos = True
continue
elif a.is_extended_nonnegative:
nonneg = True
continue
if infinite is None:
return
unknown_sign = True
if saw_INF:
if len(saw_INF) > 1:
return
return saw_INF.pop()
elif unknown_sign:
return
elif neg and not nonneg:
return True
elif not neg and not nonpos:
return False
def _eval_subs(self, old, new):
if not old.is_Add:
if old is S.Infinity and -old in self.args:
# foo - oo is foo + (-oo) internally
return self.xreplace({-old: -new})
return None
coeff_self, terms_self = self.as_coeff_Add()
coeff_old, terms_old = old.as_coeff_Add()
if coeff_self.is_Rational and coeff_old.is_Rational:
if terms_self == terms_old: # (2 + a).subs( 3 + a, y) -> -1 + y
return self.func(new, coeff_self, -coeff_old)
if terms_self == -terms_old: # (2 + a).subs(-3 - a, y) -> -1 - y
return self.func(-new, coeff_self, coeff_old)
if coeff_self.is_Rational and coeff_old.is_Rational \
or coeff_self == coeff_old:
args_old, args_self = self.func.make_args(
terms_old), self.func.make_args(terms_self)
if len(args_old) < len(args_self): # (a+b+c).subs(b+c,x) -> a+x
self_set = set(args_self)
old_set = set(args_old)
if old_set < self_set:
ret_set = self_set - old_set
return self.func(new, coeff_self, -coeff_old,
*[s._subs(old, new) for s in ret_set])
args_old = self.func.make_args(
-terms_old) # (a+b+c+d).subs(-b-c,x) -> a-x+d
old_set = set(args_old)
if old_set < self_set:
ret_set = self_set - old_set
return self.func(-new, coeff_self, coeff_old,
*[s._subs(old, new) for s in ret_set])
def removeO(self):
args = [a for a in self.args if not a.is_Order]
return self._new_rawargs(*args)
def getO(self):
args = [a for a in self.args if a.is_Order]
if args:
return self._new_rawargs(*args)
@cacheit
def extract_leading_order(self, symbols, point=None):
"""
Returns the leading term and its order.
Examples
========
>>> from sympy.abc import x
>>> (x + 1 + 1/x**5).extract_leading_order(x)
((x**(-5), O(x**(-5))),)
>>> (1 + x).extract_leading_order(x)
((1, O(1)),)
>>> (x + x**2).extract_leading_order(x)
((x, O(x)),)
"""
from sympy.series.order import Order
lst = []
symbols = list(symbols if is_sequence(symbols) else [symbols])
if not point:
point = [0]*len(symbols)
seq = [(f, Order(f, *zip(symbols, point))) for f in self.args]
for ef, of in seq:
for e, o in lst:
if o.contains(of) and o != of:
of = None
break
if of is None:
continue
new_lst = [(ef, of)]
for e, o in lst:
if of.contains(o) and o != of:
continue
new_lst.append((e, o))
lst = new_lst
return tuple(lst)
def as_real_imag(self, deep=True, **hints):
"""
Return a tuple representing a complex number.
Examples
========
>>> from sympy import I
>>> (7 + 9*I).as_real_imag()
(7, 9)
>>> ((1 + I)/(1 - I)).as_real_imag()
(0, 1)
>>> ((1 + 2*I)*(1 + 3*I)).as_real_imag()
(-5, 5)
"""
sargs = self.args
re_part, im_part = [], []
for term in sargs:
re, im = term.as_real_imag(deep=deep)
re_part.append(re)
im_part.append(im)
return (self.func(*re_part), self.func(*im_part))
def _eval_as_leading_term(self, x, logx, cdir):
from sympy.core.symbol import Dummy, Symbol
from sympy.series.order import Order
from sympy.functions.elementary.exponential import log
from sympy.functions.elementary.piecewise import Piecewise, piecewise_fold
from .function import expand_mul
o = self.getO()
if o is None:
o = Order(0)
old = self.removeO()
if old.has(Piecewise):
old = piecewise_fold(old)
# This expansion is the last part of expand_log. expand_log also calls
# expand_mul with factor=True, which would be more expensive
if any(isinstance(a, log) for a in self.args):
logflags = {"deep": True, "log": True, "mul": False, "power_exp": False,
"power_base": False, "multinomial": False, "basic": False, "force": False,
"factor": False}
old = old.expand(**logflags)
expr = expand_mul(old)
if not expr.is_Add:
return expr.as_leading_term(x, logx=logx, cdir=cdir)
infinite = [t for t in expr.args if t.is_infinite]
_logx = Dummy('logx') if logx is None else logx
leading_terms = [t.as_leading_term(x, logx=_logx, cdir=cdir) for t in expr.args]
min, new_expr = Order(0), S.Zero
try:
for term in leading_terms:
order = Order(term, x)
if not min or order not in min:
min = order
new_expr = term
elif min in order:
new_expr += term
except TypeError:
return expr
if logx is None:
new_expr = new_expr.subs(_logx, log(x))
is_zero = new_expr.is_zero
if is_zero is None:
new_expr = new_expr.trigsimp().cancel()
is_zero = new_expr.is_zero
if is_zero is True:
# simple leading term analysis gave us cancelled terms but we have to send
# back a term, so compute the leading term (via series)
try:
n0 = min.getn()
except NotImplementedError:
n0 = S.One
if n0.has(Symbol):
n0 = S.One
res = Order(1)
incr = S.One
while res.is_Order:
res = old._eval_nseries(x, n=n0+incr, logx=logx, cdir=cdir).cancel().powsimp().trigsimp()
incr *= 2
return res.as_leading_term(x, logx=logx, cdir=cdir)
elif new_expr is S.NaN:
return old.func._from_args(infinite) + o
else:
return new_expr
def _eval_adjoint(self):
return self.func(*[t.adjoint() for t in self.args])
def _eval_conjugate(self):
return self.func(*[t.conjugate() for t in self.args])
def _eval_transpose(self):
return self.func(*[t.transpose() for t in self.args])
def primitive(self):
"""
Return ``(R, self/R)`` where ``R``` is the Rational GCD of ``self```.
``R`` is collected only from the leading coefficient of each term.
Examples
========
>>> from sympy.abc import x, y
>>> (2*x + 4*y).primitive()
(2, x + 2*y)
>>> (2*x/3 + 4*y/9).primitive()
(2/9, 3*x + 2*y)
>>> (2*x/3 + 4.2*y).primitive()
(1/3, 2*x + 12.6*y)
No subprocessing of term factors is performed:
>>> ((2 + 2*x)*x + 2).primitive()
(1, x*(2*x + 2) + 2)
Recursive processing can be done with the ``as_content_primitive()``
method:
>>> ((2 + 2*x)*x + 2).as_content_primitive()
(2, x*(x + 1) + 1)
See also: primitive() function in polytools.py
"""
terms = []
inf = False
for a in self.args:
c, m = a.as_coeff_Mul()
if not c.is_Rational:
c = S.One
m = a
inf = inf or m is S.ComplexInfinity
terms.append((c.p, c.q, m))
if not inf:
ngcd = reduce(igcd, [t[0] for t in terms], 0)
dlcm = reduce(ilcm, [t[1] for t in terms], 1)
else:
ngcd = reduce(igcd, [t[0] for t in terms if t[1]], 0)
dlcm = reduce(ilcm, [t[1] for t in terms if t[1]], 1)
if ngcd == dlcm == 1:
return S.One, self
if not inf:
for i, (p, q, term) in enumerate(terms):
terms[i] = _keep_coeff(Rational((p//ngcd)*(dlcm//q)), term)
else:
for i, (p, q, term) in enumerate(terms):
if q:
terms[i] = _keep_coeff(Rational((p//ngcd)*(dlcm//q)), term)
else:
terms[i] = _keep_coeff(Rational(p, q), term)
# we don't need a complete re-flattening since no new terms will join
# so we just use the same sort as is used in Add.flatten. When the
# coefficient changes, the ordering of terms may change, e.g.
# (3*x, 6*y) -> (2*y, x)
#
# We do need to make sure that term[0] stays in position 0, however.
#
if terms[0].is_Number or terms[0] is S.ComplexInfinity:
c = terms.pop(0)
else:
c = None
_addsort(terms)
if c:
terms.insert(0, c)
return Rational(ngcd, dlcm), self._new_rawargs(*terms)
def as_content_primitive(self, radical=False, clear=True):
"""Return the tuple (R, self/R) where R is the positive Rational
extracted from self. If radical is True (default is False) then
common radicals will be removed and included as a factor of the
primitive expression.
Examples
========
>>> from sympy import sqrt
>>> (3 + 3*sqrt(2)).as_content_primitive()
(3, 1 + sqrt(2))
Radical content can also be factored out of the primitive:
>>> (2*sqrt(2) + 4*sqrt(10)).as_content_primitive(radical=True)
(2, sqrt(2)*(1 + 2*sqrt(5)))
See docstring of Expr.as_content_primitive for more examples.
"""
con, prim = self.func(*[_keep_coeff(*a.as_content_primitive(
radical=radical, clear=clear)) for a in self.args]).primitive()
if not clear and not con.is_Integer and prim.is_Add:
con, d = con.as_numer_denom()
_p = prim/d
if any(a.as_coeff_Mul()[0].is_Integer for a in _p.args):
prim = _p
else:
con /= d
if radical and prim.is_Add:
# look for common radicals that can be removed
args = prim.args
rads = []
common_q = None
for m in args:
term_rads = defaultdict(list)
for ai in Mul.make_args(m):
if ai.is_Pow:
b, e = ai.as_base_exp()
if e.is_Rational and b.is_Integer:
term_rads[e.q].append(abs(int(b))**e.p)
if not term_rads:
break
if common_q is None:
common_q = set(term_rads.keys())
else:
common_q = common_q & set(term_rads.keys())
if not common_q:
break
rads.append(term_rads)
else:
# process rads
# keep only those in common_q
for r in rads:
for q in list(r.keys()):
if q not in common_q:
r.pop(q)
for q in r:
r[q] = Mul(*r[q])
# find the gcd of bases for each q
G = []
for q in common_q:
g = reduce(igcd, [r[q] for r in rads], 0)
if g != 1:
G.append(g**Rational(1, q))
if G:
G = Mul(*G)
args = [ai/G for ai in args]
prim = G*prim.func(*args)
return con, prim
@property
def _sorted_args(self):
from .sorting import default_sort_key
return tuple(sorted(self.args, key=default_sort_key))
def _eval_difference_delta(self, n, step):
from sympy.series.limitseq import difference_delta as dd
return self.func(*[dd(a, n, step) for a in self.args])
@property
def _mpc_(self):
"""
Convert self to an mpmath mpc if possible
"""
from .numbers import Float
re_part, rest = self.as_coeff_Add()
im_part, imag_unit = rest.as_coeff_Mul()
if not imag_unit == S.ImaginaryUnit:
# ValueError may seem more reasonable but since it's a @property,
# we need to use AttributeError to keep from confusing things like
# hasattr.
raise AttributeError("Cannot convert Add to mpc. Must be of the form Number + Number*I")
return (Float(re_part)._mpf_, Float(im_part)._mpf_)
def __neg__(self):
if not global_parameters.distribute:
return super().__neg__()
return Mul(S.NegativeOne, self)
add = AssocOpDispatcher('add')
from .mul import Mul, _keep_coeff, _unevaluated_Mul
from .numbers import Rational
| Add |
python | kubernetes-client__python | kubernetes/client/models/v1beta2_device_allocation_configuration.py | {
"start": 383,
"end": 6291
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'opaque': 'V1beta2OpaqueDeviceConfiguration',
'requests': 'list[str]',
'source': 'str'
}
attribute_map = {
'opaque': 'opaque',
'requests': 'requests',
'source': 'source'
}
def __init__(self, opaque=None, requests=None, source=None, local_vars_configuration=None): # noqa: E501
"""V1beta2DeviceAllocationConfiguration - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._opaque = None
self._requests = None
self._source = None
self.discriminator = None
if opaque is not None:
self.opaque = opaque
if requests is not None:
self.requests = requests
self.source = source
@property
def opaque(self):
"""Gets the opaque of this V1beta2DeviceAllocationConfiguration. # noqa: E501
:return: The opaque of this V1beta2DeviceAllocationConfiguration. # noqa: E501
:rtype: V1beta2OpaqueDeviceConfiguration
"""
return self._opaque
@opaque.setter
def opaque(self, opaque):
"""Sets the opaque of this V1beta2DeviceAllocationConfiguration.
:param opaque: The opaque of this V1beta2DeviceAllocationConfiguration. # noqa: E501
:type: V1beta2OpaqueDeviceConfiguration
"""
self._opaque = opaque
@property
def requests(self):
"""Gets the requests of this V1beta2DeviceAllocationConfiguration. # noqa: E501
Requests lists the names of requests where the configuration applies. If empty, its applies to all requests. References to subrequests must include the name of the main request and may include the subrequest using the format <main request>[/<subrequest>]. If just the main request is given, the configuration applies to all subrequests. # noqa: E501
:return: The requests of this V1beta2DeviceAllocationConfiguration. # noqa: E501
:rtype: list[str]
"""
return self._requests
@requests.setter
def requests(self, requests):
"""Sets the requests of this V1beta2DeviceAllocationConfiguration.
Requests lists the names of requests where the configuration applies. If empty, its applies to all requests. References to subrequests must include the name of the main request and may include the subrequest using the format <main request>[/<subrequest>]. If just the main request is given, the configuration applies to all subrequests. # noqa: E501
:param requests: The requests of this V1beta2DeviceAllocationConfiguration. # noqa: E501
:type: list[str]
"""
self._requests = requests
@property
def source(self):
"""Gets the source of this V1beta2DeviceAllocationConfiguration. # noqa: E501
Source records whether the configuration comes from a class and thus is not something that a normal user would have been able to set or from a claim. # noqa: E501
:return: The source of this V1beta2DeviceAllocationConfiguration. # noqa: E501
:rtype: str
"""
return self._source
@source.setter
def source(self, source):
"""Sets the source of this V1beta2DeviceAllocationConfiguration.
Source records whether the configuration comes from a class and thus is not something that a normal user would have been able to set or from a claim. # noqa: E501
:param source: The source of this V1beta2DeviceAllocationConfiguration. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and source is None: # noqa: E501
raise ValueError("Invalid value for `source`, must not be `None`") # noqa: E501
self._source = source
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta2DeviceAllocationConfiguration):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta2DeviceAllocationConfiguration):
return True
return self.to_dict() != other.to_dict()
| V1beta2DeviceAllocationConfiguration |
python | facebook__pyre-check | tools/typeshed_patcher/typeshed.py | {
"start": 3310,
"end": 3882
} | class ____(Typeshed):
"""
A typeshed backed up by in-memory content. Essentially a wrapper around
a dictonary from paths to their contents.
This class is mostly useful for testing.
"""
contents: Mapping[pathlib.Path, str]
def __init__(self, contents: Mapping[pathlib.Path, str]) -> None:
self.contents = contents
def all_files(self) -> Iterable[pathlib.Path]:
return self.contents.keys()
def get_file_content(self, path: pathlib.Path) -> Optional[str]:
return self.contents.get(path, None)
| MemoryBackedTypeshed |
python | huggingface__transformers | src/transformers/models/flava/modeling_flava.py | {
"start": 2074,
"end": 4091
} | class ____(ModelOutput):
r"""
image_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `pixel_values` are present):
The image embeddings which are basically the pooled output of [`FlavaImageModel`].
image_output (`BaseModelOutputWithPooling`, *optional*, returned when `pixel_values` are present):
The output of the [`FlavaImageModel`].
text_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` are present):
The text embeddings which are basically the pooled output of [`FlavaTextModel`].
text_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids` are present):
The output of the [`FlavaTextModel`].
multimodal_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` and `pixel_values` are present and `skip_multimodal_encoder` is `None` or `False`):
The multimodal embeddings which are basically the pooled output of [`FlavaTextModel`].
multimodal_output (`BaseModelOutputWithPooling`, returned when `input_ids` and `pixel_values` are present and `skip_multimodal_encoder` is `None` or `False`):
The output of the [`FlavaMultimodalModel`].
"""
image_embeddings: Optional[torch.FloatTensor] = None
image_output: Optional[BaseModelOutputWithPooling] = None
text_embeddings: Optional[torch.FloatTensor] = None
text_output: Optional[BaseModelOutputWithPooling] = None
multimodal_embeddings: Optional[torch.FloatTensor] = None
multimodal_output: Optional[BaseModelOutputWithPooling] = None
def to_tuple(self) -> tuple[Any]:
return tuple(
self[k] if k not in ["text_output", "image_output", "multimodal_output"] else getattr(self, k).to_tuple()
for k in self.keys()
)
@dataclass
@auto_docstring(
custom_intro="""
Class representing pretraining losses from FLAVA model
"""
)
| FlavaModelOutput |
python | encode__django-rest-framework | tests/test_middleware.py | {
"start": 3946,
"end": 4779
} | class ____(APITestCase):
"""
Django's 5.1+ LoginRequiredMiddleware should NOT apply to DRF views.
Instead, users should put IsAuthenticated in their
DEFAULT_PERMISSION_CLASSES setting.
"""
def test_class_based_view(self):
response = self.client.get('/get')
assert response.status_code == status.HTTP_200_OK
def test_function_based_view(self):
response = self.client.get('/get-func')
assert response.status_code == status.HTTP_200_OK
def test_viewset_list(self):
response = self.client.get('/api/view-set/')
assert response.status_code == status.HTTP_200_OK
def test_viewset_list_action(self):
response = self.client.get('/api/view-set/list-action/')
assert response.status_code == status.HTTP_200_OK
| TestLoginRequiredMiddlewareCompat |
python | pandas-dev__pandas | pandas/tests/extension/base/__init__.py | {
"start": 2398,
"end": 2870
} | class ____(
BaseAccumulateTests,
BaseCastingTests,
BaseConstructorsTests,
BaseDtypeTests,
BaseGetitemTests,
BaseGroupbyTests,
BaseIndexTests,
BaseInterfaceTests,
BaseParsingTests,
BaseMethodsTests,
BaseMissingTests,
BaseArithmeticOpsTests,
BaseComparisonOpsTests,
BaseUnaryOpsTests,
BasePrintingTests,
BaseReduceTests,
BaseReshapingTests,
BaseSetitemTests,
Dim2CompatTests,
):
pass
| ExtensionTests |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/distributions/bijector_test.py | {
"start": 5293,
"end": 6841
} | class ____(metaclass=abc.ABCMeta):
@abc.abstractproperty
def broken_bijector_cls(self):
# return a BrokenBijector type Bijector, since this will test the caching.
raise IntentionallyMissingError("Not implemented")
def testCachingOfForwardResults(self):
broken_bijector = self.broken_bijector_cls(inverse_missing=True)
x = constant_op.constant(1.1)
# Call forward and forward_log_det_jacobian one-by-one (not together).
y = broken_bijector.forward(x)
_ = broken_bijector.forward_log_det_jacobian(x, event_ndims=0)
# Now, everything should be cached if the argument is y.
broken_bijector.inverse(y)
broken_bijector.inverse_log_det_jacobian(y, event_ndims=0)
# Different event_ndims should not be cached.
with self.assertRaises(IntentionallyMissingError):
broken_bijector.inverse_log_det_jacobian(y, event_ndims=1)
def testCachingOfInverseResults(self):
broken_bijector = self.broken_bijector_cls(forward_missing=True)
y = constant_op.constant(1.1)
# Call inverse and inverse_log_det_jacobian one-by-one (not together).
x = broken_bijector.inverse(y)
_ = broken_bijector.inverse_log_det_jacobian(y, event_ndims=0)
# Now, everything should be cached if the argument is x.
broken_bijector.forward(x)
broken_bijector.forward_log_det_jacobian(x, event_ndims=0)
# Different event_ndims should not be cached.
with self.assertRaises(IntentionallyMissingError):
broken_bijector.forward_log_det_jacobian(x, event_ndims=1)
| BijectorCachingTestBase |
python | pytorch__pytorch | torch/_dynamo/eval_frame.py | {
"start": 11768,
"end": 21757
} | class ____(torch.nn.Module):
"""
Wraps the original nn.Module object and later patches its
forward method to optimized self.forward method.
"""
_torchdynamo_orig_callable: Callable[..., Any]
get_compiler_config: Callable[[], Any]
_opt_mod_attributes = {
"_orig_mod",
"dynamo_ctx",
"_torchdynamo_orig_callable",
"get_compiler_config",
"forward",
"_forward",
"__dict__",
"named_children_walk",
"_super_module_initialized",
}
def __init__(self, mod: torch.nn.Module, dynamo_ctx: _TorchDynamoContext) -> None:
# NOTE: this must go first, because attribute reads/writes of `self`
# uses `_orig_mod`, and sometimes users override `Module.__init__` to
# do attribute reads/writes on `self`.
#
# We also can't use regular setattr because `super().__setattr__` will
# complain for module value before `super().__init__()`
object.__setattr__(self, "_orig_mod", mod)
self._super_module_initialized = False
super().__init__()
self._super_module_initialized = True
# Installs the params/buffer
self._orig_mod = mod # `super().__setattr__` will register this module
self.dynamo_ctx = dynamo_ctx
self._initialize()
self.training = self._orig_mod.training
def __len__(self) -> int:
# Proxy the len call to the original module
if isinstance(self._orig_mod, Sized):
return len(self._orig_mod)
# Mimic python's default behavior for objects without a length
raise TypeError(f"{type(self._orig_mod).__name__} does not support len()")
def _initialize(self) -> None:
# Do this stuff in constructor to lower overhead slightly
if isinstance(self.dynamo_ctx, DisableContext):
# No need to check trace rules
self.forward = self.dynamo_ctx(self._orig_mod.__call__)
elif config.wrap_top_frame or (
isinstance(self._orig_mod.forward, types.MethodType)
and (
trace_rules.check(self._orig_mod.forward)
or getattr(self._orig_mod, "_is_fsdp_managed_module", False)
)
):
# This may be a torch.nn.* instance in trace_rules.py which
# won't trigger a frame evaluation workaround to add an extra
# frame we can capture
self.forward = self.dynamo_ctx(external_utils.wrap_inline(self._orig_mod))
else:
# Invoke hooks outside of dynamo then pickup the inner frame
self.forward = self.dynamo_ctx(self._orig_mod.__call__)
if hasattr(self._orig_mod, "_initialize_hook"):
self._forward = self.forward
self.forward = self._call_lazy_check
def __call__(self, *args: Any, **kwargs: Any) -> Any:
if torch.nn.modules.module._has_any_global_hook():
warnings.warn(
"Using `torch.compile(module)` when there are global hooks on "
"modules (e.g., from `register_module_forward_hook`); this will"
" cause the hooks to fire an extra time for the "
"`OptimizedModule` created by `torch.compile(module)`. If this "
"causes undesired behavior, please try using `module.compile()`"
", or use the per-module hooks instead",
stacklevel=2,
)
return super().__call__(*args, **kwargs)
def _aot_compile(self, inputs: list[torch._dynamo.aot_compile.ModelInput]) -> None:
"""
Experimental: AOT Compile a set of inputs and use that as the forward function
"""
model = self._orig_mod
hooks = self.dynamo_ctx._hooks
assert hooks is not None
if not config.enable_aot_compile:
raise RuntimeError(
"AOT Compile is not enabled, please set torch._dynamo.config.enable_aot_config=True"
)
if not self.dynamo_ctx.fullgraph:
raise RuntimeError(
"Graph breaks are not supported with aot compile. Please use torch.compile(fullgraph=True)."
)
if not callable(self.dynamo_ctx.callback):
raise RuntimeError("aot compile requires a callable dynamo callback.")
backend = innermost_fn(
self.dynamo_ctx.callback, unaltered_fn_attr="_torchdynamo_orig_backend"
)
from torch._dynamo.aot_compile import aot_compile_module
self.forward = aot_compile_module(model, inputs, hooks, backend)
def _save_aot_compiled_module(self, path: Optional[str] = None) -> bytes:
if not config.enable_aot_compile:
raise RuntimeError(
"AOT Compile is not enabled, please set torch._dynamo.config.enable_aot_config=True"
)
from torch._dynamo.aot_compile import AOTCompiledModel
assert isinstance(self.forward, AOTCompiledModel)
result: bytes = self.forward.serialize()
if path is not None:
with open(path, "wb") as f:
f.write(result)
return result
def _load_aot_compiled_module(self, data: bytes) -> None:
if not config.enable_aot_compile:
raise RuntimeError(
"AOT Compile is not enabled, please set torch._dynamo.config.enable_aot_config=True"
)
from torch._dynamo.aot_compile import AOTCompiledModel
compiled_forward = AOTCompiledModel.deserialize(self._orig_mod, data)
assert isinstance(compiled_forward, AOTCompiledModel)
self.forward = compiled_forward
def __reduce__(
self,
) -> tuple[type[OptimizedModule], tuple[torch.nn.Module, _TorchDynamoContext]]:
return (self.__class__, (self._orig_mod, self.dynamo_ctx))
def __getstate__(self) -> dict[str, Any]:
state = dict(self.__dict__)
state.pop("forward", None)
state.pop("__call__", None)
return state
def __setstate__(self, state: dict[str, Any]) -> None:
self.__dict__ = state
self._initialize()
@property
# pyrefly: ignore [bad-override]
def training(self) -> bool:
return self._orig_mod.training
@training.setter
def training(self, value: bool) -> None:
# Ignore the `training` mutation in `super().__init__()`, since that's
# setting the default on `nn.Module`, but we are mirroring the
# `training` attr in `self._orig_mod`.
if self._super_module_initialized:
self._orig_mod.training = value
def __getattr__(self, name: str) -> Any:
if name == "_orig_mod":
return self._modules["_orig_mod"]
return getattr(self._orig_mod, name)
def __setattr__(self, name: str, val: Any) -> None:
# Allow patching over class attributes
if hasattr(type(self), name):
return super().__setattr__(name, val)
if name in OptimizedModule._opt_mod_attributes:
return super().__setattr__(name, val)
return setattr(self._orig_mod, name, val)
def __delattr__(self, name: str) -> None:
# This mirrors `__setattr__`
if hasattr(type(self), name):
return super().__delattr__(name)
if name in OptimizedModule._opt_mod_attributes:
return super().__delattr__(name)
return delattr(self._orig_mod, name)
def _call_lazy_check(self, *args: Any, **kwargs: Any) -> Any:
if (
hasattr(self._orig_mod, "_initialize_hook")
and hasattr(self._orig_mod, "_infer_parameters")
and callable(self._orig_mod._infer_parameters)
):
# In the case of a lazy module, we want to run
# the pre-hooks which initialize it.
# Afterwards, lazy module deletes its pre-hooks
# to avoid treating it as lazy on subsequent recompile.
self._orig_mod._infer_parameters(self._orig_mod, args, kwargs)
return self._forward(*args, **kwargs)
def __dir__(self) -> list[str]:
orig_mod_attrs = self._orig_mod.__dir__()
return orig_mod_attrs + [
attr for attr in super().__dir__() if attr not in orig_mod_attrs
]
def remove_from_cache(f: Any) -> None:
"""
Make sure f.__code__ is not cached to force a recompile
"""
if isinstance(f, types.CodeType):
reset_code(f)
elif hasattr(f, "__code__"):
reset_code(f.__code__)
elif hasattr(getattr(f, "forward", None), "__code__"):
reset_code(f.forward.__code__)
else:
from . import reset # type: ignore[attr-defined]
reset()
log.warning("could not determine __code__ for %s", f)
def nothing() -> None:
pass
def always_false() -> bool:
return False
def innermost_fn(
fn: Callable[..., Any], unaltered_fn_attr: str = "_torchdynamo_orig_callable"
) -> Callable[..., Any]:
"""
In case of nesting of _TorchDynamoContext calls, find the innermost
function. TorchDynamo caches on fn.__code__ object, so its necessary to find
the innermost function to pass on the optimize, run, disable etc.
"""
unaltered_fn = fn
while hasattr(unaltered_fn, unaltered_fn_attr):
unaltered_fn = getattr(unaltered_fn, unaltered_fn_attr)
assert callable(unaltered_fn), (
f"A callable function is expected, but {type(unaltered_fn)} is provided."
)
return unaltered_fn
def make_set_enable_dynamic(enable: bool) -> Any:
assert isinstance(enable, bool)
if enable:
# Assume everything is dynamic by default
return config._make_closure_patcher(assume_static_by_default=False)
else:
return config._make_closure_patcher(
automatic_dynamic_shapes=False, assume_static_by_default=True
)
# A thread local storage that serves to store information as Dynamo traces
# through a user provided function.
| OptimizedModule |
python | ipython__ipython | IPython/core/autocall.py | {
"start": 1593,
"end": 1983
} | class ____(ExitAutocall):
"""Exit IPython. Autocallable, so it needn't be explicitly called.
Parameters
----------
keep_kernel : bool
If True, leave the kernel alive. Otherwise, tell the kernel to exit too
(default).
"""
def __call__(self, keep_kernel=False):
self._ip.keepkernel_on_exit = keep_kernel
self._ip.ask_exit()
| ZMQExitAutocall |
python | tiangolo__fastapi | tests/test_tuples.py | {
"start": 190,
"end": 253
} | class ____(BaseModel):
items: List[Tuple[str, str]]
| ItemGroup |
python | tensorflow__tensorflow | tensorflow/python/ops/lookup_ops.py | {
"start": 67590,
"end": 79931
} | class ____(LookupInterface):
"""A generic mutable hash table implementation.
Data can be inserted by calling the `insert` method and removed by calling the
`remove` method. It does not support initialization via the init method.
`MutableHashTable` requires additional memory during checkpointing and restore
operations to create temporary key and value tensors.
Example usage:
>>> table = tf.lookup.experimental.MutableHashTable(key_dtype=tf.string,
... value_dtype=tf.int64,
... default_value=-1)
>>> keys_tensor = tf.constant(['a', 'b', 'c'])
>>> vals_tensor = tf.constant([7, 8, 9], dtype=tf.int64)
>>> input_tensor = tf.constant(['a', 'f'])
>>> table.insert(keys_tensor, vals_tensor)
>>> table.lookup(input_tensor).numpy()
array([ 7, -1])
>>> table.remove(tf.constant(['c']))
>>> table.lookup(keys_tensor).numpy()
array([ 7, 8, -1])
>>> sorted(table.export()[0].numpy())
[b'a', b'b']
>>> [a.item() for a in sorted(table.export()[1].numpy())]
[7, 8]
"""
def __init__(self,
key_dtype,
value_dtype,
default_value,
name="MutableHashTable",
checkpoint=True,
experimental_is_anonymous=False):
"""Creates an empty `MutableHashTable` object.
Creates a table, the type of its keys and values are specified by key_dtype
and value_dtype, respectively.
Args:
key_dtype: the type of the key tensors.
value_dtype: the type of the value tensors.
default_value: The value to use if a key is missing in the table.
name: A name for the operation (optional).
checkpoint: if True, the contents of the table are saved to and restored
from checkpoints. If `shared_name` is empty for a checkpointed table, it
is shared using the table node name.
experimental_is_anonymous: Whether to use anonymous mode for the
table (default is False). In anonymous mode, the table
resource can only be accessed via a resource handle. It can't
be looked up by a name. When all resource handles pointing to
that resource are gone, the resource will be deleted
automatically.
Returns:
A `MutableHashTable` object.
Raises:
ValueError: If checkpoint is True and no name was specified.
"""
self._default_value = ops.convert_to_tensor(
default_value, dtype=value_dtype)
self._value_shape = self._default_value.get_shape()
self._checkpoint = checkpoint
self._key_dtype = key_dtype
self._value_dtype = value_dtype
self._name = name
self._is_anonymous = experimental_is_anonymous
if not self._is_anonymous:
self._shared_name = None
if context.executing_eagerly():
# TODO(allenl): This will leak memory due to kernel caching by
# the shared_name attribute value (but is better than the
# alternative of sharing everything by default when executing
# eagerly; hopefully creating tables in a loop is uncommon).
self._shared_name = "table_%d" % (ops.uid(),)
super(MutableHashTable, self).__init__(key_dtype, value_dtype)
self._resource_handle = self._create_resource()
if checkpoint:
saveable = MutableHashTable._Saveable(self, name)
if not context.executing_eagerly():
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
def _create_resource(self):
if self._is_anonymous:
if self._default_value.get_shape().ndims == 0:
table_ref = gen_lookup_ops.anonymous_mutable_hash_table(
key_dtype=self._key_dtype,
value_dtype=self._value_dtype,
name=self._name)
else:
table_ref = gen_lookup_ops.anonymous_mutable_hash_table_of_tensors(
key_dtype=self._key_dtype,
value_dtype=self._value_dtype,
value_shape=self._default_value.get_shape(),
name=self._name)
else:
# The table must be shared if checkpointing is requested for multi-worker
# training to work correctly. Use the node name if no shared_name has been
# explicitly specified.
use_node_name_sharing = self._checkpoint and self._shared_name is None
if self._default_value.get_shape().ndims == 0:
table_ref = gen_lookup_ops.mutable_hash_table_v2(
shared_name=self._shared_name,
use_node_name_sharing=use_node_name_sharing,
key_dtype=self._key_dtype,
value_dtype=self._value_dtype,
name=self._name)
else:
table_ref = gen_lookup_ops.mutable_hash_table_of_tensors_v2(
shared_name=self._shared_name,
use_node_name_sharing=use_node_name_sharing,
key_dtype=self._key_dtype,
value_dtype=self._value_dtype,
value_shape=self._default_value.get_shape(),
name=self._name)
if context.executing_eagerly():
self._table_name = None
else:
self._table_name = table_ref.op.name.split("/")[-1]
return table_ref
@property
def name(self):
return self._table_name
def size(self, name=None):
"""Compute the number of elements in this table.
Args:
name: A name for the operation (optional).
Returns:
A scalar tensor containing the number of elements in this table.
"""
with ops.name_scope(name, "%s_Size" % self.name, [self.resource_handle]):
with ops.colocate_with(self.resource_handle):
return gen_lookup_ops.lookup_table_size_v2(self.resource_handle)
def remove(self, keys, name=None):
"""Removes `keys` and its associated values from the table.
If a key is not present in the table, it is silently ignored.
Args:
keys: Keys to remove. Can be a tensor of any shape. Must match the table's
key type.
name: A name for the operation (optional).
Returns:
The created Operation.
Raises:
TypeError: when `keys` do not match the table data types.
"""
if keys.dtype != self._key_dtype:
raise TypeError(f"Dtype of argument `keys` must be {self._key_dtype}, "
f"received: {keys.dtype}")
with ops.name_scope(name, "%s_lookup_table_remove" % self.name,
(self.resource_handle, keys, self._default_value)):
op = gen_lookup_ops.lookup_table_remove_v2(self.resource_handle, keys)
return op
def lookup(self, keys, dynamic_default_values=None, name=None):
"""Looks up `keys` in a table, outputs the corresponding values.
The `default_value` is used for keys not present in the table.
Args:
keys: Keys to look up. Can be a tensor of any shape. Must match the
table's key_dtype.
dynamic_default_values: The values to use if a key is missing in the
table. If None (by default), the `table.default_value` will be used.
Shape of `dynamic_default_values` must be same with
`table.default_value` or the lookup result tensor.
In the latter case, each key will have a different default value.
For example:
```python
keys = [0, 1, 3]
dynamic_default_values = [[1, 3, 4], [2, 3, 9], [8, 3, 0]]
# The key '0' will use [1, 3, 4] as default value.
# The key '1' will use [2, 3, 9] as default value.
# The key '3' will use [8, 3, 0] as default value.
```
name: A name for the operation (optional).
Returns:
A tensor containing the values in the same shape as `keys` using the
table's value type.
Raises:
TypeError: when `keys` do not match the table data types.
"""
with ops.name_scope(name, "%s_lookup_table_find" % self.name,
(self.resource_handle, keys, self._default_value)):
keys = ops.convert_to_tensor(keys, dtype=self._key_dtype, name="keys")
with ops.colocate_with(self.resource_handle):
values = gen_lookup_ops.lookup_table_find_v2(
self.resource_handle, keys, dynamic_default_values
if dynamic_default_values is not None else self._default_value)
return values
def insert(self, keys, values, name=None):
"""Associates `keys` with `values`.
Args:
keys: Keys to insert. Can be a tensor of any shape. Must match the table's
key type.
values: Values to be associated with keys. Must be a tensor of the same
shape as `keys` and match the table's value type.
name: A name for the operation (optional).
Returns:
The created Operation.
Raises:
TypeError: when `keys` or `values` doesn't match the table data
types.
"""
with ops.name_scope(name, "%s_lookup_table_insert" % self.name,
[self.resource_handle, keys, values]):
keys = ops.convert_to_tensor(keys, self._key_dtype, name="keys")
values = ops.convert_to_tensor(values, self._value_dtype, name="values")
with ops.colocate_with(self.resource_handle):
# pylint: disable=protected-access
op = gen_lookup_ops.lookup_table_insert_v2(self.resource_handle, keys,
values)
return op
def export(self, name=None):
"""Returns tensors of all keys and values in the table.
Args:
name: A name for the operation (optional).
Returns:
A pair of tensors with the first tensor containing all keys and the
second tensors containing all values in the table.
"""
with ops.name_scope(name, "%s_lookup_table_export_values" % self.name,
[self.resource_handle]):
with ops.colocate_with(self.resource_handle):
exported_keys, exported_values = gen_lookup_ops.lookup_table_export_v2(
self.resource_handle, self._key_dtype, self._value_dtype)
return exported_keys, exported_values
def _serialize_to_tensors(self):
"""Implements checkpointing protocols for `Trackable`."""
tensors = self.export()
return {"-keys": tensors[0], "-values": tensors[1]}
def _restore_from_tensors(self, restored_tensors):
"""Implements checkpointing protocols for `Trackable`."""
with ops.name_scope("%s_table_restore" % self._name):
with ops.colocate_with(self.resource_handle):
return gen_lookup_ops.lookup_table_import_v2(
self.resource_handle,
restored_tensors["-keys"],
restored_tensors["-values"])
def _copy_trackable_to_cpu(self, object_map):
"""Implements checkpointing protocols for `Trackable`."""
if self not in object_map:
# If self is not already populated in object map, instantiate the copy
object_map[self] = MutableHashTable(
self._key_dtype,
self._value_dtype,
self._default_value,
self._name,
self._checkpoint,
self._is_anonymous
)
# Copy values from `self` to copy of `self`
serialized = self._serialize_to_tensors()
object_map[self]._restore_from_tensors(serialized) # pylint: disable=protected-access
# This class is needed for `MutableHashTable(checkpoint=True)`.
class _Saveable(BaseSaverBuilder.SaveableObject):
"""SaveableObject implementation for DenseHashTable."""
def __init__(self, table, name, table_name=None):
tensors = table.export()
specs = [
BaseSaverBuilder.SaveSpec(tensors[0], "", name + "-keys"),
BaseSaverBuilder.SaveSpec(tensors[1], "", name + "-values")
]
self.table_name = table_name or name
# pylint: disable=protected-access
super(MutableHashTable._Saveable, self).__init__(table, specs, name)
def restore(self, restored_tensors, restored_shapes):
del restored_shapes # unused
# pylint: disable=protected-access
with ops.name_scope("%s_table_restore" % self.table_name):
with ops.colocate_with(self.op.resource_handle):
return gen_lookup_ops.lookup_table_import_v2(self.op.resource_handle,
restored_tensors[0],
restored_tensors[1])
@tf_export("lookup.experimental.DenseHashTable")
@saveable_compat.legacy_saveable_name("table")
| MutableHashTable |
python | lazyprogrammer__machine_learning_examples | nlp_class2/glove_svd.py | {
"start": 758,
"end": 6791
} | class ____:
def __init__(self, D, V, context_sz):
self.D = D
self.V = V
self.context_sz = context_sz
def fit(self, sentences, cc_matrix=None):
# build co-occurrence matrix
# paper calls it X, so we will call it X, instead of calling
# the training data X
# TODO: would it be better to use a sparse matrix?
t0 = datetime.now()
V = self.V
D = self.D
if not os.path.exists(cc_matrix):
X = np.zeros((V, V))
N = len(sentences)
print("number of sentences to process:", N)
it = 0
for sentence in sentences:
it += 1
if it % 10000 == 0:
print("processed", it, "/", N)
n = len(sentence)
for i in range(n):
# i is not the word index!!!
# j is not the word index!!!
# i just points to which element of the sequence (sentence) we're looking at
wi = sentence[i]
start = max(0, i - self.context_sz)
end = min(n, i + self.context_sz)
# we can either choose only one side as context, or both
# here we are doing both
# make sure "start" and "end" tokens are part of some context
# otherwise their f(X) will be 0 (denominator in bias update)
if i - self.context_sz < 0:
points = 1.0 / (i + 1)
X[wi,0] += points
X[0,wi] += points
if i + self.context_sz > n:
points = 1.0 / (n - i)
X[wi,1] += points
X[1,wi] += points
# left side
for j in range(start, i):
wj = sentence[j]
points = 1.0 / (i - j) # this is +ve
X[wi,wj] += points
X[wj,wi] += points
# right side
for j in range(i + 1, end):
wj = sentence[j]
points = 1.0 / (j - i) # this is +ve
X[wi,wj] += points
X[wj,wi] += points
# save the cc matrix because it takes forever to create
np.save(cc_matrix, X)
else:
X = np.load(cc_matrix)
print("max in X:", X.max())
# target
logX = np.log(X + 1)
print("max in log(X):", logX.max())
print("time to build co-occurrence matrix:", (datetime.now() - t0))
# subtract global mean
mu = logX.mean()
model = TruncatedSVD(n_components=D)
Z = model.fit_transform(logX - mu)
S = np.diag(model.explained_variance_)
Sinv = np.linalg.inv(S)
self.W = Z.dot(Sinv)
self.U = model.components_.T
# calculate cost once
delta = self.W.dot(S).dot(self.U.T) + mu - logX
cost = (delta * delta).sum()
print("svd cost:", cost)
def save(self, fn):
# function word_analogies expects a (V,D) matrx and a (D,V) matrix
arrays = [self.W, self.U.T]
np.savez(fn, *arrays)
def main(we_file, w2i_file, use_brown=True, n_files=100):
if use_brown:
cc_matrix = "cc_matrix_brown.npy"
else:
cc_matrix = "cc_matrix_%s.npy" % n_files
# hacky way of checking if we need to re-load the raw data or not
# remember, only the co-occurrence matrix is needed for training
if os.path.exists(cc_matrix):
with open(w2i_file) as f:
word2idx = json.load(f)
sentences = [] # dummy - we won't actually use it
else:
if use_brown:
keep_words = set([
'king', 'man', 'woman',
'france', 'paris', 'london', 'rome', 'italy', 'britain', 'england',
'french', 'english', 'japan', 'japanese', 'chinese', 'italian',
'australia', 'australian', 'december', 'november', 'june',
'january', 'february', 'march', 'april', 'may', 'july', 'august',
'september', 'october',
])
sentences, word2idx = get_sentences_with_word2idx_limit_vocab(n_vocab=5000, keep_words=keep_words)
else:
sentences, word2idx = get_wikipedia_data(n_files=n_files, n_vocab=2000)
with open(w2i_file, 'w') as f:
json.dump(word2idx, f)
V = len(word2idx)
model = Glove(100, V, 10)
# alternating least squares method
model.fit(sentences, cc_matrix=cc_matrix)
model.save(we_file)
if __name__ == '__main__':
we = 'glove_svd_50.npz'
w2i = 'glove_word2idx_50.json'
# we = 'glove_svd_brown.npz'
# w2i = 'glove_word2idx_brown.json'
main(we, w2i, use_brown=False)
# load back embeddings
npz = np.load(we)
W1 = npz['arr_0']
W2 = npz['arr_1']
with open(w2i) as f:
word2idx = json.load(f)
idx2word = {i:w for w,i in word2idx.items()}
for concat in (True, False):
print("** concat:", concat)
if concat:
We = np.hstack([W1, W2.T])
else:
We = (W1 + W2.T) / 2
find_analogies('king', 'man', 'woman', We, word2idx, idx2word)
find_analogies('france', 'paris', 'london', We, word2idx, idx2word)
find_analogies('france', 'paris', 'rome', We, word2idx, idx2word)
find_analogies('paris', 'france', 'italy', We, word2idx, idx2word)
find_analogies('france', 'french', 'english', We, word2idx, idx2word)
find_analogies('japan', 'japanese', 'chinese', We, word2idx, idx2word)
find_analogies('japan', 'japanese', 'italian', We, word2idx, idx2word)
find_analogies('japan', 'japanese', 'australian', We, word2idx, idx2word)
find_analogies('december', 'november', 'june', We, word2idx, idx2word)
| Glove |
python | encode__django-rest-framework | tests/test_renderers.py | {
"start": 2538,
"end": 2655
} | class ____(APIView):
def post(self, request, **kwargs):
return Response({'foo': request.data})
| MockPOSTView |
python | openai__gym | gym/envs/mujoco/half_cheetah.py | {
"start": 111,
"end": 1840
} | class ____(MuJocoPyEnv, utils.EzPickle):
metadata = {
"render_modes": [
"human",
"rgb_array",
"depth_array",
],
"render_fps": 20,
}
def __init__(self, **kwargs):
observation_space = Box(low=-np.inf, high=np.inf, shape=(17,), dtype=np.float64)
MuJocoPyEnv.__init__(
self, "half_cheetah.xml", 5, observation_space=observation_space, **kwargs
)
utils.EzPickle.__init__(self, **kwargs)
def step(self, action):
xposbefore = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
xposafter = self.sim.data.qpos[0]
ob = self._get_obs()
reward_ctrl = -0.1 * np.square(action).sum()
reward_run = (xposafter - xposbefore) / self.dt
reward = reward_ctrl + reward_run
terminated = False
if self.render_mode == "human":
self.render()
return (
ob,
reward,
terminated,
False,
dict(reward_run=reward_run, reward_ctrl=reward_ctrl),
)
def _get_obs(self):
return np.concatenate(
[
self.sim.data.qpos.flat[1:],
self.sim.data.qvel.flat,
]
)
def reset_model(self):
qpos = self.init_qpos + self.np_random.uniform(
low=-0.1, high=0.1, size=self.model.nq
)
qvel = self.init_qvel + self.np_random.standard_normal(self.model.nv) * 0.1
self.set_state(qpos, qvel)
return self._get_obs()
def viewer_setup(self):
assert self.viewer is not None
self.viewer.cam.distance = self.model.stat.extent * 0.5
| HalfCheetahEnv |
python | simonw__datasette | datasette/views/special.py | {
"start": 2264,
"end": 2635
} | class ____(View):
async def get(self, request, datasette):
await datasette.ensure_permission(action="view-instance", actor=request.actor)
return Response.html(
await datasette.render_template(
"patterns.html",
request=request,
view_name="patterns",
)
)
| PatternPortfolioView |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/serializable_types/pyspark.py | {
"start": 99,
"end": 1629
} | class ____(dict):
"""Custom type implementing pydantic validation."""
struct_type: pyspark.sql.types.StructType
def __init__(
self,
fields_or_struct_type: pyspark.sql.types.StructType
| list[pyspark.sql.types.StructField]
| None,
):
# Store a copy of the instantiated type as an instance variable
if isinstance(fields_or_struct_type, pyspark.sql.types.StructType):
self.struct_type = fields_or_struct_type
else:
self.struct_type = pyspark.sql.types.StructType(fields=fields_or_struct_type)
# Store the serialized version in the keys/values of the instance (parent is dict)
json_value = self.struct_type.jsonValue()
super().__init__(**json_value)
@classmethod
def validate(
cls,
fields_or_struct_type: pyspark.sql.types.StructType
| list[pyspark.sql.types.StructField]
| None,
):
"""If already StructType then return otherwise try to create a StructType."""
if isinstance(fields_or_struct_type, pyspark.sql.types.StructType):
return cls(fields_or_struct_type.fields)
else:
return cls(fields_or_struct_type)
@classmethod
def __get_validators__(cls):
# one or more validators may be yielded which will be called in the
# order to validate the input, each validator will receive as an input
# the value returned from the previous validator
yield cls.validate
| SerializableStructType |
python | ray-project__ray | python/ray/tests/test_network_failure_e2e.py | {
"start": 8043,
"end": 10091
} | class ____:
def __init__(self, counter):
self.counter = counter
async def run(self):
count = await self.counter.get.remote()
if count == 0:
# first attempt
await self.counter.inc.remote()
while len(list_tasks(
filters=[("name", "=", "AsyncActor.run")])) < 2:
# wait for second attempt to be made
await asyncio.sleep(1)
# wait until the second attempt reaches the actor
await asyncio.sleep(2)
await self.counter.inc.remote()
return "first"
else:
# second attempt
# make sure second attempt only runs
# after first attempt finishes
assert count == 2
return "second"
counter = Counter.remote()
async_actor = AsyncActor.remote(counter)
assert ray.get(async_actor.run.remote()) == "second"
"""
check_async_actor_run_is_called = """
import ray
from ray._common.test_utils import wait_for_condition
ray.init(namespace="test")
wait_for_condition(lambda: ray.get_actor("counter") is not None)
counter = ray.get_actor("counter")
wait_for_condition(lambda: ray.get(counter.get.remote()) == 1)
"""
def inject_transient_network_failure():
try:
result = head3.exec_run(
cmd=f"python -c '{check_async_actor_run_is_called}'"
)
assert result.exit_code == 0, result.output.decode("utf-8")
worker_ip = worker3._container.attrs["NetworkSettings"]["Networks"][
network.name
]["IPAddress"]
network.disconnect(worker3.name, force=True)
sleep(2)
network.connect(worker3.name, ipv4_address=worker_ip)
except Exception as e:
print(f"Network failure injection failed {e}")
t = threading.Thread(target=inject_transient_network_failure, daemon=True)
t.start()
result = head3.exec_run(
cmd=f"python -c '{driver}'",
)
assert result.exit_code == 0, result.output.decode("utf-8")
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
| AsyncActor |
python | pytorch__pytorch | .github/scripts/gitutils.py | {
"start": 1694,
"end": 3644
} | class ____:
commit_hash: str
title: str
body: str
author: str
author_date: datetime
commit_date: Optional[datetime]
def __init__(
self,
commit_hash: str,
author: str,
author_date: datetime,
title: str,
body: str,
commit_date: Optional[datetime] = None,
) -> None:
self.commit_hash = commit_hash
self.author = author
self.author_date = author_date
self.commit_date = commit_date
self.title = title
self.body = body
def __repr__(self) -> str:
return f"{self.title} ({self.commit_hash})"
def __contains__(self, item: Any) -> bool:
return item in self.body or item in self.title
def parse_fuller_format(lines: Union[str, list[str]]) -> GitCommit:
"""
Expect commit message generated using `--format=fuller --date=unix` format, i.e.:
commit <sha1>
Author: <author>
AuthorDate: <author date>
Commit: <committer>
CommitDate: <committer date>
<title line>
<full commit message>
"""
if isinstance(lines, str):
lines = lines.split("\n")
# TODO: Handle merge commits correctly
if len(lines) > 1 and lines[1].startswith("Merge:"):
del lines[1]
assert len(lines) > 7
assert lines[0].startswith("commit")
assert lines[1].startswith("Author: ")
assert lines[2].startswith("AuthorDate: ")
assert lines[3].startswith("Commit: ")
assert lines[4].startswith("CommitDate: ")
assert len(lines[5]) == 0
return GitCommit(
commit_hash=lines[0].split()[1].strip(),
author=lines[1].split(":", 1)[1].strip(),
author_date=datetime.fromtimestamp(int(lines[2].split(":", 1)[1].strip())),
commit_date=datetime.fromtimestamp(int(lines[4].split(":", 1)[1].strip())),
title=lines[6].strip(),
body="\n".join(lines[7:]),
)
| GitCommit |
python | doocs__leetcode | solution/2700-2799/2728.Count Houses in a Circular Street/Solution.py | {
"start": 252,
"end": 576
} | class ____:
def houseCount(self, street: Optional["Street"], k: int) -> int:
for _ in range(k):
street.openDoor()
street.moveLeft()
ans = 0
while street.isDoorOpen():
street.closeDoor()
street.moveLeft()
ans += 1
return ans
| Solution |
python | facebook__pyre-check | client/commands/infer.py | {
"start": 14582,
"end": 15090
} | class ____:
name: str
return_annotation: TypeAnnotation
parameters: Sequence[Parameter]
is_async: bool
def to_stub(self) -> str:
name = _sanitize_name(self.name)
async_ = "async " if self.is_async else ""
parameters = ", ".join(parameter.to_stub() for parameter in self.parameters)
return_ = self.return_annotation.to_stub(prefix=" -> ")
return f"{async_}def {name}({parameters}){return_}: ..."
@dataclasses.dataclass(frozen=True)
| FunctionAnnotation |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_basic.py | {
"start": 5893,
"end": 7419
} | class ____(fixtures.DeclarativeMappedTest):
run_setup_mappers = "once"
__dialect__ = "default"
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class A(ComparableEntity, Base):
__tablename__ = "a"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
discriminator = Column(String(50), nullable=False)
child_id = Column(Integer, ForeignKey("a.id"))
child = relationship("A")
__mapper_args__ = {
"polymorphic_identity": "a",
"polymorphic_on": case((discriminator == "a", "a"), else_="b"),
}
class B(A):
__mapper_args__ = {"polymorphic_identity": "b"}
@classmethod
def insert_data(cls, connection):
A = cls.classes.A
session = Session(connection)
session.add_all(
[
A(id=1, discriminator="a"),
A(id=2, discriminator="b", child_id=1),
A(id=3, discriminator="c", child_id=1),
]
)
session.commit()
def test_joinedload(self):
A = self.classes.A
B = self.classes.B
session = fixture_session()
result = (
session.query(A)
.filter_by(child_id=None)
.options(joinedload(A.child))
.one()
)
eq_(result, A(id=1, discriminator="a", child=[B(id=2), B(id=3)]))
| PolyExpressionEagerLoad |
python | pandas-dev__pandas | pandas/io/pytables.py | {
"start": 149949,
"end": 156936
} | class ____(Table):
"""support the new appendable table formats"""
table_type = "appendable"
# error: Signature of "write" incompatible with supertype "Fixed"
def write( # type: ignore[override]
self,
obj,
axes=None,
append: bool = False,
complib=None,
complevel=None,
fletcher32=None,
min_itemsize=None,
chunksize: int | None = None,
expectedrows=None,
dropna: bool = False,
nan_rep=None,
data_columns=None,
track_times: bool = True,
) -> None:
if not append and self.is_exists:
self._handle.remove_node(self.group, "table")
# create the axes
table = self._create_axes(
axes=axes,
obj=obj,
validate=append,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
data_columns=data_columns,
)
for a in table.axes:
a.validate_names()
if not table.is_exists:
# create the table
options = table.create_description(
complib=complib,
complevel=complevel,
fletcher32=fletcher32,
expectedrows=expectedrows,
)
# set the table attributes
table.set_attrs()
options["track_times"] = track_times
# create the table
table._handle.create_table(table.group, **options)
# update my info
table.attrs.info = table.info
# validate the axes and set the kinds
for a in table.axes:
a.validate_and_set(table, append)
# add the rows
table.write_data(chunksize, dropna=dropna)
def write_data(self, chunksize: int | None, dropna: bool = False) -> None:
"""
we form the data into a 2-d including indexes,values,mask write chunk-by-chunk
"""
names = self.dtype.names
nrows = self.nrows_expected
# if dropna==True, then drop ALL nan rows
masks = []
if dropna:
for a in self.values_axes:
# figure the mask: only do if we can successfully process this
# column, otherwise ignore the mask
mask = isna(a.data).all(axis=0)
if isinstance(mask, np.ndarray):
masks.append(mask.astype("u1", copy=False))
# consolidate masks
if masks:
mask = masks[0]
for m in masks[1:]:
mask = mask & m
mask = mask.ravel()
else:
mask = None
# broadcast the indexes if needed
indexes = [a.cvalues for a in self.index_axes]
nindexes = len(indexes)
assert nindexes == 1, nindexes # ensures we dont need to broadcast
# transpose the values so first dimension is last
# reshape the values if needed
values = [a.take_data() for a in self.values_axes]
values = [v.transpose(np.roll(np.arange(v.ndim), v.ndim - 1)) for v in values]
bvalues = []
for i, v in enumerate(values):
new_shape = (nrows,) + self.dtype[names[nindexes + i]].shape
bvalues.append(v.reshape(new_shape))
# write the chunks
if chunksize is None:
chunksize = 100000
rows = np.empty(min(chunksize, nrows), dtype=self.dtype)
chunks = nrows // chunksize + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
self.write_data_chunk(
rows,
indexes=[a[start_i:end_i] for a in indexes],
mask=mask[start_i:end_i] if mask is not None else None,
values=[v[start_i:end_i] for v in bvalues],
)
def write_data_chunk(
self,
rows: np.ndarray,
indexes: list[np.ndarray],
mask: npt.NDArray[np.bool_] | None,
values: list[np.ndarray],
) -> None:
"""
Parameters
----------
rows : an empty memory space where we are putting the chunk
indexes : an array of the indexes
mask : an array of the masks
values : an array of the values
"""
# 0 len
for v in values:
if not np.prod(v.shape):
return
nrows = indexes[0].shape[0]
if nrows != len(rows):
rows = np.empty(nrows, dtype=self.dtype)
names = self.dtype.names
nindexes = len(indexes)
# indexes
for i, idx in enumerate(indexes):
rows[names[i]] = idx
# values
for i, v in enumerate(values):
rows[names[i + nindexes]] = v
# mask
if mask is not None:
m = ~mask.ravel().astype(bool, copy=False)
if not m.all():
rows = rows[m]
if len(rows):
self.table.append(rows)
self.table.flush()
def delete(
self, where=None, start: int | None = None, stop: int | None = None
) -> int | None:
# delete all rows (and return the nrows)
if where is None or not len(where):
if start is None and stop is None:
nrows = self.nrows
self._handle.remove_node(self.group, recursive=True)
else:
# pytables<3.0 would remove a single row with stop=None
if stop is None:
stop = self.nrows
nrows = self.table.remove_rows(start=start, stop=stop)
self.table.flush()
return nrows
# infer the data kind
if not self.infer_axes():
return None
# create the selection
table = self.table
selection = Selection(self, where, start=start, stop=stop)
values = selection.select_coords()
# delete the rows in reverse order
sorted_series = Series(values, copy=False).sort_values()
ln = len(sorted_series)
if ln:
# construct groups of consecutive rows
diff = sorted_series.diff()
groups = list(diff[diff > 1].index)
# 1 group
if not groups:
groups = [0]
# final element
if groups[-1] != ln:
groups.append(ln)
# initial element
if groups[0] != 0:
groups.insert(0, 0)
# we must remove in reverse order!
pg = groups.pop()
for g in reversed(groups):
rows = sorted_series.take(range(g, pg))
table.remove_rows(
start=rows[rows.index[0]], stop=rows[rows.index[-1]] + 1
)
pg = g
self.table.flush()
# return the number of rows removed
return ln
| AppendableTable |
python | plotly__plotly.py | plotly/graph_objs/layout/scene/camera/_center.py | {
"start": 235,
"end": 2877
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.scene.camera"
_path_str = "layout.scene.camera.center"
_valid_props = {"x", "y", "z"}
@property
def x(self):
"""
The 'x' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
@property
def y(self):
"""
The 'y' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
@property
def z(self):
"""
The 'z' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["z"]
@z.setter
def z(self, val):
self["z"] = val
@property
def _prop_descriptions(self):
return """\
x
y
z
"""
def __init__(self, arg=None, x=None, y=None, z=None, **kwargs):
"""
Construct a new Center object
Sets the (x,y,z) components of the 'center' camera vector This
vector determines the translation (x,y,z) space about the
center of this scene. By default, there is no such translation.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.scene.camera.Center`
x
y
z
Returns
-------
Center
"""
super().__init__("center")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.scene.camera.Center
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.scene.camera.Center`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("x", arg, x)
self._set_property("y", arg, y)
self._set_property("z", arg, z)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Center |
python | django__django | django/contrib/admin/widgets.py | {
"start": 12787,
"end": 12946
} | class ____(forms.Textarea):
def __init__(self, attrs=None):
super().__init__(attrs={"class": "vLargeTextField", **(attrs or {})})
| AdminTextareaWidget |
python | readthedocs__readthedocs.org | readthedocs/oauth/services/github.py | {
"start": 841,
"end": 22348
} | class ____(UserService):
"""Provider service for GitHub."""
vcs_provider_slug = GITHUB
allauth_provider = GitHubProvider
base_api_url = "https://api.github.com"
# TODO replace this with a less naive check
url_pattern = re.compile(r"github\.com")
supports_build_status = True
def sync_repositories(self):
"""Sync repositories from GitHub API."""
remote_ids = []
try:
repos = self.paginate(f"{self.base_api_url}/user/repos", per_page=100)
for repo in repos:
remote_repository = self.create_repository(repo)
if remote_repository:
remote_ids.append(remote_repository.remote_id)
except (TypeError, ValueError):
log.warning("Error syncing GitHub repositories")
raise SyncServiceError(
SyncServiceError.INVALID_OR_REVOKED_ACCESS_TOKEN.format(
provider=self.vcs_provider_slug
)
)
return remote_ids
def sync_organizations(self):
"""
Sync organizations from GitHub API.
This method only creates the relationships between the
organizations and the user, as all the repositories
are already created in the sync_repositories method.
"""
organization_remote_ids = []
try:
orgs = self.paginate(f"{self.base_api_url}/user/orgs", per_page=100)
for org in orgs:
org_details = self.session.get(org["url"]).json()
remote_organization = self.create_organization(org_details)
remote_organization.get_remote_organization_relation(self.user, self.account)
organization_remote_ids.append(remote_organization.remote_id)
except (TypeError, ValueError):
log.warning("Error syncing GitHub organizations")
raise SyncServiceError(
SyncServiceError.INVALID_OR_REVOKED_ACCESS_TOKEN.format(
provider=self.allauth_provider.name
)
)
return organization_remote_ids, []
def _has_access_to_repository(self, fields):
"""Check if the user has access to the repository, and if they are an admin."""
permissions = fields.get("permissions", {})
# If the repo is public, the user can still access it,
# so we need to check if the user has any access
# to the repository, even if they are not an admin.
has_access = any(
permissions.get(key, False) for key in ["admin", "maintain", "push", "triage"]
)
is_admin = permissions.get("admin", False)
return has_access, is_admin
def update_repository(self, remote_repository: RemoteRepository):
resp = self.session.get(f"{self.base_api_url}/repositories/{remote_repository.remote_id}")
# The repo was deleted, or the user does not have access to it.
# In any case, we remove the user relationship.
if resp.status_code in [403, 404]:
log.info(
"User no longer has access to the repository, removing remote relationship.",
remote_repository=remote_repository.remote_id,
)
remote_repository.get_remote_repository_relation(self.user, self.account).delete()
return
if resp.status_code != 200:
log.warning(
"Error fetching repository from GitHub",
remote_repository=remote_repository.remote_id,
status_code=resp.status_code,
)
return
data = resp.json()
self._update_repository_from_fields(remote_repository, data)
has_access, is_admin = self._has_access_to_repository(data)
relation = remote_repository.get_remote_repository_relation(
self.user,
self.account,
)
if not has_access:
# If the user no longer has access to the repository,
# we remove the remote relationship.
log.info(
"User no longer has access to the repository, removing remote relationship.",
remote_repository=remote_repository.remote_id,
)
relation.delete()
else:
relation.admin = is_admin
relation.save()
def create_repository(self, fields, privacy=None):
"""
Update or create a repository from GitHub API response.
:param fields: dictionary of response data from API
:param privacy: privacy level to support
:rtype: RemoteRepository
"""
privacy = privacy or settings.DEFAULT_PRIVACY_LEVEL
if any(
[
(privacy == "private"),
(fields["private"] is False and privacy == "public"),
]
):
repo, _ = RemoteRepository.objects.get_or_create(
remote_id=str(fields["id"]),
vcs_provider=self.vcs_provider_slug,
)
self._update_repository_from_fields(repo, fields)
remote_repository_relation = repo.get_remote_repository_relation(
self.user, self.account
)
_, is_admin = self._has_access_to_repository(fields)
remote_repository_relation.admin = is_admin
remote_repository_relation.save()
return repo
log.debug(
"Not importing repository because mismatched type.",
repository=fields["name"],
)
def _update_repository_from_fields(self, repo, fields):
owner_type = fields["owner"]["type"]
organization = None
if owner_type == "Organization":
organization = self.create_organization(fields=fields["owner"])
# If there is an organization associated with this repository,
# attach the organization to the repository.
if organization and owner_type == "Organization":
repo.organization = organization
# If the repository belongs to a user,
# remove the organization linked to the repository.
if owner_type == "User":
repo.organization = None
repo.name = fields["name"]
repo.full_name = fields["full_name"]
repo.description = fields["description"]
repo.ssh_url = fields["ssh_url"]
repo.html_url = fields["html_url"]
repo.private = fields["private"]
repo.vcs = "git"
repo.avatar_url = fields.get("owner", {}).get("avatar_url")
repo.default_branch = fields.get("default_branch")
if repo.private:
repo.clone_url = fields["ssh_url"]
else:
repo.clone_url = fields["clone_url"]
if not repo.avatar_url:
repo.avatar_url = self.default_user_avatar_url
repo.save()
def create_organization(self, fields):
"""
Update or create remote organization from GitHub API response.
:param fields: dictionary response of data from API
:param bool create_relationship: Whether to create a remote relationship between the
organization and the current user. If `False`, only the `RemoteOrganization` object
will be created/updated.
:rtype: RemoteOrganization
.. note::
This method caches organizations by their remote ID to avoid
unnecessary database queries, specially when creating
multiple repositories that belong to the same organization.
"""
organization_id = str(fields["id"])
if organization_id in self._organizations_cache:
return self._organizations_cache[organization_id]
organization, _ = RemoteOrganization.objects.get_or_create(
remote_id=organization_id,
vcs_provider=self.vcs_provider_slug,
)
organization.url = fields.get("html_url")
# fields['login'] contains GitHub Organization slug
organization.slug = fields.get("login")
organization.name = fields.get("name")
organization.email = fields.get("email")
organization.avatar_url = fields.get("avatar_url")
if not organization.avatar_url:
organization.avatar_url = self.default_org_avatar_url
organization.save()
self._organizations_cache[organization_id] = organization
return organization
def get_next_url_to_paginate(self, response):
return response.links.get("next", {}).get("url")
def get_paginated_results(self, response):
return response.json()
def get_webhook_data(self, project, integration):
"""Get webhook JSON data to post to the API."""
return json.dumps(
{
"name": "web",
"active": True,
"config": {
"url": self.get_webhook_url(project, integration),
"secret": integration.secret,
"content_type": "json",
},
"events": ["push", "pull_request", "create", "delete"],
}
)
def get_provider_data(self, project, integration):
"""
Gets provider data from GitHub Webhooks API.
:param project: project
:type project: Project
:param integration: Integration for the project
:type integration: Integration
:returns: Dictionary containing provider data from the API or None
:rtype: dict
"""
if integration.provider_data:
return integration.provider_data
owner, repo = build_utils.get_github_username_repo(url=project.repo)
url = f"{self.base_api_url}/repos/{owner}/{repo}/hooks"
structlog.contextvars.bind_contextvars(
url=url,
project_slug=project.slug,
integration_id=integration.pk,
)
rtd_webhook_url = self.get_webhook_url(project, integration)
try:
resp = self.session.get(url)
if resp.status_code == 200:
recv_data = resp.json()
for webhook_data in recv_data:
if webhook_data["config"]["url"] == rtd_webhook_url:
integration.provider_data = webhook_data
integration.save()
log.info(
"GitHub integration updated with provider data for project.",
)
break
else:
log.warning(
"GitHub project does not exist or user does not have permissions.",
https_status_code=resp.status_code,
)
except Exception:
log.exception("GitHub webhook Listing failed for project.")
return integration.provider_data
def setup_webhook(self, project, integration=None) -> bool:
"""
Set up GitHub project webhook for project.
:param project: project to set up webhook for
:type project: Project
:param integration: Integration for the project
:type integration: Integration
:returns: boolean based on webhook set up success, and requests Response object
"""
owner, repo = build_utils.get_github_username_repo(url=project.repo)
if not integration:
integration, _ = Integration.objects.get_or_create(
project=project,
integration_type=Integration.GITHUB_WEBHOOK,
)
data = self.get_webhook_data(project, integration)
url = f"{self.base_api_url}/repos/{owner}/{repo}/hooks"
structlog.contextvars.bind_contextvars(
url=url,
project_slug=project.slug,
integration_id=integration.pk,
)
resp = None
try:
resp = self.session.post(
url,
data=data,
headers={"content-type": "application/json"},
)
structlog.contextvars.bind_contextvars(http_status_code=resp.status_code)
# GitHub will return 200 if already synced
if resp.status_code in [200, 201]:
recv_data = resp.json()
integration.provider_data = recv_data
integration.save()
log.debug("GitHub webhook creation successful for project.")
return True
if resp.status_code in [401, 403, 404]:
log.warning("GitHub project does not exist or user does not have permissions.")
else:
# Unknown response from GitHub
try:
debug_data = resp.json()
except ValueError:
debug_data = resp.content
log.warning(
"GitHub webhook creation failed for project. Unknown response from GitHub.",
debug_data=debug_data,
)
# Catch exceptions with request or deserializing JSON
except (RequestException, ValueError):
log.exception("GitHub webhook creation failed for project.")
return False
def update_webhook(self, project, integration) -> bool:
"""
Update webhook integration.
:param project: project to set up webhook for
:type project: Project
:param integration: Webhook integration to update
:type integration: Integration
:returns: boolean based on webhook update success, and requests Response object
"""
data = self.get_webhook_data(project, integration)
resp = None
provider_data = self.get_provider_data(project, integration)
structlog.contextvars.bind_contextvars(
project_slug=project.slug,
integration_id=integration.pk,
)
# Handle the case where we don't have a proper provider_data set
# This happens with a user-managed webhook previously
if not provider_data:
return self.setup_webhook(project, integration)
try:
url = provider_data.get("url")
resp = self.session.patch(
url,
data=data,
headers={"content-type": "application/json"},
)
structlog.contextvars.bind_contextvars(
http_status_code=resp.status_code,
url=url,
)
# GitHub will return 200 if already synced
if resp.status_code in [200, 201]:
recv_data = resp.json()
integration.provider_data = recv_data
integration.save()
log.info("GitHub webhook update successful for project.")
return True
# GitHub returns 404 when the webhook doesn't exist. In this case,
# we call ``setup_webhook`` to re-configure it from scratch
if resp.status_code == 404:
return self.setup_webhook(project, integration)
# Unknown response from GitHub
try:
debug_data = resp.json()
except ValueError:
debug_data = resp.content
log.warning(
"GitHub webhook update failed. Unknown response from GitHub",
debug_data=debug_data,
)
# Catch exceptions with request or deserializing JSON
except (AttributeError, RequestException, ValueError):
log.exception("GitHub webhook update failed for project.")
return False
def remove_webhook(self, project):
"""
Remove GitHub webhook for the repository associated with the project.
We delete all webhooks that match the URL of the webhook we set up.
The URLs can be in several formats, so we check for all of them:
- https://app.readthedocs.org/api/v2/webhook/github/<project_slug>/<id>
- https://app.readthedocs.org/api/v2/webhook/<project_slug>/<id>
- https://readthedocs.org/api/v2/webhook/github/<project_slug>/<id>
- https://readthedocs.org/api/v2/webhook/<project_slug>/<id>
If a webhook fails to be removed, we log the error and cancel the operation,
as if we weren't able to delete one webhook, we won't be able to delete the others either.
If we didn't find any webhook to delete, we return True.
"""
owner, repo = build_utils.get_github_username_repo(url=project.repo)
try:
resp = self.session.get(f"{self.base_api_url}/repos/{owner}/{repo}/hooks")
resp.raise_for_status()
data = resp.json()
except HTTPError:
log.info("Failed to get GitHub webhooks for project.")
return False
hook_targets = [
f"{settings.PUBLIC_API_URL}/api/v2/webhook/{project.slug}/",
f"{settings.PUBLIC_API_URL}/api/v2/webhook/github/{project.slug}/",
]
hook_targets.append(hook_targets[0].replace("app.", "", 1))
hook_targets.append(hook_targets[1].replace("app.", "", 1))
for hook in data:
hook_url = hook["config"]["url"]
for hook_target in hook_targets:
if hook_url.startswith(hook_target):
try:
self.session.delete(
f"{self.base_api_url}/repos/{owner}/{repo}/hooks/{hook['id']}"
).raise_for_status()
except HTTPError:
log.info("Failed to remove GitHub webhook for project.")
return False
return True
def remove_ssh_key(self, project) -> bool:
"""
Remove the SSH key from the GitHub repository associated with the project.
This is overridden in .com, as we don't make use of the SSH keys in .org.
"""
return True
def send_build_status(self, *, build, commit, status):
"""
Create GitHub commit status for project.
:param build: Build to set up commit status for
:type build: Build
:param status: build state failure, pending, or success.
:type status: str
:param commit: commit sha of the pull request
:type commit: str
:returns: boolean based on commit status creation was successful or not.
:rtype: Bool
"""
project = build.project
owner, repo = build_utils.get_github_username_repo(url=project.repo)
# select the correct status and description.
github_build_status = SELECT_BUILD_STATUS[status]["github"]
description = SELECT_BUILD_STATUS[status]["description"]
statuses_url = f"{self.base_api_url}/repos/{owner}/{repo}/statuses/{commit}"
if status == BUILD_STATUS_SUCCESS:
# Link to the documentation for this version
target_url = build.version.get_absolute_url()
else:
# Link to the build detail's page
target_url = build.get_full_url()
context = f"{settings.RTD_BUILD_STATUS_API_NAME}:{project.slug}"
data = {
"state": github_build_status,
"target_url": target_url,
"description": description,
"context": context,
}
structlog.contextvars.bind_contextvars(
project_slug=project.slug,
commit_status=github_build_status,
user_username=self.user.username,
statuses_url=statuses_url,
target_url=target_url,
status=status,
)
resp = None
try:
resp = self.session.post(
statuses_url,
data=json.dumps(data),
headers={"content-type": "application/json"},
)
structlog.contextvars.bind_contextvars(http_status_code=resp.status_code)
if resp.status_code == 201:
log.debug("GitHub commit status created for project.")
return True
if resp.status_code in [401, 403, 404]:
log.info("GitHub project does not exist or user does not have permissions.")
return False
if resp.status_code == 422 and "No commit found for SHA" in resp.json()["message"]:
# This happens when the user force-push a branch or similar
# that changes the Git history and SHA does not exist anymore.
#
# We return ``True`` here because otherwise our logic will try
# with different users. However, all of them will fail since
# it's not a permission issue.
return True
try:
debug_data = resp.json()
except ValueError:
debug_data = resp.content
log.warning(
"GitHub commit status creation failed. Unknown GitHub response.",
debug_data=debug_data,
)
# Catch exceptions with request or deserializing JSON
except (RequestException, ValueError):
log.exception("GitHub commit status creation failed for project.")
except InvalidGrantError:
log.info("Invalid GitHub grant for user.", exc_info=True)
except TokenExpiredError:
log.info("GitHub token expired for user.", exc_info=True)
return False
| GitHubService |
python | chroma-core__chroma | chromadb/execution/expression/operator.py | {
"start": 32880,
"end": 33070
} | class ____(Rank):
"""Minimum of multiple ranks"""
ranks: List[Rank]
def to_dict(self) -> Dict[str, Any]:
return {"$min": [r.to_dict() for r in self.ranks]}
@dataclass
| Min |
python | allegroai__clearml | clearml/backend_api/services/v2_20/tasks.py | {
"start": 407810,
"end": 410428
} | class ____(Request):
"""
Unarchive tasks
:param ids: IDs of the tasks to unarchive
:type ids: Sequence[str]
:param status_reason: Reason for status change
:type status_reason: str
:param status_message: Extra information regarding status change
:type status_message: str
"""
_service = "tasks"
_action = "unarchive_many"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"ids": {
"description": "IDs of the tasks to unarchive",
"items": {"type": "string"},
"type": "array",
},
"status_message": {
"description": "Extra information regarding status change",
"type": "string",
},
"status_reason": {
"description": "Reason for status change",
"type": "string",
},
},
"required": ["ids"],
"type": "object",
}
def __init__(
self, ids: List[str], status_reason: Optional[str] = None, status_message: Optional[str] = None, **kwargs: Any
) -> None:
super(UnarchiveManyRequest, self).__init__(**kwargs)
self.ids = ids
self.status_reason = status_reason
self.status_message = status_message
@schema_property("ids")
def ids(self) -> List[str]:
return self._property_ids
@ids.setter
def ids(self, value: List[str]) -> None:
if value is None:
self._property_ids = None
return
self.assert_isinstance(value, "ids", (list, tuple))
self.assert_isinstance(value, "ids", six.string_types, is_array=True)
self._property_ids = value
@schema_property("status_reason")
def status_reason(self) -> Optional[str]:
return self._property_status_reason
@status_reason.setter
def status_reason(self, value: Optional[str]) -> None:
if value is None:
self._property_status_reason = None
return
self.assert_isinstance(value, "status_reason", six.string_types)
self._property_status_reason = value
@schema_property("status_message")
def status_message(self) -> Optional[str]:
return self._property_status_message
@status_message.setter
def status_message(self, value: Optional[str]) -> None:
if value is None:
self._property_status_message = None
return
self.assert_isinstance(value, "status_message", six.string_types)
self._property_status_message = value
| UnarchiveManyRequest |
python | tensorflow__tensorflow | tensorflow/python/training/server_lib_test.py | {
"start": 19418,
"end": 25375
} | class ____(test.TestCase):
def testStringConversion(self):
cluster_spec = server_lib.ClusterSpec(
{"ps": ["ps0:1111"], "worker": ["worker0:3333", "worker1:4444"]}
)
expected_str = (
"ClusterSpec({'ps': ['ps0:1111'], 'worker': ['worker0:3333', "
"'worker1:4444']})"
)
self.assertEqual(expected_str, str(cluster_spec))
def testProtoDictDefEquivalences(self):
cluster_spec = server_lib.ClusterSpec({
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"],
})
expected_proto = """
job { name: 'ps' tasks { key: 0 value: 'ps0:2222' }
tasks { key: 1 value: 'ps1:2222' } }
job { name: 'worker' tasks { key: 0 value: 'worker0:2222' }
tasks { key: 1 value: 'worker1:2222' }
tasks { key: 2 value: 'worker2:2222' } }
"""
self.assertProtoEquals(expected_proto, cluster_spec.as_cluster_def())
self.assertProtoEquals(
expected_proto, server_lib.ClusterSpec(cluster_spec).as_cluster_def()
)
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec.as_cluster_def()).as_cluster_def(),
)
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec.as_dict()).as_cluster_def(),
)
def testProtoDictDefEquivalencesWithStringTaskIndex(self):
cluster_spec = server_lib.ClusterSpec(
{"ps": ["ps0:2222", "ps1:2222"], "worker": {"1": "worker1:2222"}}
)
expected_proto = """
job { name: 'ps' tasks { key: 0 value: 'ps0:2222' }
tasks { key: 1 value: 'ps1:2222' } }
job { name: 'worker' tasks { key: 1 value: 'worker1:2222' } }
"""
self.assertProtoEquals(expected_proto, cluster_spec.as_cluster_def())
self.assertProtoEquals(
expected_proto, server_lib.ClusterSpec(cluster_spec).as_cluster_def()
)
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec.as_cluster_def()).as_cluster_def(),
)
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec.as_dict()).as_cluster_def(),
)
def testProtoDictDefEquivalencesWithZeroWorker(self):
cluster_spec = server_lib.ClusterSpec(
{"ps": ["ps0:2222", "ps1:2222"], "worker": []}
)
expected_proto = """
job { name: 'ps' tasks { key: 0 value: 'ps0:2222' }
tasks { key: 1 value: 'ps1:2222' } }
job { name: 'worker' }
"""
self.assertProtoEquals(expected_proto, cluster_spec.as_cluster_def())
self.assertProtoEquals(
expected_proto, server_lib.ClusterSpec(cluster_spec).as_cluster_def()
)
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec.as_cluster_def()).as_cluster_def(),
)
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec.as_dict()).as_cluster_def(),
)
def testClusterSpecAccessors(self):
original_dict = {
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"],
"sparse": {0: "sparse0:2222", 3: "sparse3:2222"},
}
cluster_spec = server_lib.ClusterSpec(original_dict)
self.assertEqual(original_dict, cluster_spec.as_dict())
self.assertEqual(2, cluster_spec.num_tasks("ps"))
self.assertEqual(3, cluster_spec.num_tasks("worker"))
self.assertEqual(2, cluster_spec.num_tasks("sparse"))
with self.assertRaises(ValueError):
cluster_spec.num_tasks("unknown")
self.assertEqual("ps0:2222", cluster_spec.task_address("ps", 0))
self.assertEqual("sparse0:2222", cluster_spec.task_address("sparse", 0))
with self.assertRaises(ValueError):
cluster_spec.task_address("unknown", 0)
with self.assertRaises(ValueError):
cluster_spec.task_address("sparse", 2)
self.assertEqual([0, 1], cluster_spec.task_indices("ps"))
self.assertEqual([0, 1, 2], cluster_spec.task_indices("worker"))
self.assertEqual([0, 3], cluster_spec.task_indices("sparse"))
with self.assertRaises(ValueError):
cluster_spec.task_indices("unknown")
# NOTE(mrry): `ClusterSpec.job_tasks()` is not recommended for use
# with sparse jobs.
self.assertEqual(["ps0:2222", "ps1:2222"], cluster_spec.job_tasks("ps"))
self.assertEqual(
["worker0:2222", "worker1:2222", "worker2:2222"],
cluster_spec.job_tasks("worker"),
)
self.assertEqual(
["sparse0:2222", None, None, "sparse3:2222"],
cluster_spec.job_tasks("sparse"),
)
with self.assertRaises(ValueError):
cluster_spec.job_tasks("unknown")
def testEmptyClusterSpecIsFalse(self):
self.assertFalse(server_lib.ClusterSpec({}))
def testNonEmptyClusterSpecIsTrue(self):
self.assertTrue(server_lib.ClusterSpec({"job": ["host:port"]}))
def testEq(self):
self.assertEqual(server_lib.ClusterSpec({}), server_lib.ClusterSpec({}))
self.assertEqual(
server_lib.ClusterSpec({"job": ["host:2222"]}),
server_lib.ClusterSpec({"job": ["host:2222"]}),
)
self.assertEqual(
server_lib.ClusterSpec({"job": {0: "host:2222"}}),
server_lib.ClusterSpec({"job": ["host:2222"]}),
)
def testNe(self):
self.assertNotEqual(
server_lib.ClusterSpec({}),
server_lib.ClusterSpec({"job": ["host:2223"]}),
)
self.assertNotEqual(
server_lib.ClusterSpec({"job1": ["host:2222"]}),
server_lib.ClusterSpec({"job2": ["host:2222"]}),
)
self.assertNotEqual(
server_lib.ClusterSpec({"job": ["host:2222"]}),
server_lib.ClusterSpec({"job": ["host:2223"]}),
)
self.assertNotEqual(
server_lib.ClusterSpec({"job": ["host:2222", "host:2223"]}),
server_lib.ClusterSpec({"job": ["host:2223", "host:2222"]}),
)
if __name__ == "__main__":
test.main()
| ClusterSpecTest |
python | numba__numba | numba/tests/test_parallel_backend.py | {
"start": 13910,
"end": 15628
} | class ____(TestCase):
"""
Helper class for running an isolated piece of code based on a template
"""
# sys path injection and separate usecase module to make sure everything
# is importable by children of multiprocessing
_here = "%r" % os.path.dirname(__file__)
template = """if 1:
import sys
sys.path.insert(0, "%(here)r")
import multiprocessing
import numpy as np
from numba import njit
import numba
try:
import threading_backend_usecases
except ImportError as e:
print("DEBUG:", sys.path)
raise e
import os
sigterm_handler = threading_backend_usecases.sigterm_handler
busy_func = threading_backend_usecases.busy_func
def the_test():
%%s
if __name__ == "__main__":
the_test()
""" % {'here': _here}
def run_cmd(self, cmdline, env=None):
if env is None:
env = os.environ.copy()
env['NUMBA_THREADING_LAYER'] = str("omp")
popen = subprocess.Popen(cmdline,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
# finish in _TEST_TIMEOUT seconds or kill it
timeout = threading.Timer(_TEST_TIMEOUT, popen.kill)
try:
timeout.start()
out, err = popen.communicate()
if popen.returncode != 0:
raise AssertionError(
"process failed with code %s: stderr follows\n%s\n" %
(popen.returncode, err.decode()))
finally:
timeout.cancel()
return out.decode(), err.decode()
@skip_parfors_unsupported
| ThreadLayerTestHelper |
python | tensorflow__tensorflow | tensorflow/python/ops/math_grad_test.py | {
"start": 19911,
"end": 21892
} | class ____(test.TestCase):
def _xlogy_gradients(self, x, y):
xlogy_xgrad = self.evaluate(gradients.gradients(math_ops.xlogy(x, y), x)[0])
xlogy_ygrad = self.evaluate(gradients.gradients(math_ops.xlogy(x, y), y)[0])
return xlogy_xgrad, xlogy_ygrad
@test_util.run_deprecated_v1
def testNonZeroValuesGrad(self):
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
x = constant_op.constant(0.1, dtype=dtype)
y = constant_op.constant(3.1, dtype=dtype)
xlogy_xgrad, xlogy_ygrad = self._xlogy_gradients(x, y)
xlogy_expected_xgrad = self.evaluate(math_ops.log(y))
xlogy_expected_ygrad = self.evaluate(x / y)
self.assertAllClose(xlogy_expected_xgrad, xlogy_xgrad)
self.assertAllClose(xlogy_expected_ygrad, xlogy_ygrad)
@test_util.run_deprecated_v1
def testZeroXGrad(self):
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
x = constant_op.constant(0., dtype=dtype)
y = constant_op.constant(3.1, dtype=dtype)
xlogy_xgrad, xlogy_ygrad = self._xlogy_gradients(x, y)
zero = self.evaluate(x)
self.assertAllClose(zero, xlogy_xgrad)
self.assertAllClose(zero, xlogy_ygrad)
@test_util.run_deprecated_v1
def testZeroYGrad(self):
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
x = constant_op.constant(0.1, dtype=dtype)
y = constant_op.constant(0., dtype=dtype)
xlogy_xgrad, xlogy_ygrad = self._xlogy_gradients(x, y)
self.assertAllClose(-np.inf, xlogy_xgrad)
self.assertAllClose(np.inf, xlogy_ygrad)
@test_util.run_deprecated_v1
def testZeroXYGrad(self):
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
x = constant_op.constant(0., dtype=dtype)
y = constant_op.constant(0., dtype=dtype)
xlogy_xgrad, xlogy_ygrad = self._xlogy_gradients(x, y)
zero = self.evaluate(x)
self.assertAllClose(zero, xlogy_xgrad)
self.assertAllClose(zero, xlogy_ygrad)
| XlogyTest |
python | mlflow__mlflow | tests/dspy/test_save.py | {
"start": 1263,
"end": 17526
} | class ____(dspy.Module):
def __init__(self):
super().__init__()
self.prog = dspy.ChainOfThought("question -> answer: int")
def forward(self, question):
return self.prog(question=question).answer
@pytest.fixture(autouse=True)
def reset_dspy_settings():
yield
dspy.settings.configure(lm=None, rm=None)
def test_basic_save():
dspy_model = CoT()
dspy.settings.configure(lm=dspy.LM(model="openai/gpt-4o-mini", max_tokens=250))
with mlflow.start_run():
model_info = mlflow.dspy.log_model(dspy_model, name="model")
# Clear the lm setting to test the loading logic.
dspy.settings.configure(lm=None)
loaded_model = mlflow.dspy.load_model(model_info.model_uri)
# Check that the global settings is popped back.
assert dspy.settings.lm.model == "openai/gpt-4o-mini"
assert isinstance(loaded_model, CoT)
def test_save_compiled_model(dummy_model):
train_data = [
"What is 2 + 2?",
"What is 3 + 3?",
"What is 4 + 4?",
"What is 5 + 5?",
]
train_label = ["4", "6", "8", "10"]
trainset = [
dspy.Example(question=q, answer=a).with_inputs("question")
for q, a in zip(train_data, train_label)
]
def dummy_metric(program):
return 1.0
dspy.settings.configure(lm=dummy_model)
dspy_model = CoT()
optimizer = dspy.teleprompt.BootstrapFewShot(metric=dummy_metric)
optimized_cot = optimizer.compile(dspy_model, trainset=trainset)
with mlflow.start_run():
model_info = mlflow.dspy.log_model(optimized_cot, name="model")
# Clear the lm setting to test the loading logic.
dspy.settings.configure(lm=None)
loaded_model = mlflow.dspy.load_model(model_info.model_uri)
assert isinstance(loaded_model, CoT)
assert loaded_model.prog.predictors()[0].demos == optimized_cot.prog.predictors()[0].demos
def test_dspy_save_preserves_object_state():
class GenerateAnswer(dspy.Signature):
"""Answer questions with short factoid answers."""
context = dspy.InputField(desc="may contain relevant facts")
question = dspy.InputField()
answer = dspy.OutputField(desc="often between 1 and 5 words")
class RAG(dspy.Module):
def __init__(self, num_passages=3):
super().__init__()
self.retrieve = dspy.Retrieve(k=num_passages)
self.generate_answer = dspy.ChainOfThought(GenerateAnswer)
def forward(self, question):
assert question == "What is 2 + 2?"
context = self.retrieve(question).passages
prediction = self.generate_answer(context=context, question=question)
return dspy.Prediction(context=context, answer=prediction.answer)
def dummy_metric(*args, **kwargs):
return 1.0
model = DummyLM([{"answer": answer, "reasoning": "reason"} for answer in ["4", "6", "8", "10"]])
rm = dummy_rm(passages=["dummy1", "dummy2", "dummy3"])
dspy.settings.configure(lm=model, rm=rm)
train_data = [
"What is 2 + 2?",
"What is 3 + 3?",
"What is 4 + 4?",
"What is 5 + 5?",
]
train_label = ["4", "6", "8", "10"]
trainset = [
dspy.Example(question=q, answer=a).with_inputs("question").with_inputs("reasoning")
for q, a in zip(train_data, train_label)
]
dspy_model = RAG()
optimizer = dspy.teleprompt.BootstrapFewShot(metric=dummy_metric)
optimized_cot = optimizer.compile(dspy_model, trainset=trainset)
with mlflow.start_run():
model_info = mlflow.dspy.log_model(optimized_cot, name="model")
original_settings = dict(dspy.settings.config)
original_settings["traces"] = None
# Clear the lm setting to test the loading logic.
dspy.settings.configure(lm=None)
model_url = model_info.model_uri
input_examples = {"inputs": ["What is 2 + 2?"]}
# test that the model can be served
response = pyfunc_serve_and_score_model(
model_uri=model_url,
data=json.dumps(input_examples),
content_type="application/json",
extra_args=["--env-manager", "local"],
)
expect_status_code(response, 200)
loaded_model = mlflow.dspy.load_model(model_url)
assert isinstance(loaded_model, RAG)
assert loaded_model.retrieve is not None
assert (
loaded_model.generate_answer.predictors()[0].demos
== optimized_cot.generate_answer.predictors()[0].demos
)
loaded_settings = dict(dspy.settings.config)
loaded_settings["traces"] = None
assert loaded_settings["lm"].model == original_settings["lm"].model
assert loaded_settings["lm"].model_type == original_settings["lm"].model_type
assert loaded_settings["rm"].__dict__ == original_settings["rm"].__dict__
del (
loaded_settings["lm"],
original_settings["lm"],
loaded_settings["rm"],
original_settings["rm"],
)
assert original_settings == loaded_settings
def test_load_logged_model_in_native_dspy(dummy_model):
dspy_model = CoT()
# Arbitrary set the demo to test saving/loading has no data loss.
dspy_model.prog.predictors()[0].demos = [
"What is 2 + 2?",
"What is 3 + 3?",
"What is 4 + 4?",
"What is 5 + 5?",
]
dspy.settings.configure(lm=dummy_model)
with mlflow.start_run():
model_info = mlflow.dspy.log_model(dspy_model, name="model")
loaded_dspy_model = mlflow.dspy.load_model(model_info.model_uri)
assert isinstance(loaded_dspy_model, CoT)
assert loaded_dspy_model.prog.predictors()[0].demos == dspy_model.prog.predictors()[0].demos
def test_serving_logged_model(dummy_model):
class CoT(dspy.Module):
def __init__(self):
super().__init__()
self.prog = dspy.ChainOfThought("question -> answer")
def forward(self, question):
assert question == "What is 2 + 2?"
return self.prog(question=question)
dspy_model = CoT()
dspy.settings.configure(lm=dummy_model)
input_examples = {"inputs": ["What is 2 + 2?"]}
input_schema = Schema([ColSpec("string")])
output_schema = Schema([ColSpec("string")])
signature = ModelSignature(inputs=input_schema, outputs=output_schema)
artifact_path = "model"
with mlflow.start_run():
model_info = mlflow.dspy.log_model(
dspy_model,
name=artifact_path,
signature=signature,
input_example=["What is 2 + 2?"],
)
model_uri = model_info.model_uri
dspy.settings.configure(lm=None)
response = pyfunc_serve_and_score_model(
model_uri=model_uri,
data=json.dumps(input_examples),
content_type="application/json",
extra_args=["--env-manager", "local"],
)
expect_status_code(response, 200)
json_response = json.loads(response.content)
assert _REASONING_KEYWORD in json_response["predictions"]
assert "answer" in json_response["predictions"]
def test_log_model_multi_inputs(dummy_model):
class MultiInputCoT(dspy.Module):
def __init__(self):
super().__init__()
self.prog = dspy.ChainOfThought("question, hint -> answer")
def forward(self, question, hint):
assert question == "What is 2 + 2?"
assert hint == "Hint: 2 + 2 = ?"
return self.prog(question=question, hint=hint)
dspy_model = MultiInputCoT()
dspy.settings.configure(lm=dummy_model)
input_example = {"question": "What is 2 + 2?", "hint": "Hint: 2 + 2 = ?"}
with mlflow.start_run():
model_info = mlflow.dspy.log_model(
dspy_model,
name="model",
input_example=input_example,
)
loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)
assert loaded_model.predict(input_example) == {"answer": "6", _REASONING_KEYWORD: "reason"}
response = pyfunc_serve_and_score_model(
model_uri=model_info.model_uri,
data=json.dumps({"inputs": [input_example]}),
content_type="application/json",
extra_args=["--env-manager", "local"],
)
expect_status_code(response, 200)
json_response = json.loads(response.content)
assert _REASONING_KEYWORD in json_response["predictions"]
assert "answer" in json_response["predictions"]
def test_save_chat_model_with_string_output(dummy_model):
class CoT(dspy.Module):
def __init__(self):
super().__init__()
self.prog = dspy.ChainOfThought("question -> answer")
def forward(self, inputs):
# DSPy chat model's inputs is a list of dict with keys roles (optional) and content.
# And here we output a single string.
return self.prog(question=inputs[0]["content"]).answer
dspy_model = CoT()
dspy.settings.configure(lm=dummy_model)
input_examples = {"messages": [{"role": "user", "content": "What is 2 + 2?"}]}
artifact_path = "model"
with mlflow.start_run():
model_info = mlflow.dspy.log_model(
dspy_model,
name=artifact_path,
task="llm/v1/chat",
input_example=input_examples,
)
loaded_pyfunc = mlflow.pyfunc.load_model(model_info.model_uri)
response = loaded_pyfunc.predict(input_examples)
assert "choices" in response
assert len(response["choices"]) == 1
assert "message" in response["choices"][0]
# The content should just be a string.
assert response["choices"][0]["message"]["content"] == "4"
def test_serve_chat_model(dummy_model):
class CoT(dspy.Module):
def __init__(self):
super().__init__()
self.prog = dspy.ChainOfThought("question -> answer")
def forward(self, inputs):
return self.prog(question=inputs[0]["content"])
dspy_model = CoT()
dspy.settings.configure(lm=dummy_model)
input_examples = {"messages": [{"role": "user", "content": "What is 2 + 2?"}]}
artifact_path = "model"
with mlflow.start_run():
model_info = mlflow.dspy.log_model(
dspy_model,
name=artifact_path,
task="llm/v1/chat",
input_example=input_examples,
)
dspy.settings.configure(lm=None)
response = pyfunc_serve_and_score_model(
model_uri=model_info.model_uri,
data=json.dumps(input_examples),
content_type="application/json",
extra_args=["--env-manager", "local"],
)
expect_status_code(response, 200)
json_response = json.loads(response.content)
assert "choices" in json_response
assert len(json_response["choices"]) == 1
assert "message" in json_response["choices"][0]
assert _REASONING_KEYWORD in json_response["choices"][0]["message"]["content"]
assert "answer" in json_response["choices"][0]["message"]["content"]
def test_code_paths_is_used():
artifact_path = "model"
dspy_model = CoT()
with (
mlflow.start_run(),
mock.patch("mlflow.dspy.load._add_code_from_conf_to_system_path") as add_mock,
):
model_info = mlflow.dspy.log_model(dspy_model, name=artifact_path, code_paths=[__file__])
_compare_logged_code_paths(__file__, model_info.model_uri, "dspy")
mlflow.dspy.load_model(model_info.model_uri)
add_mock.assert_called()
def test_additional_pip_requirements():
expected_mlflow_version = _mlflow_major_version_string()
artifact_path = "model"
dspy_model = CoT()
with mlflow.start_run():
model_info = mlflow.dspy.log_model(
dspy_model, name=artifact_path, extra_pip_requirements=["dummy"]
)
_assert_pip_requirements(model_info.model_uri, [expected_mlflow_version, "dummy"])
def test_infer_signature_from_input_examples(dummy_model):
artifact_path = "model"
dspy_model = CoT()
dspy.settings.configure(lm=dummy_model)
with mlflow.start_run():
model_info = mlflow.dspy.log_model(
dspy_model, name=artifact_path, input_example="what is 2 + 2?"
)
loaded_model = Model.load(model_info.model_uri)
assert loaded_model.signature.inputs == Schema([ColSpec("string")])
assert loaded_model.signature.outputs == Schema(
[
ColSpec(name="answer", type="string"),
ColSpec(name=_REASONING_KEYWORD, type="string"),
]
)
@skip_if_2_6_23_or_older
def test_predict_stream_unsupported_schema(dummy_model):
dspy_model = NumericalCoT()
dspy.settings.configure(lm=dummy_model)
model_info = mlflow.dspy.log_model(dspy_model, name="model")
loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)
assert not loaded_model._model_meta.flavors["python_function"]["streamable"]
output = loaded_model.predict_stream({"question": "What is 2 + 2?"})
with pytest.raises(
mlflow.exceptions.MlflowException,
match="This model does not support predict_stream method.",
):
next(output)
@skip_if_2_6_23_or_older
def test_predict_stream_success(dummy_model):
dspy_model = CoT()
dspy.settings.configure(lm=dummy_model)
model_info = mlflow.dspy.log_model(
dspy_model, name="model", input_example={"question": "what is 2 + 2?"}
)
loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)
assert loaded_model._model_meta.flavors["python_function"]["streamable"]
results = []
def dummy_streamify(*args, **kwargs):
# In dspy>=3, `StreamResponse` requires `is_last_chunk` argument.
# https://github.com/stanfordnlp/dspy/pull/8587
extra_kwargs = {"is_last_chunk": False} if _DSPY_VERSION.major >= 3 else {}
yield dspy.streaming.StreamResponse(
predict_name="prog.predict",
signature_field_name="answer",
chunk="2",
**extra_kwargs,
)
extra_kwargs = {"is_last_chunk": True} if _DSPY_VERSION.major >= 3 else {}
yield dspy.streaming.StreamResponse(
predict_name="prog.predict",
signature_field_name=_REASONING_KEYWORD,
chunk="reason",
**extra_kwargs,
)
with mock.patch("dspy.streamify", return_value=dummy_streamify):
output = loaded_model.predict_stream({"question": "What is 2 + 2?"})
for o in output:
results.append(o)
assert len(results) == 2
extra_kwargs = {"is_last_chunk": False} if _DSPY_VERSION.major >= 3 else {}
assert results[0] == {
"predict_name": "prog.predict",
"signature_field_name": "answer",
"chunk": "2",
**extra_kwargs,
}
extra_kwargs = {"is_last_chunk": True} if _DSPY_VERSION.major >= 3 else {}
assert results[1] == {
"predict_name": "prog.predict",
"signature_field_name": _REASONING_KEYWORD,
"chunk": "reason",
**extra_kwargs,
}
def test_predict_output(dummy_model):
class MockModelReturningNonPrediction(dspy.Module):
def forward(self, question):
# Return a plain dict instead of dspy.Prediction
return {"answer": "4", "custom_field": "custom_value"}
class MockModelReturningPrediction(dspy.Module):
def forward(self, question):
# Return a dspy.Prediction
prediction = dspy.Prediction()
prediction.answer = "4"
prediction.custom_field = "custom_value"
return prediction
dspy.settings.configure(lm=dummy_model)
non_prediction_model = MockModelReturningNonPrediction()
model_info = mlflow.dspy.log_model(non_prediction_model, name="non_prediction_model")
loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)
result = loaded_model.predict("What is 2 + 2?")
assert isinstance(result, dict)
assert result == {"answer": "4", "custom_field": "custom_value"}
prediction_model = MockModelReturningPrediction()
model_info = mlflow.dspy.log_model(prediction_model, name="prediction_model")
loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)
result = loaded_model.predict("What is 2 + 2?")
assert isinstance(result, dict)
assert result == {"answer": "4", "custom_field": "custom_value"}
| NumericalCoT |
python | pandas-dev__pandas | pandas/tests/series/test_constructors.py | {
"start": 82044,
"end": 83917
} | class ____:
def test_series_constructor_datetimelike_index_coercion(self):
idx = date_range("2020-01-01", periods=5)
ser = Series(
np.random.default_rng(2).standard_normal(len(idx)), idx.astype(object)
)
# as of 2.0, we no longer silently cast the object-dtype index
# to DatetimeIndex GH#39307, GH#23598
assert not isinstance(ser.index, DatetimeIndex)
@pytest.mark.parametrize("container", [None, np.array, Series, Index])
@pytest.mark.parametrize("data", [1.0, range(4)])
def test_series_constructor_infer_multiindex(self, container, data):
indexes = [["a", "a", "b", "b"], ["x", "y", "x", "y"]]
if container is not None:
indexes = [container(ind) for ind in indexes]
multi = Series(data, index=indexes)
assert isinstance(multi.index, MultiIndex)
# TODO: make this not cast to object in pandas 3.0
@pytest.mark.skipif(
not np_version_gt2, reason="StringDType only available in numpy 2 and above"
)
@pytest.mark.parametrize(
"data",
[
["a", "b", "c"],
["a", "b", np.nan],
],
)
def test_np_string_array_object_cast(self, data):
from numpy.dtypes import StringDType
arr = np.array(data, dtype=StringDType())
res = Series(arr)
assert res.dtype == np.object_
if data[-1] is np.nan:
# as of GH#62522 the comparison op for `res==data` casts data
# using sanitize_array, which casts to 'str' dtype, which does not
# consider string 'nan' to be equal to np.nan,
# (which apparently numpy does? weird.)
assert (res.iloc[:-1] == data[:-1]).all()
assert res.iloc[-1] == "nan"
else:
assert (res == data).all()
| TestSeriesConstructorIndexCoercion |
python | weaviate__weaviate-python-client | weaviate/connect/base.py | {
"start": 1137,
"end": 6514
} | class ____(BaseModel):
http: ProtocolParams
grpc: ProtocolParams
@classmethod
def from_url(cls, url: str, grpc_port: int, grpc_secure: bool = False) -> "ConnectionParams":
parsed_url = urlparse(url)
if parsed_url.scheme not in ["http", "https"]:
raise ValueError(f"Unsupported scheme: {parsed_url.scheme}")
if parsed_url.port is None:
port = 443 if parsed_url.scheme == "https" else 80
else:
port = parsed_url.port
return cls(
http=ProtocolParams(
host=cast(str, parsed_url.hostname),
port=port,
secure=parsed_url.scheme == "https",
),
grpc=ProtocolParams(
host=cast(str, parsed_url.hostname),
port=grpc_port,
secure=grpc_secure or parsed_url.scheme == "https",
),
)
@classmethod
def from_params(
cls,
http_host: str,
http_port: int,
http_secure: bool,
grpc_host: str,
grpc_port: int,
grpc_secure: bool,
) -> "ConnectionParams":
return cls(
http=ProtocolParams(
host=http_host,
port=http_port,
secure=http_secure,
),
grpc=ProtocolParams(
host=grpc_host,
port=grpc_port,
secure=grpc_secure,
),
)
@model_validator(mode="after")
def _check_port_collision(self: T) -> T:
if self.http.host == self.grpc.host and self.http.port == self.grpc.port:
raise ValueError("http.port and grpc.port must be different if using the same host")
return self
@property
def _grpc_address(self) -> Tuple[str, int]:
return (self.grpc.host, self.grpc.port)
@property
def _grpc_target(self) -> str:
return f"{self.grpc.host}:{self.grpc.port}"
def _grpc_channel(
self, proxies: Dict[str, str], grpc_msg_size: Optional[int], is_async: bool
) -> Union[AsyncChannel, SyncChannel]:
if grpc_msg_size is None:
grpc_msg_size = MAX_GRPC_MESSAGE_LENGTH
opts = [
("grpc.max_send_message_length", grpc_msg_size),
("grpc.max_receive_message_length", grpc_msg_size),
("grpc.default_authority", self.grpc.host),
]
if (p := proxies.get("grpc")) is not None:
options: list = [*opts, ("grpc.http_proxy", p)]
else:
options = opts
if is_async:
mod = grpc.aio
else:
mod = grpc
if self.grpc.secure:
return mod.secure_channel(
target=self._grpc_target,
credentials=ssl_channel_credentials(),
options=options,
)
else:
return mod.insecure_channel(
target=self._grpc_target,
options=options,
)
@property
def _http_scheme(self) -> str:
return "https" if self.http.secure else "http"
@property
def _http_url(self) -> str:
return f"{self._http_scheme}://{self.http.host}:{self.http.port}"
def _get_proxies(proxies: Union[dict, str, Proxies, None], trust_env: bool) -> Dict[str, str]:
"""Get proxies as dict, compatible with 'requests' library.
NOTE: 'proxies' has priority over 'trust_env', i.e. if 'proxies' is NOT None, 'trust_env'
is ignored.
Args:
proxies: The proxies to use for requests. If it is a dict it should follow 'requests' library
format (https://docs.python-requests.org/en/stable/user/advanced/#proxies). If it is
a URL (str), a dict will be constructed with both 'http' and 'https' pointing to that
URL. If None, no proxies will be used.
trust_env: If True, the proxies will be read from ENV VARs (case insensitive):
HTTP_PROXY/HTTPS_PROXY. NOTE: It is ignored if 'proxies' is NOT None.
Returns:
A dictionary with proxies, either set from 'proxies' or read from ENV VARs.
"""
if proxies is not None:
if isinstance(proxies, str):
return {
"http": proxies,
"https": proxies,
"grpc": proxies,
}
if isinstance(proxies, dict):
return proxies
if isinstance(proxies, Proxies):
return proxies.model_dump(exclude_none=True)
raise TypeError(
"If 'proxies' is not None, it must be of type dict, str, or wvc.init.Proxies. "
f"Given type: {type(proxies)}."
)
if not trust_env:
return {}
http_proxy = (os.environ.get("HTTP_PROXY"), os.environ.get("http_proxy"))
https_proxy = (os.environ.get("HTTPS_PROXY"), os.environ.get("https_proxy"))
grpc_proxy = (os.environ.get("GRPC_PROXY"), os.environ.get("grpc_proxy"))
if not any(http_proxy + https_proxy + grpc_proxy):
return {}
proxies = {}
if any(http_proxy):
proxies["http"] = http_proxy[0] if http_proxy[0] else http_proxy[1]
if any(https_proxy):
proxies["https"] = https_proxy[0] if https_proxy[0] else https_proxy[1]
if any(grpc_proxy):
proxies["grpc"] = grpc_proxy[0] if grpc_proxy[0] else grpc_proxy[1]
return proxies
| ConnectionParams |
python | ray-project__ray | python/ray/train/v2/_internal/callbacks/tpu_reservation_callback.py | {
"start": 231,
"end": 1662
} | class ____(ControllerCallback):
"""A callback to handle TPU slice reservation for multi-host training."""
def on_controller_start_worker_group(
self, *, scaling_config: ScalingConfig, num_workers: int
) -> Optional[Dict[str, str]]:
"""Reserves a multi-host TPU slice before the worker group starts.
This hook is called by the TrainController. It checks if multi-host
TPUs are being used and, if so, reserves a slice.
Args:
scaling_config: The scaling configuration for the run.
num_workers: The number of workers to be started.
Returns:
A dictionary defining a `bundle_label_selector` to gang schedule
the worker group on the reserved TPU slice.
"""
bundle_label_selector = None
if scaling_config.use_tpu and num_workers > 1:
assert scaling_config.accelerator_type is not None
assert scaling_config.topology is not None
slice_name = reserve_tpu_slice(
topology=scaling_config.topology,
accelerator_type=scaling_config.accelerator_type,
)
if not slice_name:
raise RuntimeError("Failed to reserve TPU slice.")
bundle_label_selector = {
ray._raylet.RAY_NODE_TPU_SLICE_NAME_KEY: slice_name
}
return bundle_label_selector
| TPUReservationCallback |
python | Netflix__metaflow | metaflow/plugins/cards/card_modules/components.py | {
"start": 23762,
"end": 31956
} | class ____(UserComponent):
"""
A Value Box component for displaying key metrics with styling and change indicators.
Inspired by Shiny's value box component, this displays a primary value with optional
title, subtitle, theme, and change indicators.
Example:
```
# Basic value box
value_box = ValueBox(
title="Revenue",
value="$1.2M",
subtitle="Monthly Revenue",
change_indicator="Up 15% from last month"
)
current.card.append(value_box)
# Themed value box
value_box = ValueBox(
title="Total Savings",
value=50000,
theme="success",
change_indicator="Up 30% from last month"
)
current.card.append(value_box)
# Updatable value box for real-time metrics
metrics_box = ValueBox(
title="Processing Progress",
value=0,
subtitle="Items processed"
)
current.card.append(metrics_box)
for i in range(1000):
metrics_box.update(value=i, change_indicator=f"Rate: {i*2}/sec")
```
Parameters
----------
title : str, optional
The title/label for the value box (usually displayed above the value).
Must be 200 characters or less.
value : Union[str, int, float]
The main value to display prominently. Required parameter.
subtitle : str, optional
Additional descriptive text displayed below the title.
Must be 300 characters or less.
theme : str, optional
CSS class name for styling the value box. Supported themes: 'default', 'success',
'warning', 'danger', 'bg-gradient-indigo-purple'. Custom themes must be valid CSS class names.
change_indicator : str, optional
Text indicating change or additional context (e.g., "Up 30% VS PREVIOUS 30 DAYS").
Must be 200 characters or less.
"""
type = "valueBox"
REALTIME_UPDATABLE = True
# Valid built-in themes
VALID_THEMES = {
"default",
"success",
"warning",
"danger",
"bg-gradient-indigo-purple",
}
def __init__(
self,
title: Optional[str] = None,
value: Union[str, int, float] = "",
subtitle: Optional[str] = None,
theme: Optional[str] = None,
change_indicator: Optional[str] = None,
):
# Validate inputs
self._validate_title(title)
self._validate_value(value)
self._validate_subtitle(subtitle)
self._validate_theme(theme)
self._validate_change_indicator(change_indicator)
self._title = title
self._value = value
self._subtitle = subtitle
self._theme = theme
self._change_indicator = change_indicator
def update(
self,
title: Optional[str] = None,
value: Optional[Union[str, int, float]] = None,
subtitle: Optional[str] = None,
theme: Optional[str] = None,
change_indicator: Optional[str] = None,
):
"""
Update the value box with new data.
Parameters
----------
title : str, optional
New title for the value box.
value : Union[str, int, float], optional
New value to display.
subtitle : str, optional
New subtitle text.
theme : str, optional
New theme/styling class.
change_indicator : str, optional
New change indicator text.
"""
if title is not None:
self._validate_title(title)
self._title = title
if value is not None:
self._validate_value(value)
self._value = value
if subtitle is not None:
self._validate_subtitle(subtitle)
self._subtitle = subtitle
if theme is not None:
self._validate_theme(theme)
self._theme = theme
if change_indicator is not None:
self._validate_change_indicator(change_indicator)
self._change_indicator = change_indicator
def _validate_title(self, title: Optional[str]) -> None:
"""Validate title parameter."""
if title is not None:
if not isinstance(title, str):
raise TypeError(f"Title must be a string, got {type(title).__name__}")
if len(title) > 200:
raise ValueError(
f"Title must be 200 characters or less, got {len(title)} characters"
)
if not title.strip():
raise ValueError("Title cannot be empty or whitespace only")
def _validate_value(self, value: Union[str, int, float]) -> None:
"""Validate value parameter."""
if value is None:
raise ValueError("Value is required and cannot be None")
if not isinstance(value, (str, int, float)):
raise TypeError(
f"Value must be str, int, or float, got {type(value).__name__}"
)
if isinstance(value, str):
if len(value) > 100:
raise ValueError(
f"String value must be 100 characters or less, got {len(value)} characters"
)
if not value.strip():
raise ValueError("String value cannot be empty or whitespace only")
if isinstance(value, (int, float)):
if not (-1e15 <= value <= 1e15):
raise ValueError(
f"Numeric value must be between -1e15 and 1e15, got {value}"
)
def _validate_subtitle(self, subtitle: Optional[str]) -> None:
"""Validate subtitle parameter."""
if subtitle is not None:
if not isinstance(subtitle, str):
raise TypeError(
f"Subtitle must be a string, got {type(subtitle).__name__}"
)
if len(subtitle) > 300:
raise ValueError(
f"Subtitle must be 300 characters or less, got {len(subtitle)} characters"
)
if not subtitle.strip():
raise ValueError("Subtitle cannot be empty or whitespace only")
def _validate_theme(self, theme: Optional[str]) -> None:
"""Validate theme parameter."""
if theme is not None:
if not isinstance(theme, str):
raise TypeError(f"Theme must be a string, got {type(theme).__name__}")
if not theme.strip():
raise ValueError("Theme cannot be empty or whitespace only")
# Allow custom themes but warn if not in valid set
if theme not in self.VALID_THEMES:
import re
# Basic CSS class name validation
if not re.match(r"^[a-zA-Z][a-zA-Z0-9_-]*$", theme):
raise ValueError(
f"Theme must be a valid CSS class name, got '{theme}'"
)
def _validate_change_indicator(self, change_indicator: Optional[str]) -> None:
"""Validate change_indicator parameter."""
if change_indicator is not None:
if not isinstance(change_indicator, str):
raise TypeError(
f"Change indicator must be a string, got {type(change_indicator).__name__}"
)
if len(change_indicator) > 200:
raise ValueError(
f"Change indicator must be 200 characters or less, got {len(change_indicator)} characters"
)
if not change_indicator.strip():
raise ValueError("Change indicator cannot be empty or whitespace only")
@with_default_component_id
@render_safely
def render(self):
data = {
"type": self.type,
"id": self.component_id,
"value": self._value,
}
if self._title is not None:
data["title"] = self._title
if self._subtitle is not None:
data["subtitle"] = self._subtitle
if self._theme is not None:
data["theme"] = self._theme
if self._change_indicator is not None:
data["change_indicator"] = self._change_indicator
return data
| ValueBox |
python | fluentpython__example-code | 20-descriptor/bulkfood/bulkfood_v5.py | {
"start": 1746,
"end": 2094
} | class ____:
description = model.NonBlank() # <2>
weight = model.Quantity()
price = model.Quantity()
def __init__(self, description, weight, price):
self.description = description
self.weight = weight
self.price = price
def subtotal(self):
return self.weight * self.price
# END LINEITEM_V5
| LineItem |
python | coleifer__peewee | playhouse/sqlite_udf.py | {
"start": 10309,
"end": 10860
} | class ____(_heap_agg):
def finalize(self):
if self.ct == 0:
return
elif self.ct == 1:
return 0
total = ct = 0
prev = None
while self.heap:
if total == 0:
if prev is None:
prev = heapq.heappop(self.heap)
continue
curr = heapq.heappop(self.heap)
diff = curr - prev
ct += 1
total += diff
prev = curr
return float(total) / ct
@aggregate(MATH)
| avgrange |
python | huggingface__transformers | src/transformers/models/clap/modeling_clap.py | {
"start": 51626,
"end": 52262
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput
| ClapTextIntermediate |
python | paramiko__paramiko | paramiko/ecdsakey.py | {
"start": 3154,
"end": 11653
} | class ____(PKey):
"""
Representation of an ECDSA key which can be used to sign and verify SSH2
data.
"""
_ECDSA_CURVES = _ECDSACurveSet(
[
_ECDSACurve(ec.SECP256R1, "nistp256"),
_ECDSACurve(ec.SECP384R1, "nistp384"),
_ECDSACurve(ec.SECP521R1, "nistp521"),
]
)
def __init__(
self,
msg=None,
data=None,
filename=None,
password=None,
vals=None,
file_obj=None,
# TODO 4.0: remove; it does nothing since porting to cryptography.io
validate_point=True,
):
self.verifying_key = None
self.signing_key = None
self.public_blob = None
if file_obj is not None:
self._from_private_key(file_obj, password)
return
if filename is not None:
self._from_private_key_file(filename, password)
return
if (msg is None) and (data is not None):
msg = Message(data)
if vals is not None:
self.signing_key, self.verifying_key = vals
c_class = self.signing_key.curve.__class__
self.ecdsa_curve = self._ECDSA_CURVES.get_by_curve_class(c_class)
else:
# Must set ecdsa_curve first; subroutines called herein may need to
# spit out our get_name(), which relies on this.
key_type = msg.get_text()
# But this also means we need to hand it a real key/curve
# identifier, so strip out any cert business. (NOTE: could push
# that into _ECDSACurveSet.get_by_key_format_identifier(), but it
# feels more correct to do it here?)
suffix = "-cert-v01@openssh.com"
if key_type.endswith(suffix):
key_type = key_type[: -len(suffix)]
self.ecdsa_curve = self._ECDSA_CURVES.get_by_key_format_identifier(
key_type
)
key_types = self._ECDSA_CURVES.get_key_format_identifier_list()
cert_types = [
"{}-cert-v01@openssh.com".format(x) for x in key_types
]
self._check_type_and_load_cert(
msg=msg, key_type=key_types, cert_type=cert_types
)
curvename = msg.get_text()
if curvename != self.ecdsa_curve.nist_name:
raise SSHException(
"Can't handle curve of type {}".format(curvename)
)
pointinfo = msg.get_binary()
try:
key = ec.EllipticCurvePublicKey.from_encoded_point(
self.ecdsa_curve.curve_class(), pointinfo
)
self.verifying_key = key
except ValueError:
raise SSHException("Invalid public key")
@classmethod
def identifiers(cls):
return cls._ECDSA_CURVES.get_key_format_identifier_list()
# TODO 4.0: deprecate/remove
@classmethod
def supported_key_format_identifiers(cls):
return cls.identifiers()
def asbytes(self):
key = self.verifying_key
m = Message()
m.add_string(self.ecdsa_curve.key_format_identifier)
m.add_string(self.ecdsa_curve.nist_name)
numbers = key.public_numbers()
key_size_bytes = (key.curve.key_size + 7) // 8
x_bytes = deflate_long(numbers.x, add_sign_padding=False)
x_bytes = b"\x00" * (key_size_bytes - len(x_bytes)) + x_bytes
y_bytes = deflate_long(numbers.y, add_sign_padding=False)
y_bytes = b"\x00" * (key_size_bytes - len(y_bytes)) + y_bytes
point_str = four_byte + x_bytes + y_bytes
m.add_string(point_str)
return m.asbytes()
def __str__(self):
return self.asbytes()
@property
def _fields(self):
return (
self.get_name(),
self.verifying_key.public_numbers().x,
self.verifying_key.public_numbers().y,
)
def get_name(self):
return self.ecdsa_curve.key_format_identifier
def get_bits(self):
return self.ecdsa_curve.key_length
def can_sign(self):
return self.signing_key is not None
def sign_ssh_data(self, data, algorithm=None):
ecdsa = ec.ECDSA(self.ecdsa_curve.hash_object())
sig = self.signing_key.sign(data, ecdsa)
r, s = decode_dss_signature(sig)
m = Message()
m.add_string(self.ecdsa_curve.key_format_identifier)
m.add_string(self._sigencode(r, s))
return m
def verify_ssh_sig(self, data, msg):
if msg.get_text() != self.ecdsa_curve.key_format_identifier:
return False
sig = msg.get_binary()
sigR, sigS = self._sigdecode(sig)
signature = encode_dss_signature(sigR, sigS)
try:
self.verifying_key.verify(
signature, data, ec.ECDSA(self.ecdsa_curve.hash_object())
)
except InvalidSignature:
return False
else:
return True
def write_private_key_file(self, filename, password=None):
self._write_private_key_file(
filename,
self.signing_key,
serialization.PrivateFormat.TraditionalOpenSSL,
password=password,
)
def write_private_key(self, file_obj, password=None):
self._write_private_key(
file_obj,
self.signing_key,
serialization.PrivateFormat.TraditionalOpenSSL,
password=password,
)
@classmethod
def generate(cls, curve=ec.SECP256R1(), progress_func=None, bits=None):
"""
Generate a new private ECDSA key. This factory function can be used to
generate a new host key or authentication key.
:param progress_func: Not used for this type of key.
:returns: A new private key (`.ECDSAKey`) object
"""
if bits is not None:
curve = cls._ECDSA_CURVES.get_by_key_length(bits)
if curve is None:
raise ValueError("Unsupported key length: {:d}".format(bits))
curve = curve.curve_class()
private_key = ec.generate_private_key(curve, backend=default_backend())
return ECDSAKey(vals=(private_key, private_key.public_key()))
# ...internals...
def _from_private_key_file(self, filename, password):
data = self._read_private_key_file("EC", filename, password)
self._decode_key(data)
def _from_private_key(self, file_obj, password):
data = self._read_private_key("EC", file_obj, password)
self._decode_key(data)
def _decode_key(self, data):
pkformat, data = data
if pkformat == self._PRIVATE_KEY_FORMAT_ORIGINAL:
try:
key = serialization.load_der_private_key(
data, password=None, backend=default_backend()
)
except (
ValueError,
AssertionError,
TypeError,
UnsupportedAlgorithm,
) as e:
raise SSHException(str(e))
elif pkformat == self._PRIVATE_KEY_FORMAT_OPENSSH:
try:
msg = Message(data)
curve_name = msg.get_text()
verkey = msg.get_binary() # noqa: F841
sigkey = msg.get_mpint()
name = "ecdsa-sha2-" + curve_name
curve = self._ECDSA_CURVES.get_by_key_format_identifier(name)
if not curve:
raise SSHException("Invalid key curve identifier")
key = ec.derive_private_key(
sigkey, curve.curve_class(), default_backend()
)
except Exception as e:
# PKey._read_private_key_openssh() should check or return
# keytype - parsing could fail for any reason due to wrong type
raise SSHException(str(e))
else:
self._got_bad_key_format_id(pkformat)
self.signing_key = key
self.verifying_key = key.public_key()
curve_class = key.curve.__class__
self.ecdsa_curve = self._ECDSA_CURVES.get_by_curve_class(curve_class)
def _sigencode(self, r, s):
msg = Message()
msg.add_mpint(r)
msg.add_mpint(s)
return msg.asbytes()
def _sigdecode(self, sig):
msg = Message(sig)
r = msg.get_mpint()
s = msg.get_mpint()
return r, s
| ECDSAKey |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_types.py | {
"start": 58470,
"end": 58609
} | class ____(fixtures.TestBase):
def test_interval(self):
is_(postgresql.INTERVAL().python_type, datetime.timedelta)
| PythonTypeTest |
python | matplotlib__matplotlib | lib/matplotlib/tests/test_colors.py | {
"start": 27211,
"end": 80194
} | class ____:
"""
Tests for `~.colors.AsinhNorm`
"""
def test_init(self):
norm0 = mcolors.AsinhNorm()
assert norm0.linear_width == 1
norm5 = mcolors.AsinhNorm(linear_width=5)
assert norm5.linear_width == 5
def test_norm(self):
norm = mcolors.AsinhNorm(2, vmin=-4, vmax=4)
vals = np.arange(-3.5, 3.5, 10)
normed_vals = norm(vals)
asinh2 = np.arcsinh(2)
expected = (2 * np.arcsinh(vals / 2) + 2 * asinh2) / (4 * asinh2)
assert_array_almost_equal(normed_vals, expected)
def _inverse_tester(norm_instance, vals):
"""
Checks if the inverse of the given normalization is working.
"""
assert_array_almost_equal(norm_instance.inverse(norm_instance(vals)), vals)
def _scalar_tester(norm_instance, vals):
"""
Checks if scalars and arrays are handled the same way.
Tests only for float.
"""
scalar_result = [norm_instance(float(v)) for v in vals]
assert_array_almost_equal(scalar_result, norm_instance(vals))
def _mask_tester(norm_instance, vals):
"""
Checks mask handling
"""
masked_array = np.ma.array(vals)
masked_array[0] = np.ma.masked
assert_array_equal(masked_array.mask, norm_instance(masked_array).mask)
@image_comparison(['levels_and_colors.png'])
def test_cmap_and_norm_from_levels_and_colors():
# Remove this line when this test image is regenerated.
plt.rcParams['pcolormesh.snap'] = False
data = np.linspace(-2, 4, 49).reshape(7, 7)
levels = [-1, 2, 2.5, 3]
colors = ['red', 'green', 'blue', 'yellow', 'black']
extend = 'both'
cmap, norm = mcolors.from_levels_and_colors(levels, colors, extend=extend)
ax = plt.axes()
m = plt.pcolormesh(data, cmap=cmap, norm=norm)
plt.colorbar(m)
# Hide the axes labels (but not the colorbar ones, as they are useful)
ax.tick_params(labelleft=False, labelbottom=False)
@image_comparison(baseline_images=['boundarynorm_and_colorbar'],
extensions=['png'], tol=1.0)
def test_boundarynorm_and_colorbarbase():
# Remove this line when this test image is regenerated.
plt.rcParams['pcolormesh.snap'] = False
# Make a figure and axes with dimensions as desired.
fig = plt.figure()
ax1 = fig.add_axes((0.05, 0.80, 0.9, 0.15))
ax2 = fig.add_axes((0.05, 0.475, 0.9, 0.15))
ax3 = fig.add_axes((0.05, 0.15, 0.9, 0.15))
# Set the colormap and bounds
bounds = [-1, 2, 5, 7, 12, 15]
cmap = mpl.colormaps['viridis']
# Default behavior
norm = mcolors.BoundaryNorm(bounds, cmap.N)
cb1 = mcolorbar.ColorbarBase(ax1, cmap=cmap, norm=norm, extend='both',
orientation='horizontal', spacing='uniform')
# New behavior
norm = mcolors.BoundaryNorm(bounds, cmap.N, extend='both')
cb2 = mcolorbar.ColorbarBase(ax2, cmap=cmap, norm=norm,
orientation='horizontal')
# User can still force to any extend='' if really needed
norm = mcolors.BoundaryNorm(bounds, cmap.N, extend='both')
cb3 = mcolorbar.ColorbarBase(ax3, cmap=cmap, norm=norm,
extend='neither', orientation='horizontal')
def test_cmap_and_norm_from_levels_and_colors2():
levels = [-1, 2, 2.5, 3]
colors = ['red', (0, 1, 0), 'blue', (0.5, 0.5, 0.5), (0.0, 0.0, 0.0, 1.0)]
clr = mcolors.to_rgba_array(colors)
bad = (0.1, 0.1, 0.1, 0.1)
no_color = (0.0, 0.0, 0.0, 0.0)
masked_value = 'masked_value'
# Define the test values which are of interest.
# Note: levels are lev[i] <= v < lev[i+1]
tests = [('both', None, {-2: clr[0],
-1: clr[1],
2: clr[2],
2.25: clr[2],
3: clr[4],
3.5: clr[4],
masked_value: bad}),
('min', -1, {-2: clr[0],
-1: clr[1],
2: clr[2],
2.25: clr[2],
3: no_color,
3.5: no_color,
masked_value: bad}),
('max', -1, {-2: no_color,
-1: clr[0],
2: clr[1],
2.25: clr[1],
3: clr[3],
3.5: clr[3],
masked_value: bad}),
('neither', -2, {-2: no_color,
-1: clr[0],
2: clr[1],
2.25: clr[1],
3: no_color,
3.5: no_color,
masked_value: bad}),
]
for extend, i1, cases in tests:
cmap, norm = mcolors.from_levels_and_colors(levels, colors[0:i1],
extend=extend)
cmap = cmap.with_extremes(bad=bad)
for d_val, expected_color in cases.items():
if d_val == masked_value:
d_val = np.ma.array([1], mask=True)
else:
d_val = [d_val]
assert_array_equal(expected_color, cmap(norm(d_val))[0],
f'With extend={extend!r} and data '
f'value={d_val!r}')
with pytest.raises(ValueError):
mcolors.from_levels_and_colors(levels, colors)
def test_rgb_hsv_round_trip():
for a_shape in [(500, 500, 3), (500, 3), (1, 3), (3,)]:
np.random.seed(0)
tt = np.random.random(a_shape)
assert_array_almost_equal(
tt, mcolors.hsv_to_rgb(mcolors.rgb_to_hsv(tt)))
assert_array_almost_equal(
tt, mcolors.rgb_to_hsv(mcolors.hsv_to_rgb(tt)))
def test_autoscale_masked():
# Test for #2336. Previously fully masked data would trigger a ValueError.
data = np.ma.masked_all((12, 20))
plt.pcolor(data)
plt.draw()
@image_comparison(['light_source_shading_topo.png'])
def test_light_source_topo_surface():
"""Shades a DEM using different v.e.'s and blend modes."""
dem = cbook.get_sample_data('jacksboro_fault_dem.npz')
elev = dem['elevation']
dx, dy = dem['dx'], dem['dy']
# Get the true cellsize in meters for accurate vertical exaggeration
# Convert from decimal degrees to meters
dx = 111320.0 * dx * np.cos(dem['ymin'])
dy = 111320.0 * dy
ls = mcolors.LightSource(315, 45)
cmap = cm.gist_earth
fig, axs = plt.subplots(nrows=3, ncols=3)
for row, mode in zip(axs, ['hsv', 'overlay', 'soft']):
for ax, ve in zip(row, [0.1, 1, 10]):
rgb = ls.shade(elev, cmap, vert_exag=ve, dx=dx, dy=dy,
blend_mode=mode)
ax.imshow(rgb)
ax.set(xticks=[], yticks=[])
def test_light_source_shading_default():
"""
Array comparison test for the default "hsv" blend mode. Ensure the
default result doesn't change without warning.
"""
y, x = np.mgrid[-1.2:1.2:8j, -1.2:1.2:8j]
z = 10 * np.cos(x**2 + y**2)
cmap = plt.colormaps["copper"]
ls = mcolors.LightSource(315, 45)
rgb = ls.shade(z, cmap)
# Result stored transposed and rounded for more compact display...
expect = np.array(
[[[0.00, 0.45, 0.90, 0.90, 0.82, 0.62, 0.28, 0.00],
[0.45, 0.94, 0.99, 1.00, 1.00, 0.96, 0.65, 0.17],
[0.90, 0.99, 1.00, 1.00, 1.00, 1.00, 0.94, 0.35],
[0.90, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 0.49],
[0.82, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 0.41],
[0.62, 0.96, 1.00, 1.00, 1.00, 1.00, 0.90, 0.07],
[0.28, 0.65, 0.94, 1.00, 1.00, 0.90, 0.35, 0.01],
[0.00, 0.17, 0.35, 0.49, 0.41, 0.07, 0.01, 0.00]],
[[0.00, 0.28, 0.59, 0.72, 0.62, 0.40, 0.18, 0.00],
[0.28, 0.78, 0.93, 0.92, 0.83, 0.66, 0.39, 0.11],
[0.59, 0.93, 0.99, 1.00, 0.92, 0.75, 0.50, 0.21],
[0.72, 0.92, 1.00, 0.99, 0.93, 0.76, 0.51, 0.18],
[0.62, 0.83, 0.92, 0.93, 0.87, 0.68, 0.42, 0.08],
[0.40, 0.66, 0.75, 0.76, 0.68, 0.52, 0.23, 0.02],
[0.18, 0.39, 0.50, 0.51, 0.42, 0.23, 0.00, 0.00],
[0.00, 0.11, 0.21, 0.18, 0.08, 0.02, 0.00, 0.00]],
[[0.00, 0.18, 0.38, 0.46, 0.39, 0.26, 0.11, 0.00],
[0.18, 0.50, 0.70, 0.75, 0.64, 0.44, 0.25, 0.07],
[0.38, 0.70, 0.91, 0.98, 0.81, 0.51, 0.29, 0.13],
[0.46, 0.75, 0.98, 0.96, 0.84, 0.48, 0.22, 0.12],
[0.39, 0.64, 0.81, 0.84, 0.71, 0.31, 0.11, 0.05],
[0.26, 0.44, 0.51, 0.48, 0.31, 0.10, 0.03, 0.01],
[0.11, 0.25, 0.29, 0.22, 0.11, 0.03, 0.00, 0.00],
[0.00, 0.07, 0.13, 0.12, 0.05, 0.01, 0.00, 0.00]],
[[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00]]
]).T
assert_array_almost_equal(rgb, expect, decimal=2)
def test_light_source_shading_empty_mask():
    # Shading a masked array whose mask is empty (all-False) must give
    # exactly the same result as shading the equivalent unmasked array.
    y, x = np.mgrid[-1.2:1.2:8j, -1.2:1.2:8j]
    z_plain = 10 * np.cos(x**2 + y**2)
    z_masked = np.ma.array(z_plain)
    cmap = plt.colormaps["copper"]
    ls = mcolors.LightSource(315, 45)
    shaded_plain = ls.shade(z_plain, cmap)
    shaded_masked = ls.shade(z_masked, cmap)
    assert_array_almost_equal(shaded_plain, shaded_masked)
# Historical note: numpy 1.9.1 fixed a masked-array bug that caused extra
# elements to be masked when computing the gradient, so earlier numpy
# versions produce different output for the test below.
def test_light_source_masked_shading():
    """
    Array comparison test for a surface with a masked portion. Ensures that
    we don't wind up with "fringes" of odd colors around masked regions.
    """
    y, x = np.mgrid[-1.2:1.2:8j, -1.2:1.2:8j]
    z = 10 * np.cos(x**2 + y**2)
    # Mask out the central peak; the expected output below has zeros
    # (including zero alpha) at exactly those cells, with no color
    # bleeding into their neighbors.
    z = np.ma.masked_greater(z, 9.9)

    cmap = plt.colormaps["copper"]
    ls = mcolors.LightSource(315, 45)
    rgb = ls.shade(z, cmap)

    # Result stored transposed and rounded for more compact display...
    # (channels-first here; the trailing .T restores (rows, cols, rgba)).
    expect = np.array(
        [[[0.00, 0.46, 0.91, 0.91, 0.84, 0.64, 0.29, 0.00],
          [0.46, 0.96, 1.00, 1.00, 1.00, 0.97, 0.67, 0.18],
          [0.91, 1.00, 1.00, 1.00, 1.00, 1.00, 0.96, 0.36],
          [0.91, 1.00, 1.00, 0.00, 0.00, 1.00, 1.00, 0.51],
          [0.84, 1.00, 1.00, 0.00, 0.00, 1.00, 1.00, 0.44],
          [0.64, 0.97, 1.00, 1.00, 1.00, 1.00, 0.94, 0.09],
          [0.29, 0.67, 0.96, 1.00, 1.00, 0.94, 0.38, 0.01],
          [0.00, 0.18, 0.36, 0.51, 0.44, 0.09, 0.01, 0.00]],

         [[0.00, 0.29, 0.61, 0.75, 0.64, 0.41, 0.18, 0.00],
          [0.29, 0.81, 0.95, 0.93, 0.85, 0.68, 0.40, 0.11],
          [0.61, 0.95, 1.00, 0.78, 0.78, 0.77, 0.52, 0.22],
          [0.75, 0.93, 0.78, 0.00, 0.00, 0.78, 0.54, 0.19],
          [0.64, 0.85, 0.78, 0.00, 0.00, 0.78, 0.45, 0.08],
          [0.41, 0.68, 0.77, 0.78, 0.78, 0.55, 0.25, 0.02],
          [0.18, 0.40, 0.52, 0.54, 0.45, 0.25, 0.00, 0.00],
          [0.00, 0.11, 0.22, 0.19, 0.08, 0.02, 0.00, 0.00]],

         [[0.00, 0.19, 0.39, 0.48, 0.41, 0.26, 0.12, 0.00],
          [0.19, 0.52, 0.73, 0.78, 0.66, 0.46, 0.26, 0.07],
          [0.39, 0.73, 0.95, 0.50, 0.50, 0.53, 0.30, 0.14],
          [0.48, 0.78, 0.50, 0.00, 0.00, 0.50, 0.23, 0.12],
          [0.41, 0.66, 0.50, 0.00, 0.00, 0.50, 0.11, 0.05],
          [0.26, 0.46, 0.53, 0.50, 0.50, 0.11, 0.03, 0.01],
          [0.12, 0.26, 0.30, 0.23, 0.11, 0.03, 0.00, 0.00],
          [0.00, 0.07, 0.14, 0.12, 0.05, 0.01, 0.00, 0.00]],

         [[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
          [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
          [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
          [1.00, 1.00, 1.00, 0.00, 0.00, 1.00, 1.00, 1.00],
          [1.00, 1.00, 1.00, 0.00, 0.00, 1.00, 1.00, 1.00],
          [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
          [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
          [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00]],
         ]).T

    assert_array_almost_equal(rgb, expect, decimal=2)
def test_light_source_hillshading():
    """
    Compare the current hillshading method against one that should be
    mathematically equivalent. Illuminates a cone from a range of angles.
    """

    def alternative_hillshade(azimuth, elev, z):
        # Unit vector pointing toward the light source.
        illum = _sph2cart(*_azimuth2math(azimuth, elev))
        illum = np.array(illum)

        # Surface normals from the height-field gradient.
        dy, dx = np.gradient(-z)
        dy = -dy
        dz = np.ones_like(dy)
        normals = np.dstack([dx, dy, dz])
        normals /= np.linalg.norm(normals, axis=2)[..., None]

        # Dot product of each normal with the illumination direction,
        # rescaled to span [0, 1].
        intensity = np.tensordot(normals, illum, axes=(2, 0))
        intensity -= intensity.min()
        intensity /= np.ptp(intensity)
        return intensity

    y, x = np.mgrid[5:0:-1, :5]
    z = -np.hypot(x - x.mean(), y - y.mean())

    # Sweep azimuth through a full circle and elevation from horizon to
    # zenith (inclusive, in 30/15 degree steps).
    for az, elev in itertools.product(range(0, 390, 30), range(0, 105, 15)):
        ls = mcolors.LightSource(az, elev)
        h1 = ls.hillshade(z)
        h2 = alternative_hillshade(az, elev, z)
        assert_array_almost_equal(h1, h2)
def test_light_source_planar_hillshading():
    """
    Ensure that the illumination intensity is correct for planar surfaces.
    """

    def plane(azimuth, elevation, x, y):
        """
        Create a plane whose normal vector is at the given azimuth and
        elevation.
        """
        theta, phi = _azimuth2math(azimuth, elevation)
        a, b, c = _sph2cart(theta, phi)
        # Plane through the origin with normal (a, b, c): a*x + b*y + c*z = 0.
        z = -(a*x + b*y) / c
        return z

    def angled_plane(azimuth, elevation, angle, x, y):
        """
        Create a plane whose normal vector is at an angle from the given
        azimuth and elevation.
        """
        elevation = elevation + angle
        if elevation > 90:
            # Tilted past the zenith: flip to the opposite azimuth.
            azimuth = (azimuth + 180) % 360
            elevation = (90 - elevation) % 90
        return plane(azimuth, elevation, x, y)

    y, x = np.mgrid[5:0:-1, :5]
    for az, elev in itertools.product(range(0, 390, 30), range(0, 105, 15)):
        ls = mcolors.LightSource(az, elev)

        # Make a plane at a range of angles to the illumination; for a
        # plane the Lambertian intensity is cos(angle to the light).
        for angle in range(0, 105, 15):
            z = angled_plane(az, elev, angle, x, y)
            h = ls.hillshade(z)
            assert_array_almost_equal(h, np.cos(np.radians(angle)))
def test_color_names():
    # Named colors from the CSS, xkcd, and Tableau palettes resolve to
    # their documented hex values.
    for name, expected_hex in [("blue", "#0000ff"),
                               ("xkcd:blue", "#0343df"),
                               ("tab:blue", "#1f77b4")]:
        assert mcolors.to_hex(name) == expected_hex
def _sph2cart(theta, phi):
x = np.cos(theta) * np.sin(phi)
y = np.sin(theta) * np.sin(phi)
z = np.cos(phi)
return x, y, z
def _azimuth2math(azimuth, elevation):
"""
Convert from clockwise-from-north and up-from-horizontal to mathematical
conventions.
"""
theta = np.radians((90 - azimuth) % 360)
phi = np.radians(90 - elevation)
return theta, phi
def test_pandas_iterable(pd):
    # A pandas Series of color names must behave exactly like a plain
    # list when building a ListedColormap, i.e. it is interpreted as a
    # sequence of colors rather than a single color.
    names = ['red', 'blue', 'green']
    from_list = mcolors.ListedColormap(names)
    from_series = mcolors.ListedColormap(pd.Series(names))
    assert_array_equal(from_list.colors, from_series.colors)
@pytest.mark.parametrize('name', sorted(mpl.colormaps()))
def test_colormap_reversing(name):
    """
    Check the generated _lut data of a colormap and corresponding reversed
    colormap if they are almost the same.
    """
    cmap = mpl.colormaps[name]
    cmap_r = cmap.reversed()
    # _lut is built lazily; force initialization so both tables exist.
    if not cmap_r._isinit:
        cmap._init()
        cmap_r._init()
    # Compare the main lookup table in reverse order; the trailing 3
    # special entries are excluded here and exercised via the
    # bad/over/under checks below.
    assert_array_almost_equal(cmap._lut[:-3], cmap_r._lut[-4::-1])
    # Test the bad, over, under values too
    assert_array_almost_equal(cmap(-np.inf), cmap_r(np.inf))
    assert_array_almost_equal(cmap(np.inf), cmap_r(-np.inf))
    assert_array_almost_equal(cmap(np.nan), cmap_r(np.nan))
def test_has_alpha_channel():
    # Specifications that carry alpha information.
    for spec in [(0, 0, 0, 0), [1, 1, 1, 1], '#fff8', '#0f0f0f80',
                 ('r', 0.5), ([1, 1, 1, 1], None)]:
        assert mcolors._has_alpha_channel(spec)
    # Specifications that do not ('blue' is four characters long but is
    # a name, not #RGBA hex shorthand).
    for spec in ['blue', '0.25', 'r', (1, 0, 0), '#fff', '#0f0f0f',
                 ('r', None), ([1, 1, 1], None)]:
        assert not mcolors._has_alpha_channel(spec)
def test_cn():
    # "CN" color aliases resolve through the active axes.prop_cycle at
    # call time, so changing rcParams changes what "C0" means.
    matplotlib.rcParams['axes.prop_cycle'] = cycler('color',
                                                    ['blue', 'r'])
    assert mcolors.to_hex("C0") == '#0000ff'
    assert mcolors.to_hex("C1") == '#ff0000'

    matplotlib.rcParams['axes.prop_cycle'] = cycler('color',
                                                    ['xkcd:blue', 'r'])
    assert mcolors.to_hex("C0") == '#0343df'
    assert mcolors.to_hex("C1") == '#ff0000'

    # Indices beyond the cycle length wrap around the 2-color cycle.
    assert mcolors.to_hex("C10") == '#0343df'
    assert mcolors.to_hex("C11") == '#ff0000'

    matplotlib.rcParams['axes.prop_cycle'] = cycler('color', ['8e4585', 'r'])

    assert mcolors.to_hex("C0") == '#8e4585'
    # if '8e4585' gets parsed as a float before it gets detected as a hex
    # colour it will be interpreted as a very large number.
    # this mustn't happen.
    assert mcolors.to_rgb("C0")[0] != np.inf
def test_conversions():
    """Spot checks of to_rgba / to_rgba_array / to_hex conversions."""
    # to_rgba_array("none") returns a (0, 4) array.
    assert_array_equal(mcolors.to_rgba_array("none"), np.zeros((0, 4)))
    assert_array_equal(mcolors.to_rgba_array([]), np.zeros((0, 4)))
    # a list of grayscale levels, not a single color.
    assert_array_equal(
        mcolors.to_rgba_array([".2", ".5", ".8"]),
        np.vstack([mcolors.to_rgba(c) for c in [".2", ".5", ".8"]]))
    # alpha is properly set.
    assert mcolors.to_rgba((1, 1, 1), .5) == (1, 1, 1, .5)
    assert mcolors.to_rgba(".1", .5) == (.1, .1, .1, .5)
    # builtin round differs between py2 and py3.
    assert mcolors.to_hex((.7, .7, .7)) == "#b2b2b2"
    # hex roundtrip (keep_alpha preserves the 8th digit pair).
    hex_color = "#1234abcd"
    assert mcolors.to_hex(mcolors.to_rgba(hex_color), keep_alpha=True) == \
        hex_color
def test_conversions_masked():
    # Masked entries convert to fully transparent black (0, 0, 0, 0).
    x1 = np.ma.array(['k', 'b'], mask=[True, False])
    x2 = np.ma.array([[0, 0, 0, 1], [0, 0, 1, 1]])
    x2[0] = np.ma.masked
    assert mcolors.to_rgba(x1[0]) == (0, 0, 0, 0)
    assert_array_equal(mcolors.to_rgba_array(x1),
                       [[0, 0, 0, 0], [0, 0, 1, 1]])
    # Masked color names and masked RGBA rows behave identically.
    assert_array_equal(mcolors.to_rgba_array(x2), mcolors.to_rgba_array(x1))
def test_to_rgba_array_single_str():
    # A single color name yields a one-color array.
    assert_array_equal(mcolors.to_rgba_array("red"), [(1, 0, 0, 1)])

    # A bare string of single-letter color codes is rejected; it must not
    # be silently interpreted as a sequence of colors.  (The previous
    # unused `array = ...` binding was dead code and has been removed.)
    with pytest.raises(ValueError,
                       match="'rgb' is not a valid color value."):
        mcolors.to_rgba_array("rgb")
def test_to_rgba_array_2tuple_str():
    # A 2-tuple of color names is a sequence of two colors, not a
    # (color, alpha) pair.
    converted = mcolors.to_rgba_array(("k", "w"))
    assert_array_equal(converted, np.array([[0, 0, 0, 1], [1, 1, 1, 1]]))
def test_to_rgba_array_alpha_array():
    # The alpha array length must match the number of colors.
    with pytest.raises(ValueError, match="The number of colors must match"):
        mcolors.to_rgba_array(np.ones((5, 3), float), alpha=np.ones((2,)))
    # With matching lengths, each alpha lands in the corresponding row,
    # whether colors arrive as an array or as names.
    alphas = [0.5, 0.6]
    for colors in (np.ones((2, 3), float), ['r', 'g']):
        rgba = mcolors.to_rgba_array(colors, alpha=alphas)
        assert_array_equal(rgba[:, 3], alphas)
def test_to_rgba_array_accepts_color_alpha_tuple():
    # A (color, alpha) pair is accepted and the alpha is applied.
    result = mcolors.to_rgba_array(('black', 0.9))
    assert_array_equal(result, [[0, 0, 0, 0.9]])
def test_to_rgba_array_explicit_alpha_overrides_tuple_alpha():
    # An explicit alpha= argument wins over the alpha inside the tuple.
    result = mcolors.to_rgba_array(('black', 0.9), alpha=0.5)
    assert_array_equal(result, [[0, 0, 0, 0.5]])
def test_to_rgba_array_accepts_color_alpha_tuple_with_multiple_colors():
    # The tuple alpha is broadcast over every color in the sequence,
    # for both ndarray and plain-list color sequences.
    base = [[1., 1., 1., 1.], [0., 0., 1., 0.]]
    assert_array_equal(
        mcolors.to_rgba_array((np.array(base), 0.2)),
        [[1., 1., 1., 0.2], [0., 0., 1., 0.2]])
    assert_array_equal(
        mcolors.to_rgba_array((base, 0.4)),
        [[1., 1., 1., 0.4], [0., 0., 1., 0.4]])
def test_to_rgba_array_error_with_color_invalid_alpha_tuple():
    # An alpha outside [0, 1] inside a (color, alpha) pair is rejected.
    with pytest.raises(ValueError, match="'alpha' must be between 0 and 1,"):
        mcolors.to_rgba_array(('black', 2.0))
@pytest.mark.parametrize('rgba_alpha',
                         [('white', 0.5), ('#ffffff', 0.5), ('#ffffff00', 0.5),
                          ((1.0, 1.0, 1.0, 1.0), 0.5)])
def test_to_rgba_accepts_color_alpha_tuple(rgba_alpha):
    # Whatever form the color takes (name, hex, hex-with-alpha, RGBA
    # tuple), the alpha of the (color, alpha) pair takes effect.
    assert mcolors.to_rgba(rgba_alpha) == (1, 1, 1, 0.5)
def test_to_rgba_explicit_alpha_overrides_tuple_alpha():
    # An explicit alpha= argument wins over the alpha inside the tuple.
    rgba = mcolors.to_rgba(('red', 0.1), alpha=0.9)
    assert rgba == (1, 0, 0, 0.9)
def test_to_rgba_error_with_color_invalid_alpha_tuple():
    # An alpha outside [0, 1] inside a (color, alpha) pair is rejected.
    with pytest.raises(ValueError, match="'alpha' must be between 0 and 1"):
        mcolors.to_rgba(('blue', 2.0))
@pytest.mark.parametrize("bytes", (True, False))
def test_scalarmappable_to_rgba(bytes):
    """
    ScalarMappable.to_rgba passthrough of RGB(A) image arrays: uint8 and
    float inputs, plain and masked, with and without ``bytes`` output.
    """
    sm = cm.ScalarMappable()
    # Fully-opaque alpha in the requested output representation.
    alpha_1 = 255 if bytes else 1

    # uint8 RGBA
    x = np.ones((2, 3, 4), dtype=np.uint8)
    expected = x.copy() if bytes else x.astype(np.float32)/255
    np.testing.assert_almost_equal(sm.to_rgba(x, bytes=bytes), expected)
    # uint8 RGB: an opaque alpha channel is synthesized.
    expected[..., 3] = alpha_1
    np.testing.assert_almost_equal(sm.to_rgba(x[..., :3], bytes=bytes), expected)
    # uint8 masked RGBA: masked pixels get alpha 0.
    xm = np.ma.masked_array(x, mask=np.zeros_like(x))
    xm.mask[0, 0, 0] = True
    expected = x.copy() if bytes else x.astype(np.float32)/255
    expected[0, 0, 3] = 0
    np.testing.assert_almost_equal(sm.to_rgba(xm, bytes=bytes), expected)
    # uint8 masked RGB: opaque everywhere except the masked pixel.
    expected[..., 3] = alpha_1
    expected[0, 0, 3] = 0
    np.testing.assert_almost_equal(sm.to_rgba(xm[..., :3], bytes=bytes), expected)
    # float RGBA
    x = np.ones((2, 3, 4), dtype=float) * 0.5
    expected = (x * 255).astype(np.uint8) if bytes else x.copy()
    np.testing.assert_almost_equal(sm.to_rgba(x, bytes=bytes), expected)
    # float RGB
    expected[..., 3] = alpha_1
    np.testing.assert_almost_equal(sm.to_rgba(x[..., :3], bytes=bytes), expected)
    # float masked RGBA
    xm = np.ma.masked_array(x, mask=np.zeros_like(x))
    xm.mask[0, 0, 0] = True
    expected = (x * 255).astype(np.uint8) if bytes else x.copy()
    expected[0, 0, 3] = 0
    np.testing.assert_almost_equal(sm.to_rgba(xm, bytes=bytes), expected)
    # float masked RGB
    expected[..., 3] = alpha_1
    expected[0, 0, 3] = 0
    np.testing.assert_almost_equal(sm.to_rgba(xm[..., :3], bytes=bytes), expected)
@pytest.mark.parametrize("bytes", (True, False))
def test_scalarmappable_nan_to_rgba(bytes):
    """NaN pixels in RGB(A) input become fully transparent black."""
    sm = cm.ScalarMappable()

    # RGBA: the NaN pixel is zeroed in all four channels.
    x = np.ones((2, 3, 4), dtype=float) * 0.5
    x[0, 0, 0] = np.nan
    expected = x.copy()
    expected[0, 0, :] = 0
    if bytes:
        expected = (expected * 255).astype(np.uint8)
    np.testing.assert_almost_equal(sm.to_rgba(x, bytes=bytes), expected)
    assert np.any(np.isnan(x))  # Input array should not be changed

    # RGB: synthesized alpha is opaque except at the NaN pixel.
    expected[..., 3] = 255 if bytes else 1
    expected[0, 0, 3] = 0
    np.testing.assert_almost_equal(sm.to_rgba(x[..., :3], bytes=bytes), expected)
    assert np.any(np.isnan(x))  # Input array should not be changed

    # Out-of-range fail
    x[1, 0, 0] = 42
    with pytest.raises(ValueError, match=r'\[0,1\] range'):
        sm.to_rgba(x[..., :3], bytes=bytes)
def test_failed_conversions():
    # None of these are valid color specifications.  The float 0.4 is
    # included because grays must be given as strings, to distinguish
    # sequences of 3-4 grays from RGB or RGBA.
    for invalid in ['5', '-1', 'nan', 'unknown_color', 0.4]:
        with pytest.raises(ValueError):
            mcolors.to_rgba(invalid)
def test_grey_gray():
    # Every color name containing "grey" must map to the same value as
    # its "gray" spelling, and vice versa.
    mapping = mcolors._colors_full_map
    for name in mapping:
        for variant, alternate in (('grey', 'gray'), ('gray', 'grey')):
            if variant in name:
                assert mapping[name] == mapping[name.replace(variant,
                                                             alternate)]
def test_tableau_order():
    # TABLEAU_COLORS must keep the classic "tab10" ordering, matching the
    # hex values below.
    dflt_cycle = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
                  '#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
                  '#bcbd22', '#17becf']

    assert list(mcolors.TABLEAU_COLORS.values()) == dflt_cycle
def test_ndarray_subclass_norm():
    # Emulate an ndarray subclass that handles units
    # which objects when adding or subtracting with other
    # arrays. See #6622 and #8696
    class MyArray(np.ndarray):
        def __isub__(self, other):  # type: ignore[misc]
            raise RuntimeError

        def __add__(self, other):
            raise RuntimeError

    data = np.arange(-10, 10, 1, dtype=float).reshape((10, 2))
    mydata = data.view(MyArray)

    # Each norm must produce identical output for the subclass and the
    # plain ndarray, without ever triggering the forbidden += / -.
    for norm in [mcolors.Normalize(), mcolors.LogNorm(),
                 mcolors.SymLogNorm(3, vmax=5, linscale=1, base=np.e),
                 mcolors.Normalize(vmin=mydata.min(), vmax=mydata.max()),
                 mcolors.SymLogNorm(3, vmin=mydata.min(), vmax=mydata.max(),
                                    base=np.e),
                 mcolors.PowerNorm(1)]:
        assert_array_equal(norm(mydata), norm(data))
    fig, ax = plt.subplots()
    ax.imshow(mydata, norm=norm)
    fig.canvas.draw()  # Check that no warning is emitted.
def test_same_color():
    # Equivalent specifications compare equal; distinct colors do not.
    assert mcolors.same_color('k', (0, 0, 0))
    assert not mcolors.same_color('w', (1, 1, 0))
    # Sequences of colors are compared elementwise.
    assert mcolors.same_color(['red', 'blue'], ['r', 'b'])
    # 'none' compares equal only to 'none'.
    assert mcolors.same_color('none', 'none')
    assert not mcolors.same_color('none', 'red')
    # Mismatched sequence lengths, or a sequence versus 'none', raise.
    for first, second in ((['r', 'g', 'b'], ['r']),
                          (['red', 'green'], 'none')):
        with pytest.raises(ValueError):
            mcolors.same_color(first, second)
def test_hex_shorthand_notation():
    # 3- and 4-digit hex shorthands expand by doubling each digit.
    for short, full in [("#123", "#112233"), ("#123a", "#112233aa")]:
        assert mcolors.same_color(short, full)
def test_repr_png():
    # The inline PNG repr must be a non-empty, decodable image that
    # carries the standard metadata fields.
    png = mpl.colormaps['viridis']._repr_png_()
    assert len(png) > 0
    img = Image.open(BytesIO(png))
    assert img.width > 0
    assert img.height > 0
    for field in ('Title', 'Description', 'Author', 'Software'):
        assert field in img.text
def test_repr_html():
    # The HTML repr is a <div> that embeds the PNG repr as base64 and
    # mentions the colormap name.
    cmap = mpl.colormaps['viridis']
    html = cmap._repr_html_()
    assert len(html) > 0
    embedded = base64.b64encode(cmap._repr_png_()).decode('ascii')
    assert embedded in html
    assert cmap.name in html
    assert html.startswith('<div')
    assert html.endswith('</div>')
def test_get_under_over_bad():
    # The dedicated accessors agree with evaluating the colormap at
    # -inf, +inf and nan respectively.
    cmap = mpl.colormaps['viridis']
    for getter, probe in [(cmap.get_under, -np.inf),
                          (cmap.get_over, np.inf),
                          (cmap.get_bad, np.nan)]:
        assert_array_equal(getter(), cmap(probe))
@pytest.mark.parametrize('kind', ('over', 'under', 'bad'))
def test_non_mutable_get_values(kind):
    # Setting over/under/bad must not leak back into the value previously
    # returned by the getter (the getter returns an independent value).
    cmap = copy.copy(mpl.colormaps['viridis'])
    init_value = getattr(cmap, f'get_{kind}')()
    # set_* currently emits PendingDeprecationWarning; tolerate it here.
    with pytest.warns(PendingDeprecationWarning):
        getattr(cmap, f'set_{kind}')('k')
    black_value = getattr(cmap, f'get_{kind}')()
    assert np.all(black_value == [0, 0, 0, 1])
    assert not np.all(init_value == black_value)
def test_colormap_alpha_array():
    cmap = mpl.colormaps['viridis']
    vals = [-1, 0.5, 2]  # under, valid, over
    # An array-valued alpha must match the number of input values.
    with pytest.raises(ValueError, match="alpha is array-like but"):
        cmap(vals, alpha=[1, 1, 1, 1])
    # Per-value alpha lands in the last channel; with bytes=True it is
    # scaled to uint8.
    alpha = np.array([0.1, 0.2, 0.3])
    c = cmap(vals, alpha=alpha)
    assert_array_equal(c[:, -1], alpha)
    c = cmap(vals, alpha=alpha, bytes=True)
    assert_array_equal(c[:, -1], (alpha * 255).astype(np.uint8))
def test_colormap_bad_data_with_alpha():
    # NaN inputs map to the fully transparent bad color even when an
    # explicit alpha (scalar or array) is supplied.
    cmap = mpl.colormaps['viridis']
    c = cmap(np.nan, alpha=0.5)
    assert c == (0, 0, 0, 0)
    c = cmap([0.5, np.nan], alpha=0.5)
    assert_array_equal(c[1], (0, 0, 0, 0))
    c = cmap([0.5, np.nan], alpha=[0.1, 0.2])
    assert_array_equal(c[1], (0, 0, 0, 0))
    c = cmap([[np.nan, 0.5], [0, 0]], alpha=0.5)
    assert_array_equal(c[0, 0], (0, 0, 0, 0))
    c = cmap([[np.nan, 0.5], [0, 0]], alpha=np.full((2, 2), 0.5))
    assert_array_equal(c[0, 0], (0, 0, 0, 0))
def test_2d_to_rgba():
    # A single color given as a 1-row 2D array converts the same as the
    # equivalent 1D array.
    color = np.array([0.1, 0.2, 0.3])
    assert (mcolors.to_rgba(color.reshape(-1))
            == mcolors.to_rgba(color.reshape((1, -1))))
def test_set_dict_to_rgba():
    # downstream libraries do this...
    # note we can't test this because it is not well-ordered
    # so just smoketest:
    colors = {(0, .5, 1), (1, .2, .5), (.4, 1, .2)}
    res = mcolors.to_rgba_array(colors)
    # A dict-values view is ordered, so the result can be checked exactly.
    palette = {"red": (1, 0, 0), "green": (0, 1, 0), "blue": (0, 0, 1)}
    res = mcolors.to_rgba_array(palette.values())
    exp = np.eye(3)
    np.testing.assert_array_almost_equal(res[:, :-1], exp)
def test_norm_deepcopy():
    # Deep-copying a norm must preserve vmin and duplicate any attached
    # scale object.
    lognorm = mcolors.LogNorm()
    lognorm.vmin = 0.0002
    copied = copy.deepcopy(lognorm)
    assert copied.vmin == lognorm.vmin
    assert isinstance(copied._scale, mscale.LogScale)
    # A plain Normalize carries no scale; that must survive the copy too.
    plain = mcolors.Normalize()
    plain.vmin = 0.0002
    copied = copy.deepcopy(plain)
    assert copied._scale is None
    assert copied.vmin == plain.vmin
def test_set_clim_emits_single_callback():
    # set_clim updates both vmin and vmax but must notify "changed"
    # listeners exactly once, not once per limit.
    data = np.array([[1, 2], [3, 4]])
    fig, ax = plt.subplots()
    image = ax.imshow(data, cmap='viridis')
    callback = unittest.mock.Mock()
    image.norm.callbacks.connect('changed', callback)
    callback.assert_not_called()

    # Call set_clim() to update the limits
    image.set_clim(1, 5)

    # Assert that only one "changed" callback is sent after calling set_clim()
    callback.assert_called_once()
def test_norm_callback():
    increment = unittest.mock.Mock(return_value=None)

    norm = mcolors.Normalize()
    norm.callbacks.connect('changed', increment)
    # Haven't updated anything, so call count should be 0
    assert increment.call_count == 0

    # Now change vmin and vmax to test callbacks
    norm.vmin = 1
    assert increment.call_count == 1
    norm.vmax = 5
    assert increment.call_count == 2
    # callback shouldn't be called if setting to the same value
    norm.vmin = 1
    assert increment.call_count == 2
    norm.vmax = 5
    assert increment.call_count == 2

    # We only want autoscale() calls to send out one update signal
    # (reset the mock's counter so the next assertion is absolute).
    increment.call_count = 0
    norm.autoscale([0, 1, 2])
    assert increment.call_count == 1
def test_scalarmappable_norm_update():
    # Mutating the attached norm must mark the ScalarMappable stale.
    norm = mcolors.Normalize()
    sm = matplotlib.cm.ScalarMappable(norm=norm, cmap='plasma')
    # sm doesn't have a stale attribute at first, set it to False
    sm.stale = False
    # The mappable should be stale after updating vmin/vmax
    norm.vmin = 5
    assert sm.stale
    sm.stale = False
    norm.vmax = 5
    assert sm.stale
    sm.stale = False
    norm.clip = True
    assert sm.stale
    # change to the CenteredNorm and TwoSlopeNorm to test those
    # Also make sure that updating the norm directly and with
    # set_norm both update the Norm callback
    norm = mcolors.CenteredNorm()
    sm.norm = norm
    sm.stale = False
    norm.vcenter = 1
    assert sm.stale
    norm = mcolors.TwoSlopeNorm(vcenter=0, vmin=-1, vmax=1)
    sm.set_norm(norm)
    sm.stale = False
    norm.vcenter = 1
    assert sm.stale
@check_figures_equal()
def test_norm_update_figs(fig_test, fig_ref):
    # Updating a norm's limits after the artist is drawn must re-render
    # identically to creating the norm with those limits up front.
    ax_ref = fig_ref.add_subplot()
    ax_test = fig_test.add_subplot()

    z = np.arange(100).reshape((10, 10))
    ax_ref.imshow(z, norm=mcolors.Normalize(10, 90))

    # Create the norm beforehand with different limits and then update
    # after adding to the plot
    norm = mcolors.Normalize(0, 1)
    ax_test.imshow(z, norm=norm)
    # Force initial draw to make sure it isn't already stale
    fig_test.canvas.draw()
    norm.vmin, norm.vmax = 10, 90
def test_make_norm_from_scale_name():
    # The generated norm class is named after the scale it wraps.
    generated = mcolors.make_norm_from_scale(
        mscale.LogitScale, mcolors.Normalize)
    assert generated.__name__ == "LogitScaleNorm"
    assert generated.__qualname__ == "LogitScaleNorm"
def test_color_sequences():
    """Registry behavior of named color sequences (access, register, unregister)."""
    # basic access
    assert plt.color_sequences is matplotlib.color_sequences  # same registry
    assert list(plt.color_sequences) == [
        'tab10', 'tab20', 'tab20b', 'tab20c', 'Pastel1', 'Pastel2', 'Paired',
        'Accent', 'Dark2', 'Set1', 'Set2', 'Set3', 'petroff6', 'petroff8',
        'petroff10']
    assert len(plt.color_sequences['tab10']) == 10
    assert len(plt.color_sequences['tab20']) == 20
    tab_colors = [
        'tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple',
        'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan']
    for seq_color, tab_color in zip(plt.color_sequences['tab10'], tab_colors):
        assert mcolors.same_color(seq_color, tab_color)

    # registering: builtin names are reserved; colors are validated.
    with pytest.raises(ValueError, match="reserved name"):
        plt.color_sequences.register('tab10', ['r', 'g', 'b'])
    with pytest.raises(ValueError, match="not a valid color specification"):
        plt.color_sequences.register('invalid', ['not a color'])

    rgb_colors = ['r', 'g', 'b']
    plt.color_sequences.register('rgb', rgb_colors)
    assert plt.color_sequences['rgb'] == ['r', 'g', 'b']
    # should not affect the registered sequence because input is copied
    rgb_colors.append('c')
    assert plt.color_sequences['rgb'] == ['r', 'g', 'b']
    # should not affect the registered sequence because returned list is a copy
    plt.color_sequences['rgb'].append('c')
    assert plt.color_sequences['rgb'] == ['r', 'g', 'b']

    # unregister
    plt.color_sequences.unregister('rgb')
    with pytest.raises(KeyError):
        plt.color_sequences['rgb']  # rgb is gone
    plt.color_sequences.unregister('rgb')  # multiple unregisters are ok
    with pytest.raises(ValueError, match="Cannot unregister builtin"):
        plt.color_sequences.unregister('tab10')
def test_cm_set_cmap_error():
    # A name that is surely not registered must raise, and the error
    # message must mention the offending name.
    unknown = 'AardvarksAreAwkward'
    with pytest.raises(ValueError, match=unknown):
        cm.ScalarMappable().set_cmap(unknown)
def test_set_cmap_mismatched_name():
    cmap = matplotlib.colormaps["viridis"].with_extremes(over='r')
    # register it with different names
    cmap.name = "test-cmap"
    matplotlib.colormaps.register(name='wrong-cmap', cmap=cmap)

    plt.set_cmap("wrong-cmap")

    # Lookup by registry key returns an equal colormap whose .name
    # matches the key it was registered under, not the .name it carried.
    cmap_returned = plt.get_cmap("wrong-cmap")
    assert cmap_returned == cmap
    assert cmap_returned.name == "wrong-cmap"
def test_cmap_alias_names():
    # Both the original name and its alias resolve, each colormap keeping
    # the name it was looked up with.
    for name in ("gray", "grey"):
        assert matplotlib.colormaps[name].name == name
def test_to_rgba_array_none_color_with_alpha_param():
    # "none" must stay fully transparent even when an explicit alpha is
    # supplied — the effective alpha for "none" is always 0 so the color
    # vanishes.
    rgba = to_rgba_array(["blue", "none"], [1, 1])
    assert_array_equal(rgba, [[0., 0., 1., 1.], [0., 0., 0., 0.]])
@pytest.mark.parametrize('input, expected',
                         [('red', True),
                          (('red', 0.5), True),
                          (('red', 2), False),
                          (['red', 0.5], False),
                          (('red', 'blue'), False),
                          (['red', 'blue'], False),
                          ('C3', True),
                          (('C3', 0.5), True)])
def test_is_color_like(input, expected):
    # (color, alpha) *tuples* are color-like; lists, out-of-range alphas
    # and color pairs are not.
    assert is_color_like(input) is expected
def test_colorizer_vmin_vmax():
    # vmin/vmax start unset; assigning them propagates to the underlying
    # norm (as floats).
    colorizer = mcolorizer.Colorizer()
    assert colorizer.vmin is None
    assert colorizer.vmax is None
    colorizer.vmin = 1
    colorizer.vmax = 3
    assert colorizer.vmin == 1.0
    assert colorizer.norm.vmin == 1.0
    assert colorizer.vmax == 3.0
    assert colorizer.norm.vmax == 3.0
def test_LinearSegmentedColormap_from_list_color_alpha_tuple():
    """
    GitHub issue #29042: A bug in 'from_list' causes an error
    when passing a tuple (str, float) where the string is a
    color name or grayscale value and float is an alpha value.
    """
    colors = [("red", 0.3), ("0.42", 0.1), "green"]
    cmap = mcolors.LinearSegmentedColormap.from_list("lsc", colors, N=3)
    # With N=3 the sample points 0, 0.5, 1 land exactly on the three
    # input colors (alpha included).
    assert_array_almost_equal(cmap([.0, 0.5, 1.]), to_rgba_array(colors))
@pytest.mark.parametrize("colors",
                         # Each case mixes (value, color) anchors with plain
                         # colors, uses an out-of-range alpha or anchor value,
                         # or has non-increasing anchor values.
                         [[(0.42, "blue"), (.1, .1, .1, .1)],
                          ["blue", (0.42, "red")],
                          ["blue", (.1, .1, .1, .1), ("red", 2)],
                          [(0, "red"), (1.1, "blue")],
                          [(0.52, "red"), (0.42, "blue")]])
def test_LinearSegmentedColormap_from_list_invalid_inputs(colors):
    with pytest.raises(ValueError):
        mcolors.LinearSegmentedColormap.from_list("lsc", colors)
def test_LinearSegmentedColormap_from_list_value_color_tuple():
    # (value, color) anchors: sampling the colormap at each anchor value
    # must return the anchored color.
    anchors = [(0, "red"), (0.6, "blue"), (1, "green")]
    cmap = mcolors.LinearSegmentedColormap.from_list("lsc", anchors, N=11)
    positions = [value for value, _ in anchors]
    expected = to_rgba_array([color for _, color in anchors])
    assert_array_almost_equal(cmap(positions), expected)
@image_comparison(['test_norm_abc.png'], remove_text=True,
                  tol=0 if platform.machine() == 'x86_64' else 0.05)
def test_norm_abc():
    # A minimal concrete subclass of the mcolors.Norm ABC: fixed [0, 1]
    # limits, maps values by halving them, and must be usable with
    # imshow / pcolor / contour / contourf via a Colorizer.
    class CustomHalfNorm(mcolors.Norm):
        def __init__(self):
            super().__init__()

        @property
        def vmin(self):
            return 0

        @property
        def vmax(self):
            return 1

        @property
        def clip(self):
            return False

        def __call__(self, value, clip=None):
            return value / 2

        def inverse(self, value):
            return 2 * value

        def autoscale(self, A):
            pass

        def autoscale_None(self, A):
            pass

        def scaled(self):
            return True

        @property
        def n_components(self):
            return 1

    fig, axes = plt.subplots(2,2)

    r = np.linspace(-1, 3, 16*16).reshape((16,16))
    norm = CustomHalfNorm()
    colorizer = mpl.colorizer.Colorizer(cmap='viridis', norm=norm)
    c = axes[0,0].imshow(r, colorizer=colorizer)
    axes[0,1].pcolor(r, colorizer=colorizer)
    axes[1,0].contour(r, colorizer=colorizer)
    axes[1,1].contourf(r, colorizer=colorizer)
def test_close_error_name():
    # The suggestion message contains regex metacharacters ([, ], ?, .),
    # and pytest.raises(match=...) applies re.search.  Unescaped, "[...]"
    # is a character class and "?" an optional quantifier, so the previous
    # pattern only effectively checked the message prefix.  Escape them so
    # the full message is verified.
    with pytest.raises(
            KeyError,
            match=(r"'grays' is not a valid value for colormap\. "
                   r"Did you mean one of \['gray', 'Grays', 'gray_r'\]\?")):
        matplotlib.colormaps["grays"]
def test_multi_norm_creation():
    """Constructor validation for mcolors.MultiNorm."""
    # Invalid inputs: a bare string, empty/None iterables, unknown norm
    # names, and objects that are not norms.
    with pytest.raises(ValueError,
                       match="MultiNorm must be assigned an iterable"):
        mcolors.MultiNorm("linear")
    with pytest.raises(ValueError,
                       match="MultiNorm must be assigned at least one"):
        mcolors.MultiNorm([])
    with pytest.raises(ValueError,
                       match="MultiNorm must be assigned an iterable"):
        mcolors.MultiNorm(None)
    with pytest.raises(ValueError,
                       match="not a valid"):
        mcolors.MultiNorm(["linear", "bad_norm_name"])
    with pytest.raises(ValueError,
                       match="Each norm assigned to MultiNorm"):
        mcolors.MultiNorm(["linear", object()])

    # Valid input: construction succeeds (smoke test; the result itself
    # is not inspected, so no binding is kept).
    mpl.colors.MultiNorm(['linear', 'linear'])
def test_multi_norm_call_vmin_vmax():
    # vmin/vmax on a MultiNorm are per-component tuples.
    norm = mpl.colors.MultiNorm(['linear', 'log'])
    norm.vmin = (1, 1)
    norm.vmax = (2, 2)
    assert norm.vmin == (1, 1)
    assert norm.vmax == (2, 2)

    # Scalars and wrong-length iterables are rejected for both limits.
    for bad_value in (1, (1, 2, 3)):
        for attr in ('vmin', 'vmax'):
            with pytest.raises(ValueError,
                               match="Expected an iterable of length 2"):
                setattr(norm, attr, bad_value)
def test_multi_norm_call_clip_inverse():
    # Per-component clipping and inverse() for a MultiNorm.
    # (The original "# test get vmin, vmax" comment was a copy-paste
    # leftover from the previous test.)
    norm = mpl.colors.MultiNorm(['linear', 'log'])
    norm.vmin = (1, 1)
    norm.vmax = (2, 2)

    # test call with clip
    assert_array_equal(norm([3, 3], clip=[False, False]), [2.0, 1.584962500721156])
    assert_array_equal(norm([3, 3], clip=[True, True]), [1.0, 1.0])
    assert_array_equal(norm([3, 3], clip=[True, False]), [1.0, 1.584962500721156])

    # The clip property stores per-component flags with the same effect.
    norm.clip = [False, False]
    assert_array_equal(norm([3, 3]), [2.0, 1.584962500721156])
    norm.clip = [True, True]
    assert_array_equal(norm([3, 3]), [1.0, 1.0])
    norm.clip = [True, False]
    assert_array_equal(norm([3, 3]), [1.0, 1.584962500721156])
    norm.clip = [True, True]

    # Scalar or wrong-length clip specifications are rejected, both on
    # the property and as a call argument.
    with pytest.raises(ValueError, match="Expected an iterable of length 2"):
        norm.clip = True
    with pytest.raises(ValueError, match="Expected an iterable of length 2"):
        norm.clip = [True, False, True]
    with pytest.raises(ValueError, match="Expected an iterable of length 2"):
        norm([3, 3], clip=True)
    with pytest.raises(ValueError, match="Expected an iterable of length 2"):
        norm([3, 3], clip=[True, True, True])

    # test inverse
    assert_array_almost_equal(norm.inverse([0.5, 0.5849625007211562]), [1.5, 1.5])
def test_multi_norm_autoscale():
    norm = mpl.colors.MultiNorm(['linear', 'log'])
    # test autoscale: limits are taken per component.
    norm.autoscale([[0, 1, 2, 3], [0.1, 1, 2, 3]])
    assert_array_equal(norm.vmin, [0, 0.1])
    assert_array_equal(norm.vmax, [3, 3])

    # test autoscale_none: only the unset limits (None) are filled in;
    # the preexisting vmin=0 and vmax=50 are kept.
    norm0 = mcolors.TwoSlopeNorm(2, vmin=0, vmax=None)
    norm = mcolors.MultiNorm([norm0, 'linear'], vmax=[None, 50])
    norm.autoscale_None([[1, 2, 3, 4, 5], [-50, 1, 0, 1, 500]])
    assert_array_equal(norm([5, 0]), [1, 0.5])
    assert_array_equal(norm.vmin, (0, -50))
    assert_array_equal(norm.vmax, (5, 50))
def test_mult_norm_call_types():
    """Accepted and rejected input formats when calling a MultiNorm."""
    mn = mpl.colors.MultiNorm(['linear', 'linear'])
    mn.vmin = (-2, -2)
    mn.vmax = (2, 2)
    vals = np.arange(6).reshape((3,2))

    target = np.ma.array([(0.5, 0.75),
                          (1., 1.25),
                          (1.5, 1.75)])

    # test structured array as input
    # (rfn is presumably numpy.lib.recfunctions — imported at module top.)
    from_mn = mn(rfn.unstructured_to_structured(vals))
    assert_array_almost_equal(from_mn,
                              target.T)
    # test list of arrays as input
    assert_array_almost_equal(mn(list(vals.T)),
                              list(target.T))
    # test list of floats as input
    assert_array_almost_equal(mn(list(vals[0])),
                              list(target[0]))
    # test tuple of arrays as input
    assert_array_almost_equal(mn(tuple(vals.T)),
                              list(target.T))

    # np.arrays of shapes that are compatible
    # (leading axis of length n_components).
    assert_array_almost_equal(mn(np.zeros(2)),
                              0.5*np.ones(2))
    assert_array_almost_equal(mn(np.zeros((2, 3))),
                              0.5*np.ones((2, 3)))
    assert_array_almost_equal(mn(np.zeros((2, 3, 4))),
                              0.5*np.ones((2, 3, 4)))

    # test with NoNorm, list as input; NoNorm passes values through and
    # preserves the input dtype, while the linear component normalizes.
    mn_no_norm = mpl.colors.MultiNorm(['linear', mcolors.NoNorm()])
    no_norm_out = mn_no_norm(list(vals.T))
    assert_array_almost_equal(no_norm_out,
                              [[0., 0.5, 1.],
                               [1, 3, 5]])
    assert no_norm_out[0].dtype == np.dtype('float64')
    assert no_norm_out[1].dtype == vals.dtype

    # test with NoNorm, structured array as input
    mn_no_norm = mpl.colors.MultiNorm(['linear', mcolors.NoNorm()])
    no_norm_out = mn_no_norm(rfn.unstructured_to_structured(vals))
    assert_array_almost_equal(no_norm_out,
                              [[0., 0.5, 1.],
                               [1, 3, 5]])

    # test single int as input
    with pytest.raises(ValueError,
                       match="component as input, but got 1 instead"):
        mn(1)
    # test list of incompatible size
    with pytest.raises(ValueError,
                       match="but got a sequence with 3 elements"):
        mn([3, 2, 1])

    # Error messages suggest conversions when only the *last* axis
    # matches the component count:
    # last axis matches, len(data.shape) > 2
    with pytest.raises(ValueError,
                       match=(r"`data_as_list = \[data\[..., i\] for i in "
                              r"range\(data.shape\[-1\]\)\]`")):
        mn(np.zeros((3, 3, 2)))
    # last axis matches, len(data.shape) == 2
    with pytest.raises(ValueError,
                       match=r"You can use `data_transposed = data.T` to convert"):
        mn(np.zeros((3, 2)))
    # incompatible arrays where no relevant axis matches
    for data in [np.zeros(3), np.zeros((3, 2, 3))]:
        with pytest.raises(ValueError,
                           match=r"but got a sequence with 3 elements"):
            mn(data)
    # test incompatible class
    with pytest.raises(ValueError,
                       match="but got <object object"):
        mn(object())
def test_ensure_multivariate_data():
# text complex input
for dtype, target in zip(["complex64", "complex128"], [np.float32, np.float64]):
data = np.arange(12).reshape((4, 3)).astype(dtype)
mdata = mcolorizer._ensure_multivariate_data(data, 2)
assert mdata.shape == (4, 3)
assert mdata.dtype.fields['f0'][0] == target
assert mdata.dtype.fields['f1'][0] == target
assert_array_almost_equal(mdata["f0"], np.arange(12).reshape((4, 3)))
assert_array_almost_equal(mdata["f1"], np.zeros(12).reshape((4, 3)))
# test complex masked
data = np.arange(12).reshape((4, 3)).astype('complex128')
data = np.ma.masked_where(data > 5, data)
mdata = mcolorizer._ensure_multivariate_data(data, 2)
assert np.all(mdata["f0"].mask[:2] == 0)
assert np.all(mdata["f0"].mask[2:] == 1)
assert np.all(mdata["f1"].mask[:2] == 0)
assert np.all(mdata["f1"].mask[2:] == 1)
# test tuple of data
data = [0, 1]
mdata = mcolorizer._ensure_multivariate_data(data, 2)
assert mdata.shape == ()
# test wrong input size
data = [[0, 1]]
with pytest.raises(ValueError, match="must contain complex numbers"):
mcolorizer._ensure_multivariate_data(data, 2)
data = [[0, 1]]
with pytest.raises(ValueError, match="have a first dimension 3"):
mcolorizer._ensure_multivariate_data(data, 3)
# test input of ints as list of lists
data = [[0, 0, 0], [1, 1, 1]]
mdata = mcolorizer._ensure_multivariate_data(data, 2)
assert mdata.shape == (3,)
assert mdata.dtype.fields['f0'][0] == np.int64
assert mdata.dtype.fields['f1'][0] == np.int64
# test input of floats, ints as tuple of lists
data = ([0.0, 0.0], [1, 1])
mdata = mcolorizer._ensure_multivariate_data(data, 2)
assert mdata.shape == (2,)
assert mdata.dtype.fields['f0'][0] == np.float64
assert mdata.dtype.fields['f1'][0] == np.int64
# test input of array of floats
data = np.array([[0.0, 0, 0], [1, 1, 1]])
mdata = mcolorizer._ensure_multivariate_data(data, 2)
assert mdata.shape == (3,)
assert mdata.dtype.fields['f0'][0] == np.float64
assert mdata.dtype.fields['f1'][0] == np.float64
# test more input dims
data = np.zeros((3, 4, 5, 6))
mdata = mcolorizer._ensure_multivariate_data(data, 3)
assert mdata.shape == (4, 5, 6)
def test_colorizer_multinorm_implicit():
ca = mcolorizer.Colorizer('BiOrangeBlue')
ca.vmin = (0, 0)
ca.vmax = (1, 1)
# test call with two single values
data = [0.1, 0.2]
res = (0.10009765625, 0.1510859375, 0.20166015625, 1.0)
assert_array_almost_equal(ca.to_rgba(data), res)
# test call with two 1d arrays
data = [[0.1, 0.2], [0.3, 0.4]]
res = [[0.10009766, 0.19998877, 0.29931641, 1.],
[0.20166016, 0.30098633, 0.40087891, 1.]]
assert_array_almost_equal(ca.to_rgba(data), res)
# test call with two 2d arrays
data = [np.linspace(0, 1, 12).reshape(3, 4),
np.linspace(1, 0, 12).reshape(3, 4)]
res = np.array([[[0.00244141, 0.50048437, 0.99853516, 1.],
[0.09228516, 0.50048437, 0.90869141, 1.],
[0.18212891, 0.50048437, 0.81884766, 1.],
[0.27197266, 0.50048437, 0.72900391, 1.]],
[[0.36572266, 0.50048437, 0.63525391, 1.],
[0.45556641, 0.50048438, 0.54541016, 1.],
[0.54541016, 0.50048438, 0.45556641, 1.],
[0.63525391, 0.50048437, 0.36572266, 1.]],
[[0.72900391, 0.50048437, 0.27197266, 1.],
[0.81884766, 0.50048437, 0.18212891, 1.],
[0.90869141, 0.50048437, 0.09228516, 1.],
[0.99853516, 0.50048437, 0.00244141, 1.]]])
assert_array_almost_equal(ca.to_rgba(data), res)
with pytest.raises(ValueError, match=("This MultiNorm has 2 components, "
"but got a sequence with 3 elements")):
ca.to_rgba([0.1, 0.2, 0.3])
with pytest.raises(ValueError, match=("This MultiNorm has 2 components, "
"but got a sequence with 1 elements")):
ca.to_rgba([[0.1]])
# test multivariate
ca = mcolorizer.Colorizer('3VarAddA')
ca.vmin = (-0.1, -0.2, -0.3)
ca.vmax = (0.1, 0.2, 0.3)
data = [0.1, 0.1, 0.1]
res = (0.712612, 0.896847, 0.954494, 1.0)
assert_array_almost_equal(ca.to_rgba(data), res)
def test_colorizer_multinorm_explicit():
with pytest.raises(ValueError, match="MultiNorm must be assigned"):
ca = mcolorizer.Colorizer('BiOrangeBlue', 'linear')
with pytest.raises(TypeError,
match=("'norm' must be an instance of matplotlib.colors.Norm"
", str or None, not a list")):
ca = mcolorizer.Colorizer('viridis', ['linear', 'linear'])
with pytest.raises(ValueError,
match=("Invalid norm for multivariate colormap with 2 inputs")):
ca = mcolorizer.Colorizer('BiOrangeBlue', ['linear', 'linear', 'log'])
# valid explicit construction
ca = mcolorizer.Colorizer('BiOrangeBlue', [mcolors.Normalize(), 'log'])
ca.vmin = (0, 0.01)
ca.vmax = (1, 1)
# test call with two single values
data = [0.1, 0.2]
res = (0.100098, 0.375492, 0.650879, 1.)
assert_array_almost_equal(ca.to_rgba(data), res)
def test_invalid_cmap_n_components_zero():
class CustomColormap(mcolors.Colormap):
def __init__(self):
super().__init__("custom")
self.n_variates = 0
with pytest.raises(ValueError, match='`n_variates` >= 1'):
ca = mcolorizer.Colorizer(CustomColormap())
def test_colorizer_bivar_cmap():
ca = mcolorizer.Colorizer('BiOrangeBlue', [mcolors.Normalize(), 'log'])
with pytest.raises(ValueError, match='The colormap viridis'):
ca.cmap = 'viridis'
cartist = mcolorizer.ColorizingArtist(ca)
cartist.set_array(np.zeros((2, 4, 4)))
with pytest.raises(ValueError, match='Invalid data entry for multivariate'):
cartist.set_array(np.zeros((3, 4, 4)))
dt = np.dtype([('x', 'f4'), ('', 'object')])
with pytest.raises(TypeError, match='converted to a sequence of floats'):
cartist.set_array(np.zeros((2, 4, 4), dtype=dt))
with pytest.raises(ValueError, match='all variates must have same shape'):
cartist.set_array((np.zeros(3), np.zeros(4)))
# ensure masked value is propagated from input
a = np.arange(3)
cartist.set_array((a, np.ma.masked_where(a > 1, a)))
assert np.all(cartist.get_array()['f0'].mask == np.array([0, 0, 0], dtype=bool))
assert np.all(cartist.get_array()['f1'].mask == np.array([0, 0, 1], dtype=bool))
# test clearing data
cartist.set_array(None)
cartist.get_array() is None
def test_colorizer_multivar_cmap():
ca = mcolorizer.Colorizer('3VarAddA', [mcolors.Normalize(),
mcolors.Normalize(),
'log'])
cartist = mcolorizer.ColorizingArtist(ca)
cartist.set_array(np.zeros((3, 5, 5)))
with pytest.raises(ValueError, match='Complex numbers are incompatible with'):
cartist.set_array(np.zeros((5, 5), dtype='complex128'))
| TestAsinhNorm |
python | spack__spack | lib/spack/spack/test/llnl/util/lock.py | {
"start": 26203,
"end": 37441
} | class ____(lk.Lock):
"""Test lock class that marks acquire/release events."""
def __init__(self, lock_path, vals):
super().__init__(lock_path)
self.vals = vals
# assert hooks for subclasses
assert_acquire_read = lambda self: None
assert_acquire_write = lambda self: None
assert_release_read = lambda self: None
assert_release_write = lambda self: None
def acquire_read(self, timeout=None):
self.assert_acquire_read()
result = super().acquire_read(timeout)
self.vals["acquired_read"] = True
return result
def acquire_write(self, timeout=None):
self.assert_acquire_write()
result = super().acquire_write(timeout)
self.vals["acquired_write"] = True
return result
def release_read(self, release_fn=None):
self.assert_release_read()
result = super().release_read(release_fn)
self.vals["released_read"] = True
return result
def release_write(self, release_fn=None):
self.assert_release_write()
result = super().release_write(release_fn)
self.vals["released_write"] = True
return result
@pytest.mark.parametrize(
"transaction,type", [(lk.ReadTransaction, "read"), (lk.WriteTransaction, "write")]
)
def test_transaction(lock_path, transaction, type):
class MockLock(AssertLock):
def assert_acquire_read(self):
assert not vals["entered_fn"]
assert not vals["exited_fn"]
def assert_release_read(self):
assert vals["entered_fn"]
assert not vals["exited_fn"]
def assert_acquire_write(self):
assert not vals["entered_fn"]
assert not vals["exited_fn"]
def assert_release_write(self):
assert vals["entered_fn"]
assert not vals["exited_fn"]
def enter_fn():
# assert enter_fn is called while lock is held
assert vals["acquired_%s" % type]
vals["entered_fn"] = True
def exit_fn(t, v, tb):
# assert exit_fn is called while lock is held
assert not vals["released_%s" % type]
vals["exited_fn"] = True
vals["exception"] = t or v or tb
vals = collections.defaultdict(lambda: False)
lock = MockLock(lock_path, vals)
with transaction(lock, acquire=enter_fn, release=exit_fn):
assert vals["acquired_%s" % type]
assert not vals["released_%s" % type]
assert vals["entered_fn"]
assert vals["exited_fn"]
assert vals["acquired_%s" % type]
assert vals["released_%s" % type]
assert not vals["exception"]
@pytest.mark.parametrize(
"transaction,type", [(lk.ReadTransaction, "read"), (lk.WriteTransaction, "write")]
)
def test_transaction_with_exception(lock_path, transaction, type):
class MockLock(AssertLock):
def assert_acquire_read(self):
assert not vals["entered_fn"]
assert not vals["exited_fn"]
def assert_release_read(self):
assert vals["entered_fn"]
assert not vals["exited_fn"]
def assert_acquire_write(self):
assert not vals["entered_fn"]
assert not vals["exited_fn"]
def assert_release_write(self):
assert vals["entered_fn"]
assert not vals["exited_fn"]
def enter_fn():
assert vals["acquired_%s" % type]
vals["entered_fn"] = True
def exit_fn(t, v, tb):
assert not vals["released_%s" % type]
vals["exited_fn"] = True
vals["exception"] = t or v or tb
return exit_result
exit_result = False
vals = collections.defaultdict(lambda: False)
lock = MockLock(lock_path, vals)
with pytest.raises(Exception):
with transaction(lock, acquire=enter_fn, release=exit_fn):
raise Exception()
assert vals["entered_fn"]
assert vals["exited_fn"]
assert vals["exception"]
# test suppression of exceptions from exit_fn
exit_result = True
vals.clear()
# should not raise now.
with transaction(lock, acquire=enter_fn, release=exit_fn):
raise Exception()
assert vals["entered_fn"]
assert vals["exited_fn"]
assert vals["exception"]
@pytest.mark.parametrize(
"transaction,type", [(lk.ReadTransaction, "read"), (lk.WriteTransaction, "write")]
)
def test_transaction_with_context_manager(lock_path, transaction, type):
class MockLock(AssertLock):
def assert_acquire_read(self):
assert not vals["entered_ctx"]
assert not vals["exited_ctx"]
def assert_release_read(self):
assert vals["entered_ctx"]
assert vals["exited_ctx"]
def assert_acquire_write(self):
assert not vals["entered_ctx"]
assert not vals["exited_ctx"]
def assert_release_write(self):
assert vals["entered_ctx"]
assert vals["exited_ctx"]
class TestContextManager:
def __enter__(self):
vals["entered_ctx"] = True
def __exit__(self, t, v, tb):
assert not vals["released_%s" % type]
vals["exited_ctx"] = True
vals["exception_ctx"] = t or v or tb
return exit_ctx_result
def exit_fn(t, v, tb):
assert not vals["released_%s" % type]
vals["exited_fn"] = True
vals["exception_fn"] = t or v or tb
return exit_fn_result
exit_fn_result, exit_ctx_result = False, False
vals = collections.defaultdict(lambda: False)
lock = MockLock(lock_path, vals)
with transaction(lock, acquire=TestContextManager, release=exit_fn):
pass
assert vals["entered_ctx"]
assert vals["exited_ctx"]
assert vals["exited_fn"]
assert not vals["exception_ctx"]
assert not vals["exception_fn"]
vals.clear()
with transaction(lock, acquire=TestContextManager):
pass
assert vals["entered_ctx"]
assert vals["exited_ctx"]
assert not vals["exited_fn"]
assert not vals["exception_ctx"]
assert not vals["exception_fn"]
# below are tests for exceptions with and without suppression
def assert_ctx_and_fn_exception(raises=True):
vals.clear()
if raises:
with pytest.raises(Exception):
with transaction(lock, acquire=TestContextManager, release=exit_fn):
raise Exception()
else:
with transaction(lock, acquire=TestContextManager, release=exit_fn):
raise Exception()
assert vals["entered_ctx"]
assert vals["exited_ctx"]
assert vals["exited_fn"]
assert vals["exception_ctx"]
assert vals["exception_fn"]
def assert_only_ctx_exception(raises=True):
vals.clear()
if raises:
with pytest.raises(Exception):
with transaction(lock, acquire=TestContextManager):
raise Exception()
else:
with transaction(lock, acquire=TestContextManager):
raise Exception()
assert vals["entered_ctx"]
assert vals["exited_ctx"]
assert not vals["exited_fn"]
assert vals["exception_ctx"]
assert not vals["exception_fn"]
# no suppression
assert_ctx_and_fn_exception(raises=True)
assert_only_ctx_exception(raises=True)
# suppress exception only in function
exit_fn_result, exit_ctx_result = True, False
assert_ctx_and_fn_exception(raises=False)
assert_only_ctx_exception(raises=True)
# suppress exception only in context
exit_fn_result, exit_ctx_result = False, True
assert_ctx_and_fn_exception(raises=False)
assert_only_ctx_exception(raises=False)
# suppress exception in function and context
exit_fn_result, exit_ctx_result = True, True
assert_ctx_and_fn_exception(raises=False)
assert_only_ctx_exception(raises=False)
def test_nested_write_transaction(lock_path):
"""Ensure that the outermost write transaction writes."""
def write(t, v, tb):
vals["wrote"] = True
vals = collections.defaultdict(lambda: False)
lock = AssertLock(lock_path, vals)
# write/write
with lk.WriteTransaction(lock, release=write):
assert not vals["wrote"]
with lk.WriteTransaction(lock, release=write):
assert not vals["wrote"]
assert not vals["wrote"]
assert vals["wrote"]
# read/write
vals.clear()
with lk.ReadTransaction(lock):
assert not vals["wrote"]
with lk.WriteTransaction(lock, release=write):
assert not vals["wrote"]
assert vals["wrote"]
# write/read/write
vals.clear()
with lk.WriteTransaction(lock, release=write):
assert not vals["wrote"]
with lk.ReadTransaction(lock):
assert not vals["wrote"]
with lk.WriteTransaction(lock, release=write):
assert not vals["wrote"]
assert not vals["wrote"]
assert not vals["wrote"]
assert vals["wrote"]
# read/write/read/write
vals.clear()
with lk.ReadTransaction(lock):
with lk.WriteTransaction(lock, release=write):
assert not vals["wrote"]
with lk.ReadTransaction(lock):
assert not vals["wrote"]
with lk.WriteTransaction(lock, release=write):
assert not vals["wrote"]
assert not vals["wrote"]
assert not vals["wrote"]
assert vals["wrote"]
def test_nested_reads(lock_path):
"""Ensure that write transactions won't re-read data."""
def read():
vals["read"] += 1
vals = collections.defaultdict(lambda: 0)
lock = AssertLock(lock_path, vals)
# read/read
vals.clear()
assert vals["read"] == 0
with lk.ReadTransaction(lock, acquire=read):
assert vals["read"] == 1
with lk.ReadTransaction(lock, acquire=read):
assert vals["read"] == 1
# write/write
vals.clear()
assert vals["read"] == 0
with lk.WriteTransaction(lock, acquire=read):
assert vals["read"] == 1
with lk.WriteTransaction(lock, acquire=read):
assert vals["read"] == 1
# read/write
vals.clear()
assert vals["read"] == 0
with lk.ReadTransaction(lock, acquire=read):
assert vals["read"] == 1
with lk.WriteTransaction(lock, acquire=read):
assert vals["read"] == 1
# write/read/write
vals.clear()
assert vals["read"] == 0
with lk.WriteTransaction(lock, acquire=read):
assert vals["read"] == 1
with lk.ReadTransaction(lock, acquire=read):
assert vals["read"] == 1
with lk.WriteTransaction(lock, acquire=read):
assert vals["read"] == 1
# read/write/read/write
vals.clear()
assert vals["read"] == 0
with lk.ReadTransaction(lock, acquire=read):
assert vals["read"] == 1
with lk.WriteTransaction(lock, acquire=read):
assert vals["read"] == 1
with lk.ReadTransaction(lock, acquire=read):
assert vals["read"] == 1
with lk.WriteTransaction(lock, acquire=read):
assert vals["read"] == 1
| AssertLock |
python | joke2k__faker | tests/providers/test_internet.py | {
"start": 32515,
"end": 32817
} | class ____:
"""Tests for the es_ES locale."""
def test_tld(self, faker):
tld = faker.tld()
assert tld in EsEsInternetProvider.tlds
def test_slug(self, faker):
num_of_samples = 100
for _ in range(num_of_samples):
assert faker.slug() != ""
| TestEsEs |
python | run-llama__llama_index | llama-index-core/llama_index/core/data_structs/data_structs.py | {
"start": 5189,
"end": 6440
} | class ____(IndexStruct):
"""A simple dictionary of documents."""
# TODO: slightly deprecated, should likely be a list or set now
# mapping from vector store id to node doc_id
nodes_dict: Dict[str, str] = field(default_factory=dict)
# TODO: deprecated, not used
# mapping from node doc_id to vector store id
doc_id_dict: Dict[str, List[str]] = field(default_factory=dict)
# TODO: deprecated, not used
# this should be empty for all other indices
embeddings_dict: Dict[str, List[float]] = field(default_factory=dict)
def add_node(
self,
node: BaseNode,
text_id: Optional[str] = None,
) -> str:
"""Add text to table, return current position in list."""
# # don't worry about child indices for now, nodes are all in order
# self.nodes_dict[int_id] = node
vector_id = text_id if text_id is not None else node.node_id
self.nodes_dict[vector_id] = node.node_id
return vector_id
def delete(self, doc_id: str) -> None:
"""Delete a Node."""
del self.nodes_dict[doc_id]
@classmethod
def get_type(cls) -> IndexStructType:
"""Get type."""
return IndexStructType.VECTOR_STORE
@dataclass
| IndexDict |
python | keon__algorithms | tests/test_dp.py | {
"start": 6358,
"end": 7551
} | class ____(unittest.TestCase):
def test_none_0(self):
s = ""
p = ""
self.assertTrue(regex_matching.is_match(s, p))
def test_none_1(self):
s = ""
p = "a"
self.assertFalse(regex_matching.is_match(s, p))
def test_no_symbol_equal(self):
s = "abcd"
p = "abcd"
self.assertTrue(regex_matching.is_match(s, p))
def test_no_symbol_not_equal_0(self):
s = "abcd"
p = "efgh"
self.assertFalse(regex_matching.is_match(s, p))
def test_no_symbol_not_equal_1(self):
s = "ab"
p = "abb"
self.assertFalse(regex_matching.is_match(s, p))
def test_symbol_0(self):
s = ""
p = "a*"
self.assertTrue(regex_matching.is_match(s, p))
def test_symbol_1(self):
s = "a"
p = "ab*"
self.assertTrue(regex_matching.is_match(s, p))
def test_symbol_2(self):
# E.g.
# s a b b
# p 1 0 0 0
# a 0 1 0 0
# b 0 0 1 0
# * 0 1 1 1
s = "abb"
p = "ab*"
self.assertTrue(regex_matching.is_match(s, p))
if __name__ == '__main__':
unittest.main()
| TestRegexMatching |
python | celery__celery | t/unit/backends/test_base.py | {
"start": 54216,
"end": 61486
} | class ____:
def test_should_retry_exception(self):
assert not BaseBackend(app=self.app).exception_safe_to_retry(Exception("test"))
def test_get_failed_never_retries(self):
self.app.conf.result_backend_always_retry, prev = False, self.app.conf.result_backend_always_retry
expected_exc = Exception("failed")
try:
b = BaseBackend(app=self.app)
b.exception_safe_to_retry = lambda exc: True
b._sleep = Mock()
b._get_task_meta_for = Mock()
b._get_task_meta_for.side_effect = [
expected_exc,
{'status': states.SUCCESS, 'result': 42}
]
try:
b.get_task_meta(sentinel.task_id)
assert False
except Exception as exc:
assert b._sleep.call_count == 0
assert exc == expected_exc
finally:
self.app.conf.result_backend_always_retry = prev
def test_get_with_retries(self):
self.app.conf.result_backend_always_retry, prev = True, self.app.conf.result_backend_always_retry
try:
b = BaseBackend(app=self.app)
b.exception_safe_to_retry = lambda exc: True
b._sleep = Mock()
b._get_task_meta_for = Mock()
b._get_task_meta_for.side_effect = [
Exception("failed"),
{'status': states.SUCCESS, 'result': 42}
]
res = b.get_task_meta(sentinel.task_id)
assert res == {'status': states.SUCCESS, 'result': 42}
assert b._sleep.call_count == 1
finally:
self.app.conf.result_backend_always_retry = prev
def test_get_reaching_max_retries(self):
self.app.conf.result_backend_always_retry, prev = True, self.app.conf.result_backend_always_retry
self.app.conf.result_backend_max_retries, prev_max_retries = 0, self.app.conf.result_backend_max_retries
try:
b = BaseBackend(app=self.app)
b.exception_safe_to_retry = lambda exc: True
b._sleep = Mock()
b._get_task_meta_for = Mock()
b._get_task_meta_for.side_effect = [
Exception("failed"),
{'status': states.SUCCESS, 'result': 42}
]
try:
b.get_task_meta(sentinel.task_id)
assert False
except BackendGetMetaError:
assert b._sleep.call_count == 0
finally:
self.app.conf.result_backend_always_retry = prev
self.app.conf.result_backend_max_retries = prev_max_retries
def test_get_unsafe_exception(self):
self.app.conf.result_backend_always_retry, prev = True, self.app.conf.result_backend_always_retry
expected_exc = Exception("failed")
try:
b = BaseBackend(app=self.app)
b._sleep = Mock()
b._get_task_meta_for = Mock()
b._get_task_meta_for.side_effect = [
expected_exc,
{'status': states.SUCCESS, 'result': 42}
]
try:
b.get_task_meta(sentinel.task_id)
assert False
except Exception as exc:
assert b._sleep.call_count == 0
assert exc == expected_exc
finally:
self.app.conf.result_backend_always_retry = prev
def test_store_result_never_retries(self):
self.app.conf.result_backend_always_retry, prev = False, self.app.conf.result_backend_always_retry
expected_exc = Exception("failed")
try:
b = BaseBackend(app=self.app)
b.exception_safe_to_retry = lambda exc: True
b._sleep = Mock()
b._get_task_meta_for = Mock()
b._get_task_meta_for.return_value = {
'status': states.RETRY,
'result': {
"exc_type": "Exception",
"exc_message": ["failed"],
"exc_module": "builtins",
},
}
b._store_result = Mock()
b._store_result.side_effect = [
expected_exc,
42
]
try:
b.store_result(sentinel.task_id, 42, states.SUCCESS)
except Exception as exc:
assert b._sleep.call_count == 0
assert exc == expected_exc
finally:
self.app.conf.result_backend_always_retry = prev
def test_store_result_with_retries(self):
self.app.conf.result_backend_always_retry, prev = True, self.app.conf.result_backend_always_retry
try:
b = BaseBackend(app=self.app)
b.exception_safe_to_retry = lambda exc: True
b._sleep = Mock()
b._get_task_meta_for = Mock()
b._get_task_meta_for.return_value = {
'status': states.RETRY,
'result': {
"exc_type": "Exception",
"exc_message": ["failed"],
"exc_module": "builtins",
},
}
b._store_result = Mock()
b._store_result.side_effect = [
Exception("failed"),
42
]
res = b.store_result(sentinel.task_id, 42, states.SUCCESS)
assert res == 42
assert b._sleep.call_count == 1
finally:
self.app.conf.result_backend_always_retry = prev
def test_store_result_reaching_max_retries(self):
self.app.conf.result_backend_always_retry, prev = True, self.app.conf.result_backend_always_retry
self.app.conf.result_backend_max_retries, prev_max_retries = 0, self.app.conf.result_backend_max_retries
try:
b = BaseBackend(app=self.app)
b.exception_safe_to_retry = lambda exc: True
b._sleep = Mock()
b._get_task_meta_for = Mock()
b._get_task_meta_for.return_value = {
'status': states.RETRY,
'result': {
"exc_type": "Exception",
"exc_message": ["failed"],
"exc_module": "builtins",
},
}
b._store_result = Mock()
b._store_result.side_effect = [
Exception("failed"),
42
]
try:
b.store_result(sentinel.task_id, 42, states.SUCCESS)
assert False
except BackendStoreError:
assert b._sleep.call_count == 0
finally:
self.app.conf.result_backend_always_retry = prev
self.app.conf.result_backend_max_retries = prev_max_retries
def test_result_backend_thread_safe(self):
# Should identify the backend as thread safe
self.app.conf.result_backend_thread_safe = True
b = BaseBackend(app=self.app)
assert b.thread_safe is True
def test_result_backend_not_thread_safe(self):
# Should identify the backend as not being thread safe
self.app.conf.result_backend_thread_safe = False
b = BaseBackend(app=self.app)
assert b.thread_safe is False
| test_backend_retries |
python | getsentry__sentry | src/sentry/sentry_metrics/querying/data/parsing.py | {
"start": 738,
"end": 3468
} | class ____:
"""
Represents a parser which is responsible for generating queries given a list of MQLQuery(s).
"""
def __init__(
self,
projects: Sequence[Project],
environments: Sequence[Environment],
mql_queries: Sequence[MQLQuery],
):
self._projects = projects
self._environments = environments
self._mql_queries = mql_queries
def _parse_mql(self, mql: str) -> VisitableQueryExpression:
"""
Parses the field with the MQL grammar.
Returns:
A VisitableQueryExpression that wraps the AST generated by the query string and allows visitors to
be applied on top.
"""
try:
query = parse_mql(mql)
except InvalidMQLQueryError as e:
metrics.incr(key="ddm.metrics_api.parsing.error")
cause = e.__cause__
if cause and isinstance(cause, IncompleteParseError):
error_context = cause.text[cause.pos : cause.pos + 20]
# We expose the entire MQL string to give more context when solving the error, since in the future we
# expect that MQL will be directly fed into the endpoint instead of being built from the supplied
# fields.
raise InvalidMetricsQueryError(
f"The query '{mql}' could not be matched starting from '{error_context}...'"
) from e
raise InvalidMetricsQueryError("The supplied query is not valid") from e
return VisitableQueryExpression(query=query)
def generate_queries(
self,
) -> Generator[tuple[QueryExpression, QueryOrder | None, int | None]]:
"""
Generates multiple queries given a base query.
Returns:
A generator which can be used to obtain a query to execute and its details.
"""
for mql_query in self._mql_queries:
compiled_mql_query = mql_query.compile()
query_expression = (
self._parse_mql(compiled_mql_query.mql)
# We validate the query.
.add_visitor(QueryValidationV2Visitor())
# We inject the environment filter in each timeseries.
.add_visitor(EnvironmentsInjectionVisitor(self._environments))
# We transform all `release:latest` filters into the actual latest releases.
.add_visitor(
QueryConditionsCompositeVisitor(
LatestReleaseTransformationVisitor(self._projects)
)
).get()
)
yield query_expression, compiled_mql_query.order, compiled_mql_query.limit
| QueryParser |
python | eth-brownie__brownie | brownie/_gui/source.py | {
"start": 5615,
"end": 8676
} | class ____(tk.Frame):
def __init__(self, root, text, suffix):
super().__init__(root)
self._text = tk.Text(self, width=90, yscrollcommand=self._text_scroll)
self._scroll = ttk.Scrollbar(self)
self._scroll.pack(side="left", fill="y")
self._scroll.config(command=self._scrollbar_scroll)
self._line_no = tk.Text(self, width=4, yscrollcommand=self._text_scroll)
self._line_no.pack(side="left", fill="y")
self._text.pack(side="right", fill="y")
self._text.insert(1.0, text)
for k, v in TEXT_COLORS.items():
self._text.tag_config(k, **v)
if suffix == ".sol":
pattern = r"((?:\s*\/\/[^\n]*)|(?:\/\*[\s\S]*?\*\/))"
else:
pattern = r"((#[^\n]*\n)|(\"\"\"[\s\S]*?\"\"\")|('''[\s\S]*?'''))"
for match in regex_finditer(pattern, text):
self.tag_add("comment", match.start(), match.end())
self._line_no.insert(1.0, "\n".join(str(i) for i in range(1, text.count("\n") + 2)))
self._line_no.tag_configure("justify", justify="right")
self._line_no.tag_add("justify", 1.0, "end")
for text in (self._line_no, self._text):
text.config(**TEXT_STYLE)
text.config(tabs=tkFont.Font(font=text["font"]).measure(" "), wrap="none")
self._line_no.config(background="#272727")
self._text.bind("<ButtonRelease-1>", root._search)
def __getattr__(self, attr):
return getattr(self._text, attr)
def config(self, **kwargs):
self._text.config(**kwargs)
self._line_no.config(**kwargs)
def clear_highlight(self):
self._text.tag_remove("sel", 1.0, "end")
def highlight(self, start, end):
self.clear_highlight()
self.tag_add("sel", start, end, True)
def tag_add(self, tag, start, end, see=False):
start = self._offset_to_coord(start)
if type(end) is not str:
end = self._offset_to_coord(end)
self._text.tag_add(tag, start, end)
if see:
self._text.see(end)
self._text.see(start)
def tag_ranges(self, tag):
return [self._coord_to_offset(i.string) for i in self._text.tag_ranges(tag)]
def tag_remove(self, tag):
self._text.tag_remove(tag, 1.0, "end")
def _offset_to_coord(self, value):
text = self._text.get(1.0, "end")
line = text[:value].count("\n") + 1
offset = len(text[:value].split("\n")[-1])
return f"{line}.{offset}"
def _coord_to_offset(self, value):
row, col = [int(i) for i in value.split(".")]
text = self._text.get(1.0, "end").split("\n")
return sum(len(i) + 1 for i in text[: row - 1]) + col
def _scrollbar_scroll(self, action, position, type=None):
self._text.yview_moveto(position)
self._line_no.yview_moveto(position)
def _text_scroll(self, first, last, type=None):
self._text.yview_moveto(first)
self._line_no.yview_moveto(first)
self._scroll.set(first, last)
| SourceFrame |
python | django__django | tests/model_fields/test_foreignkey.py | {
"start": 320,
"end": 5921
} | class ____(TestCase):
def test_callable_default(self):
"""A lazy callable may be used for ForeignKey.default."""
a = Foo.objects.create(id=1, a="abc", d=Decimal("12.34"))
b = Bar.objects.create(b="bcd")
self.assertEqual(b.a, a)
@skipIfDBFeature("interprets_empty_strings_as_nulls")
def test_empty_string_fk(self):
"""
Empty strings foreign key values don't get converted to None (#19299).
"""
char_model_empty = PrimaryKeyCharModel.objects.create(string="")
fk_model_empty = FkToChar.objects.create(out=char_model_empty)
fk_model_empty = FkToChar.objects.select_related("out").get(
id=fk_model_empty.pk
)
self.assertEqual(fk_model_empty.out, char_model_empty)
@isolate_apps("model_fields")
def test_warning_when_unique_true_on_fk(self):
class Foo(models.Model):
pass
class FKUniqueTrue(models.Model):
fk_field = models.ForeignKey(Foo, models.CASCADE, unique=True)
model = FKUniqueTrue()
expected_warnings = [
checks.Warning(
"Setting unique=True on a ForeignKey has the same effect as using a "
"OneToOneField.",
hint=(
"ForeignKey(unique=True) is usually better served by a "
"OneToOneField."
),
obj=FKUniqueTrue.fk_field.field,
id="fields.W342",
)
]
warnings = model.check()
self.assertEqual(warnings, expected_warnings)
def test_related_name_converted_to_text(self):
rel_name = Bar._meta.get_field("a").remote_field.related_name
self.assertIsInstance(rel_name, str)
def test_abstract_model_pending_operations(self):
"""
Foreign key fields declared on abstract models should not add lazy
relations to resolve relationship declared as string (#24215).
"""
pending_ops_before = list(apps._pending_operations.items())
class AbstractForeignKeyModel(models.Model):
fk = models.ForeignKey("missing.FK", models.CASCADE)
class Meta:
abstract = True
self.assertIs(AbstractForeignKeyModel._meta.apps, apps)
self.assertEqual(
pending_ops_before,
list(apps._pending_operations.items()),
"Pending lookup added for a foreign key on an abstract model",
)
@isolate_apps("model_fields", "model_fields.tests")
def test_abstract_model_app_relative_foreign_key(self):
class AbstractReferent(models.Model):
reference = models.ForeignKey("Referred", on_delete=models.CASCADE)
class Meta:
app_label = "model_fields"
abstract = True
def assert_app_model_resolved(label):
class Referred(models.Model):
class Meta:
app_label = label
class ConcreteReferent(AbstractReferent):
class Meta:
app_label = label
self.assertEqual(
ConcreteReferent._meta.get_field("reference").related_model, Referred
)
assert_app_model_resolved("model_fields")
assert_app_model_resolved("tests")
@isolate_apps("model_fields")
def test_to_python(self):
class Foo(models.Model):
pass
class Bar(models.Model):
fk = models.ForeignKey(Foo, models.CASCADE)
self.assertEqual(Bar._meta.get_field("fk").to_python("1"), 1)
@isolate_apps("model_fields")
def test_fk_to_fk_get_col_output_field(self):
class Foo(models.Model):
pass
class Bar(models.Model):
foo = models.ForeignKey(Foo, models.CASCADE, primary_key=True)
class Baz(models.Model):
bar = models.ForeignKey(Bar, models.CASCADE, primary_key=True)
col = Baz._meta.get_field("bar").get_col("alias")
self.assertIs(col.output_field, Foo._meta.pk)
@isolate_apps("model_fields")
def test_recursive_fks_get_col(self):
class Foo(models.Model):
bar = models.ForeignKey("Bar", models.CASCADE, primary_key=True)
class Bar(models.Model):
foo = models.ForeignKey(Foo, models.CASCADE, primary_key=True)
with self.assertRaisesMessage(ValueError, "Cannot resolve output_field"):
Foo._meta.get_field("bar").get_col("alias")
@isolate_apps("model_fields")
def test_non_local_to_field(self):
class Parent(models.Model):
key = models.IntegerField(unique=True)
class Child(Parent):
pass
class Related(models.Model):
child = models.ForeignKey(Child, on_delete=models.CASCADE, to_field="key")
msg = (
"'model_fields.Related.child' refers to field 'key' which is not "
"local to model 'model_fields.Child'."
)
with self.assertRaisesMessage(FieldError, msg):
Related._meta.get_field("child").related_fields
def test_invalid_to_parameter(self):
msg = (
"ForeignKey(1) is invalid. First parameter to ForeignKey must be "
"either a model, a model name, or the string 'self'"
)
with self.assertRaisesMessage(TypeError, msg):
class MyModel(models.Model):
child = models.ForeignKey(1, models.CASCADE)
def test_class_getitem(self):
self.assertIs(models.ForeignKey["Foo"], models.ForeignKey)
| ForeignKeyTests |
python | wandb__wandb | wandb/vendor/pygments/util.py | {
"start": 809,
"end": 9123
} | class ____(Exception):
pass
def get_choice_opt(options, optname, allowed, default=None, normcase=False):
string = options.get(optname, default)
if normcase:
string = string.lower()
if string not in allowed:
raise OptionError('Value for option %s must be one of %s' %
(optname, ', '.join(map(str, allowed))))
return string
def get_bool_opt(options, optname, default=None):
string = options.get(optname, default)
if isinstance(string, bool):
return string
elif isinstance(string, int):
return bool(string)
elif not isinstance(string, string_types):
raise OptionError('Invalid type %r for option %s; use '
'1/0, yes/no, true/false, on/off' % (
string, optname))
elif string.lower() in ('1', 'yes', 'true', 'on'):
return True
elif string.lower() in ('0', 'no', 'false', 'off'):
return False
else:
raise OptionError('Invalid value %r for option %s; use '
'1/0, yes/no, true/false, on/off' % (
string, optname))
def get_int_opt(options, optname, default=None):
string = options.get(optname, default)
try:
return int(string)
except TypeError:
raise OptionError('Invalid type %r for option %s; you '
'must give an integer value' % (
string, optname))
except ValueError:
raise OptionError('Invalid value %r for option %s; you '
'must give an integer value' % (
string, optname))
def get_list_opt(options, optname, default=None):
val = options.get(optname, default)
if isinstance(val, string_types):
return val.split()
elif isinstance(val, (list, tuple)):
return list(val)
else:
raise OptionError('Invalid type %r for option %s; you '
'must give a list value' % (
val, optname))
def docstring_headline(obj):
if not obj.__doc__:
return ''
res = []
for line in obj.__doc__.strip().splitlines():
if line.strip():
res.append(" " + line.strip())
else:
break
return ''.join(res).lstrip()
def make_analysator(f):
"""Return a static text analyser function that returns float values."""
def text_analyse(text):
try:
rv = f(text)
except Exception:
return 0.0
if not rv:
return 0.0
try:
return min(1.0, max(0.0, float(rv)))
except (ValueError, TypeError):
return 0.0
text_analyse.__doc__ = f.__doc__
return staticmethod(text_analyse)
def shebang_matches(text, regex):
r"""Check if the given regular expression matches the last part of the
shebang if one exists.
>>> from pygments.util import shebang_matches
>>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/startsomethingwith python',
... r'python(2\.\d)?')
True
It also checks for common windows executable file extensions::
>>> shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?')
True
Parameters (``'-f'`` or ``'--foo'`` are ignored so ``'perl'`` does
the same as ``'perl -e'``)
Note that this method automatically searches the whole string (eg:
the regular expression is wrapped in ``'^$'``)
"""
index = text.find('\n')
if index >= 0:
first_line = text[:index].lower()
else:
first_line = text.lower()
if first_line.startswith('#!'):
try:
found = [x for x in split_path_re.split(first_line[2:].strip())
if x and not x.startswith('-')][-1]
except IndexError:
return False
regex = re.compile(r'^%s(\.(exe|cmd|bat|bin))?$' % regex, re.IGNORECASE)
if regex.search(found) is not None:
return True
return False
def doctype_matches(text, regex):
"""Check if the doctype matches a regular expression (if present).
Note that this method only checks the first part of a DOCTYPE.
eg: 'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'
"""
m = doctype_lookup_re.match(text)
if m is None:
return False
doctype = m.group(2)
return re.compile(regex, re.I).match(doctype.strip()) is not None
def html_doctype_matches(text):
"""Check if the file looks like it has a html doctype."""
return doctype_matches(text, r'html')
_looks_like_xml_cache = {}
def looks_like_xml(text):
"""Check if a doctype exists or if we have some tags."""
if xml_decl_re.match(text):
return True
key = hash(text)
try:
return _looks_like_xml_cache[key]
except KeyError:
m = doctype_lookup_re.match(text)
if m is not None:
return True
rv = tag_re.search(text[:1000]) is not None
_looks_like_xml_cache[key] = rv
return rv
# Python narrow build compatibility
def _surrogatepair(c):
# Given a unicode character code
# with length greater than 16 bits,
# return the two 16 bit surrogate pair.
# From example D28 of:
# http://www.unicode.org/book/ch03.pdf
return (0xd7c0 + (c >> 10), (0xdc00 + (c & 0x3ff)))
def unirange(a, b):
"""Returns a regular expression string to match the given non-BMP range."""
if b < a:
raise ValueError("Bad character range")
if a < 0x10000 or b < 0x10000:
raise ValueError("unirange is only defined for non-BMP ranges")
if sys.maxunicode > 0xffff:
# wide build
return u'[%s-%s]' % (unichr(a), unichr(b))
else:
# narrow build stores surrogates, and the 're' module handles them
# (incorrectly) as characters. Since there is still ordering among
# these characters, expand the range to one that it understands. Some
# background in http://bugs.python.org/issue3665 and
# http://bugs.python.org/issue12749
#
# Additionally, the lower constants are using unichr rather than
# literals because jython [which uses the wide path] can't load this
# file if they are literals.
ah, al = _surrogatepair(a)
bh, bl = _surrogatepair(b)
if ah == bh:
return u'(?:%s[%s-%s])' % (unichr(ah), unichr(al), unichr(bl))
else:
buf = []
buf.append(u'%s[%s-%s]' %
(unichr(ah), unichr(al),
ah == bh and unichr(bl) or unichr(0xdfff)))
if ah - bh > 1:
buf.append(u'[%s-%s][%s-%s]' %
unichr(ah+1), unichr(bh-1), unichr(0xdc00), unichr(0xdfff))
if ah != bh:
buf.append(u'%s[%s-%s]' %
(unichr(bh), unichr(0xdc00), unichr(bl)))
return u'(?:' + u'|'.join(buf) + u')'
def format_lines(var_name, seq, raw=False, indent_level=0):
"""Formats a sequence of strings for output."""
lines = []
base_indent = ' ' * indent_level * 4
inner_indent = ' ' * (indent_level + 1) * 4
lines.append(base_indent + var_name + ' = (')
if raw:
# These should be preformatted reprs of, say, tuples.
for i in seq:
lines.append(inner_indent + i + ',')
else:
for i in seq:
# Force use of single quotes
r = repr(i + '"')
lines.append(inner_indent + r[:-2] + r[-1] + ',')
lines.append(base_indent + ')')
return '\n'.join(lines)
def duplicates_removed(it, already_seen=()):
"""
Returns a list with duplicates removed from the iterable `it`.
Order is preserved.
"""
lst = []
seen = set()
for i in it:
if i in seen or i in already_seen:
continue
lst.append(i)
seen.add(i)
return lst
| OptionError |
python | jd__tenacity | tests/test_tenacity.py | {
"start": 58672,
"end": 58985
} | class ____(unittest.TestCase):
def test_retry_error_is_pickleable(self):
import pickle
expected = RetryError(last_attempt=123)
pickled = pickle.dumps(expected)
actual = pickle.loads(pickled)
self.assertEqual(expected.last_attempt, actual.last_attempt)
| TestRetryException |
python | django__django | tests/auth_tests/test_management.py | {
"start": 3413,
"end": 3587
} | class ____:
"""
A fake stdin object that pretends to be a TTY to be used in conjunction
with mock_inputs.
"""
def isatty(self):
return True
| MockTTY |
python | sympy__sympy | sympy/physics/quantum/spin.py | {
"start": 9327,
"end": 10541
} | class ____(SpinOpBase, HermitianOperator):
"""The Jy operator."""
_coord = 'y'
basis = 'Jy'
def _eval_commutator_JzOp(self, other):
return I*hbar*JxOp(self.name)
def _eval_commutator_JxOp(self, other):
return -I*hbar*J2Op(self.name)
def _apply_operator_JzKet(self, ket, **options):
jp = JplusOp(self.name)._apply_operator_JzKet(ket, **options)
jm = JminusOp(self.name)._apply_operator_JzKet(ket, **options)
return (jp - jm)/(Integer(2)*I)
def _apply_operator_JzKetCoupled(self, ket, **options):
jp = JplusOp(self.name)._apply_operator_JzKetCoupled(ket, **options)
jm = JminusOp(self.name)._apply_operator_JzKetCoupled(ket, **options)
return (jp - jm)/(Integer(2)*I)
def _represent_default_basis(self, **options):
return self._represent_JzOp(None, **options)
def _represent_JzOp(self, basis, **options):
jp = JplusOp(self.name)._represent_JzOp(basis, **options)
jm = JminusOp(self.name)._represent_JzOp(basis, **options)
return (jp - jm)/(Integer(2)*I)
def _eval_rewrite_as_plusminus(self, *args, **kwargs):
return (JplusOp(args[0]) - JminusOp(args[0]))/(2*I)
| JyOp |
python | doocs__leetcode | solution/3200-3299/3229.Minimum Operations to Make Array Equal to Target/Solution.py | {
"start": 0,
"end": 439
} | class ____:
def minimumOperations(self, nums: List[int], target: List[int]) -> int:
n = len(nums)
f = abs(target[0] - nums[0])
for i in range(1, n):
x = target[i] - nums[i]
y = target[i - 1] - nums[i - 1]
if x * y > 0:
d = abs(x) - abs(y)
if d > 0:
f += d
else:
f += abs(x)
return f
| Solution |
python | django-crispy-forms__django-crispy-forms | crispy_forms/bootstrap.py | {
"start": 11646,
"end": 13254
} | class ____(Field):
"""
Layout object for rendering radiobuttons inline.
Attributes
----------
template : str
The default template which this Layout Object will be rendered
with.
attrs : dict
Attributes to be applied to the field. These are converted into html
attributes. e.g. ``data_id: 'test'`` in the attrs dict will become
``data-id='test'`` on the field's ``<input>``.
Parameters
----------
*fields : str
Usually a single field, but can be any number of fields, to be rendered
with the same attributes applied.
css_class : str, optional
CSS classes to be applied to the field. These are added to any classes
included in the ``attrs`` dict. By default ``None``.
wrapper_class: str, optional
CSS classes to be used when rendering the Field. This class is usually
applied to the ``<div>`` which wraps the Field's ``<label>`` and
``<input>`` tags. By default ``None``.
template : str, optional
Overrides the default template, if provided. By default ``None``.
**kwargs : dict, optional
Additional attributes are converted into key="value", pairs. These
attributes are added to the ``<div>``.
Examples
--------
Example::
InlineRadios('field_name')
"""
template = "%s/layout/radioselect_inline.html"
def render(self, form, context, template_pack=TEMPLATE_PACK, **kwargs):
return super().render(form, context, template_pack=template_pack, extra_context={"inline_class": "inline"})
| InlineRadios |
python | google__pytype | pytype/directors/parser.py | {
"start": 1493,
"end": 1609
} | class ____(LineRange):
name: str
annotations: dict[str, str]
@dataclasses.dataclass(frozen=True)
| _ParamAnnotations |
python | kamyu104__LeetCode-Solutions | Python/count-increasing-quadruplets.py | {
"start": 642,
"end": 1313
} | class ____(object):
def countQuadruplets(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
right = [[0]*(len(nums)+1) for _ in xrange(len(nums))]
for j in xrange(len(nums)):
for i in reversed(xrange(j+1, len(nums))):
right[j][i] = right[j][i+1] + int(nums[i] > nums[j])
result = 0
for k in xrange(len(nums)):
left = 0
for j in xrange(k):
if nums[k] < nums[j]:
result += left*right[j][k+1]
left += int(nums[k] > nums[j])
return result
# Time: O(n^2)
# Space: O(n^2)
# prefix sum
| Solution2 |
python | numba__numba | numba/cuda/tests/cudapy/test_record_dtype.py | {
"start": 2858,
"end": 8725
} | class ____(CUDATestCase):
def _createSampleArrays(self):
self.sample1d = np.recarray(3, dtype=recordtype)
self.samplerec1darr = np.recarray(1, dtype=recordwitharray)[0]
self.samplerec2darr = np.recarray(1, dtype=recordwith2darray)[0]
def setUp(self):
super().setUp()
self._createSampleArrays()
ary = self.sample1d
for i in range(ary.size):
x = i + 1
ary[i]['a'] = x / 2
ary[i]['b'] = x
ary[i]['c'] = x * 1j
ary[i]['d'] = "%d" % x
def get_cfunc(self, pyfunc, argspec):
return cuda.jit()(pyfunc)
def _test_set_equal(self, pyfunc, value, valuetype):
rec = numpy_support.from_dtype(recordtype)
cfunc = self.get_cfunc(pyfunc, (rec[:], types.intp, valuetype))
for i in range(self.sample1d.size):
got = self.sample1d.copy()
# Force the argument to the pure Python function to be
# a recarray, as attribute access isn't supported on
# structured arrays.
expect = got.copy().view(np.recarray)
cfunc[1, 1](got, i, value)
pyfunc(expect, i, value)
# Match the entire array to ensure no memory corruption
self.assertTrue(np.all(expect == got))
def test_set_a(self):
self._test_set_equal(set_a, 3.1415, types.float64)
# Test again to check if coercion works
self._test_set_equal(set_a, 3., types.float32)
def test_set_b(self):
self._test_set_equal(set_b, 123, types.int32)
# Test again to check if coercion works
self._test_set_equal(set_b, 123, types.float64)
def test_set_c(self):
self._test_set_equal(set_c, 43j, types.complex64)
# Test again to check if coercion works
self._test_set_equal(set_c, 43j, types.complex128)
def test_set_record(self):
pyfunc = set_record
rec = numpy_support.from_dtype(recordtype)
cfunc = self.get_cfunc(pyfunc, (rec[:], types.intp, types.intp))
test_indices = [(0, 1), (1, 2), (0, 2)]
for i, j in test_indices:
expect = self.sample1d.copy()
pyfunc(expect, i, j)
got = self.sample1d.copy()
cfunc[1, 1](got, i, j)
# Match the entire array to ensure no memory corruption
self.assertEqual(expect[i], expect[j])
self.assertEqual(got[i], got[j])
self.assertTrue(np.all(expect == got))
def _test_rec_set(self, v, pyfunc, f):
rec = self.sample1d.copy()[0]
nbrecord = numpy_support.from_dtype(recordtype)
cfunc = self.get_cfunc(pyfunc, (nbrecord,))
cfunc[1, 1](rec, v)
np.testing.assert_equal(rec[f], v)
def test_rec_set_a(self):
self._test_rec_set(np.float64(1.5), record_set_a, 'a')
def test_rec_set_b(self):
self._test_rec_set(np.int32(2), record_set_b, 'b')
def test_rec_set_c(self):
self._test_rec_set(np.complex64(4.0 + 5.0j), record_set_c, 'c')
def _test_rec_read(self, v, pyfunc, f):
rec = self.sample1d.copy()[0]
rec[f] = v
arr = np.zeros(1, v.dtype)
nbrecord = numpy_support.from_dtype(recordtype)
cfunc = self.get_cfunc(pyfunc, (nbrecord,))
cfunc[1, 1](rec, arr)
np.testing.assert_equal(arr[0], v)
def test_rec_read_a(self):
self._test_rec_read(np.float64(1.5), record_read_a, 'a')
def test_rec_read_b(self):
self._test_rec_read(np.int32(2), record_read_b, 'b')
def test_rec_read_c(self):
self._test_rec_read(np.complex64(4.0 + 5.0j), record_read_c, 'c')
def test_record_write_1d_array(self):
'''
Test writing to a 1D array within a structured type
'''
rec = self.samplerec1darr.copy()
nbrecord = numpy_support.from_dtype(recordwitharray)
cfunc = self.get_cfunc(record_write_array, (nbrecord,))
cfunc[1, 1](rec)
expected = self.samplerec1darr.copy()
expected['g'] = 2
expected['h'][0] = 3.0
expected['h'][1] = 4.0
np.testing.assert_equal(expected, rec)
def test_record_write_2d_array(self):
'''
Test writing to a 2D array within a structured type
'''
rec = self.samplerec2darr.copy()
nbrecord = numpy_support.from_dtype(recordwith2darray)
cfunc = self.get_cfunc(record_write_2d_array, (nbrecord,))
cfunc[1, 1](rec)
expected = self.samplerec2darr.copy()
expected['i'] = 3
expected['j'][:] = np.asarray([5.0, 6.0, 7.0, 8.0, 9.0, 10.0],
np.float32).reshape(3, 2)
np.testing.assert_equal(expected, rec)
def test_record_read_1d_array(self):
'''
Test reading from a 1D array within a structured type
'''
rec = self.samplerec1darr.copy()
rec['h'][0] = 4.0
rec['h'][1] = 5.0
nbrecord = numpy_support.from_dtype(recordwitharray)
cfunc = self.get_cfunc(record_read_array, (nbrecord,))
arr = np.zeros(2, dtype=rec['h'].dtype)
cfunc[1, 1](rec, arr)
np.testing.assert_equal(rec['h'], arr)
def test_record_read_2d_array(self):
'''
Test reading from a 2D array within a structured type
'''
rec = self.samplerec2darr.copy()
rec['j'][:] = np.asarray([5.0, 6.0, 7.0, 8.0, 9.0, 10.0],
np.float32).reshape(3, 2)
nbrecord = numpy_support.from_dtype(recordwith2darray)
cfunc = self.get_cfunc(record_read_2d_array, (nbrecord,))
arr = np.zeros((3,2), dtype=rec['j'].dtype)
cfunc[1, 1](rec, arr)
np.testing.assert_equal(rec['j'], arr)
@skip_on_cudasim('Structured array attr access not supported in simulator')
| TestRecordDtype |
python | django__django | django/core/management/templates.py | {
"start": 585,
"end": 15458
} | class ____(BaseCommand):
"""
Copy either a Django application layout template or a Django project
layout template into the specified directory.
:param style: A color style object (see django.core.management.color).
:param app_or_project: The string 'app' or 'project'.
:param name: The name of the application or project.
:param directory: The directory to which the template should be copied.
:param options: The additional variables passed to project or app templates
"""
requires_system_checks = []
# The supported URL schemes
url_schemes = ["http", "https", "ftp"]
# Rewrite the following suffixes when determining the target filename.
rewrite_template_suffixes = (
# Allow shipping invalid .py files without byte-compilation.
(".py-tpl", ".py"),
)
def add_arguments(self, parser):
parser.add_argument("name", help="Name of the application or project.")
parser.add_argument(
"directory",
nargs="?",
help="Optional destination directory, this will be created if needed.",
)
parser.add_argument(
"--template", help="The path or URL to load the template from."
)
parser.add_argument(
"--extension",
"-e",
dest="extensions",
action="append",
default=["py"],
help='The file extension(s) to render (default: "py"). '
"Separate multiple extensions with commas, or use "
"-e multiple times.",
)
parser.add_argument(
"--name",
"-n",
dest="files",
action="append",
default=[],
help="The file name(s) to render. Separate multiple file names "
"with commas, or use -n multiple times.",
)
parser.add_argument(
"--exclude",
"-x",
action="append",
default=argparse.SUPPRESS,
nargs="?",
const="",
help=(
"The directory name(s) to exclude, in addition to .git and "
"__pycache__. Can be used multiple times."
),
)
def handle(self, app_or_project, name, target=None, **options):
self.app_or_project = app_or_project
self.a_or_an = "an" if app_or_project == "app" else "a"
self.paths_to_remove = []
self.verbosity = options["verbosity"]
self.validate_name(name)
# if some directory is given, make sure it's nicely expanded
if target is None:
top_dir = os.path.join(os.getcwd(), name)
try:
os.makedirs(top_dir)
except FileExistsError:
raise CommandError("'%s' already exists" % top_dir)
except OSError as e:
raise CommandError(e)
else:
top_dir = os.path.abspath(os.path.expanduser(target))
if app_or_project == "app":
self.validate_name(os.path.basename(top_dir), "directory")
if not os.path.exists(top_dir):
try:
os.makedirs(top_dir)
except OSError as e:
raise CommandError(e)
# Find formatters, which are external executables, before input
# from the templates can sneak into the path.
formatter_paths = find_formatters()
extensions = tuple(handle_extensions(options["extensions"]))
extra_files = []
excluded_directories = [".git", "__pycache__"]
for file in options["files"]:
extra_files.extend(map(lambda x: x.strip(), file.split(",")))
if exclude := options.get("exclude"):
for directory in exclude:
excluded_directories.append(directory.strip())
if self.verbosity >= 2:
self.stdout.write(
"Rendering %s template files with extensions: %s"
% (app_or_project, ", ".join(extensions))
)
self.stdout.write(
"Rendering %s template files with filenames: %s"
% (app_or_project, ", ".join(extra_files))
)
base_name = "%s_name" % app_or_project
base_subdir = "%s_template" % app_or_project
base_directory = "%s_directory" % app_or_project
camel_case_name = "camel_case_%s_name" % app_or_project
camel_case_value = "".join(x for x in name.title() if x != "_")
context = Context(
{
**options,
base_name: name,
base_directory: top_dir,
camel_case_name: camel_case_value,
"docs_version": get_docs_version(),
"django_version": django.__version__,
},
autoescape=False,
)
# Setup a stub settings environment for template rendering
if not settings.configured:
settings.configure()
django.setup()
template_dir = self.handle_template(options["template"], base_subdir)
prefix_length = len(template_dir) + 1
for root, dirs, files in os.walk(template_dir):
path_rest = root[prefix_length:]
relative_dir = path_rest.replace(base_name, name)
if relative_dir:
target_dir = os.path.join(top_dir, relative_dir)
os.makedirs(target_dir, exist_ok=True)
for dirname in dirs[:]:
if "exclude" not in options:
if dirname.startswith(".") or dirname == "__pycache__":
dirs.remove(dirname)
elif dirname in excluded_directories:
dirs.remove(dirname)
for filename in files:
if filename.endswith((".pyo", ".pyc", ".py.class")):
# Ignore some files as they cause various breakages.
continue
old_path = os.path.join(root, filename)
new_path = os.path.join(
top_dir, relative_dir, filename.replace(base_name, name)
)
for old_suffix, new_suffix in self.rewrite_template_suffixes:
if new_path.endswith(old_suffix):
new_path = new_path.removesuffix(old_suffix) + new_suffix
break # Only rewrite once
if os.path.exists(new_path):
raise CommandError(
"%s already exists. Overlaying %s %s into an existing "
"directory won't replace conflicting files."
% (
new_path,
self.a_or_an,
app_or_project,
)
)
# Only render the Python files, as we don't want to
# accidentally render Django templates files
if new_path.endswith(extensions) or filename in extra_files:
with open(old_path, encoding="utf-8") as template_file:
content = template_file.read()
template = Engine().from_string(content)
content = template.render(context)
with open(new_path, "w", encoding="utf-8") as new_file:
new_file.write(content)
else:
shutil.copyfile(old_path, new_path)
if self.verbosity >= 2:
self.stdout.write("Creating %s" % new_path)
try:
self.apply_umask(old_path, new_path)
self.make_writeable(new_path)
except OSError:
self.stderr.write(
"Notice: Couldn't set permission bits on %s. You're "
"probably using an uncommon filesystem setup. No "
"problem." % new_path,
self.style.NOTICE,
)
if self.paths_to_remove:
if self.verbosity >= 2:
self.stdout.write("Cleaning up temporary files.")
for path_to_remove in self.paths_to_remove:
if os.path.isfile(path_to_remove):
os.remove(path_to_remove)
else:
shutil.rmtree(path_to_remove)
run_formatters([top_dir], **formatter_paths, stderr=self.stderr)
def handle_template(self, template, subdir):
"""
Determine where the app or project templates are.
Use django.__path__[0] as the default because the Django install
directory isn't known.
"""
if template is None:
return os.path.join(django.__path__[0], "conf", subdir)
else:
template = template.removeprefix("file://")
expanded_template = os.path.expanduser(template)
expanded_template = os.path.normpath(expanded_template)
if os.path.isdir(expanded_template):
return expanded_template
if self.is_url(template):
# downloads the file and returns the path
absolute_path = self.download(template)
else:
absolute_path = os.path.abspath(expanded_template)
if os.path.exists(absolute_path):
return self.extract(absolute_path)
raise CommandError(
"couldn't handle %s template %s." % (self.app_or_project, template)
)
def validate_name(self, name, name_or_dir="name"):
if name is None:
raise CommandError(
"you must provide {an} {app} name".format(
an=self.a_or_an,
app=self.app_or_project,
)
)
# Check it's a valid directory name.
if not name.isidentifier():
raise CommandError(
"'{name}' is not a valid {app} {type}. Please make sure the "
"{type} is a valid identifier.".format(
name=name,
app=self.app_or_project,
type=name_or_dir,
)
)
# Check that __spec__ doesn't exist.
if find_spec(name) is not None:
raise CommandError(
"'{name}' conflicts with the name of an existing Python "
"module and cannot be used as {an} {app} {type}. Please try "
"another {type}.".format(
name=name,
an=self.a_or_an,
app=self.app_or_project,
type=name_or_dir,
)
)
def download(self, url):
"""
Download the given URL and return the file name.
"""
def cleanup_url(url):
tmp = url.rstrip("/")
filename = tmp.split("/")[-1]
if url.endswith("/"):
display_url = tmp + "/"
else:
display_url = url
return filename, display_url
prefix = "django_%s_template_" % self.app_or_project
tempdir = tempfile.mkdtemp(prefix=prefix, suffix="_download")
self.paths_to_remove.append(tempdir)
filename, display_url = cleanup_url(url)
if self.verbosity >= 2:
self.stdout.write("Downloading %s" % display_url)
the_path = os.path.join(tempdir, filename)
opener = build_opener()
opener.addheaders = [("User-Agent", f"Django/{django.__version__}")]
try:
with opener.open(url) as source, open(the_path, "wb") as target:
headers = source.info()
target.write(source.read())
except OSError as e:
raise CommandError(
"couldn't download URL %s to %s: %s" % (url, filename, e)
)
used_name = the_path.split("/")[-1]
# Trying to get better name from response headers
content_disposition = headers["content-disposition"]
if content_disposition:
_, params = parse_header_parameters(content_disposition)
guessed_filename = params.get("filename") or used_name
else:
guessed_filename = used_name
# Falling back to content type guessing
ext = self.splitext(guessed_filename)[1]
content_type = headers["content-type"]
if not ext and content_type:
ext = mimetypes.guess_extension(content_type)
if ext:
guessed_filename += ext
# Move the temporary file to a filename that has better
# chances of being recognized by the archive utils
if used_name != guessed_filename:
guessed_path = os.path.join(tempdir, guessed_filename)
shutil.move(the_path, guessed_path)
return guessed_path
# Giving up
return the_path
def splitext(self, the_path):
"""
Like os.path.splitext, but takes off .tar, too
"""
base, ext = posixpath.splitext(the_path)
if base.lower().endswith(".tar"):
ext = base[-4:] + ext
base = base[:-4]
return base, ext
def extract(self, filename):
"""
Extract the given file to a temporary directory and return
the path of the directory with the extracted content.
"""
prefix = "django_%s_template_" % self.app_or_project
tempdir = tempfile.mkdtemp(prefix=prefix, suffix="_extract")
self.paths_to_remove.append(tempdir)
if self.verbosity >= 2:
self.stdout.write("Extracting %s" % filename)
try:
archive.extract(filename, tempdir)
return tempdir
except (archive.ArchiveException, OSError) as e:
raise CommandError(
"couldn't extract file %s to %s: %s" % (filename, tempdir, e)
)
def is_url(self, template):
"""Return True if the name looks like a URL."""
if ":" not in template:
return False
scheme = template.split(":", 1)[0].lower()
return scheme in self.url_schemes
def apply_umask(self, old_path, new_path):
current_umask = os.umask(0)
os.umask(current_umask)
current_mode = stat.S_IMODE(os.stat(old_path).st_mode)
os.chmod(new_path, current_mode & ~current_umask)
def make_writeable(self, filename):
"""
Make sure that the file is writeable.
Useful if our source is read-only.
"""
if not os.access(filename, os.W_OK):
st = os.stat(filename)
new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
os.chmod(filename, new_permissions)
| TemplateCommand |
python | allegroai__clearml | clearml/utilities/process/mp.py | {
"start": 3992,
"end": 5146
} | class ____(_ForkSafeThreadSyncObject):
def __init__(self, value: int = 1) -> None:
super(ForkSemaphore, self).__init__(functor=partial(Semaphore, value))
def acquire(self, *args: Any, **kwargs: Any) -> Optional[bool]:
try:
self._create()
except BaseException: # noqa
return None
return self._sync.acquire(*args, **kwargs)
def release(self, *args: Any, **kwargs: Any) -> Optional[None]:
if self._sync is None:
return None
self._create()
return self._sync.release(*args, **kwargs)
def get_value(self) -> int:
self._create()
return self._sync.get_value()
def __enter__(self) -> "SingletonLock":
"""Return `self` upon entering the runtime context."""
self.acquire()
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
"""Raise any exception triggered within the runtime context."""
# Do whatever cleanup.
self.release()
| ForkSemaphore |
python | milvus-io__pymilvus | tests/test_grpc_handler.py | {
"start": 20467,
"end": 23657
} | class ____:
def test_setup_grpc_channel_with_tls(self) -> None:
with patch('pymilvus.client.grpc_handler.grpc.secure_channel') as mock_secure:
with patch('pymilvus.client.grpc_handler.grpc.ssl_channel_credentials') as mock_creds:
with patch('pymilvus.client.grpc_handler.Path') as mock_path:
# Mock file reading
mock_file = MagicMock()
mock_file.read.return_value = b"cert_content"
mock_path.return_value.open.return_value.__enter__.return_value = mock_file
mock_secure.return_value = MagicMock()
mock_creds.return_value = MagicMock()
GrpcHandler(
uri="http://localhost:19530",
secure=True,
server_pem_path="/path/to/server.pem"
)
# Verify secure channel was created
mock_creds.assert_called_once()
mock_secure.assert_called_once()
def test_setup_grpc_channel_with_client_certs(self) -> None:
with patch('pymilvus.client.grpc_handler.grpc.secure_channel') as mock_secure:
with patch('pymilvus.client.grpc_handler.grpc.ssl_channel_credentials') as mock_creds:
with patch('pymilvus.client.grpc_handler.Path') as mock_path:
# Mock file reading
mock_file = MagicMock()
mock_file.read.return_value = b"cert_content"
mock_path.return_value.open.return_value.__enter__.return_value = mock_file
mock_secure.return_value = MagicMock()
mock_creds.return_value = MagicMock()
GrpcHandler(
uri="http://localhost:19530",
secure=True,
client_pem_path="/path/to/client.pem",
client_key_path="/path/to/client.key",
ca_pem_path="/path/to/ca.pem"
)
# Verify secure channel was created
mock_creds.assert_called_once()
mock_secure.assert_called_once()
def test_setup_grpc_channel_with_server_name_override(self) -> None:
with patch('pymilvus.client.grpc_handler.grpc.secure_channel') as mock_secure:
with patch('pymilvus.client.grpc_handler.grpc.ssl_channel_credentials') as mock_creds:
mock_secure.return_value = MagicMock()
mock_creds.return_value = MagicMock()
GrpcHandler(
uri="http://localhost:19530",
secure=True,
server_name="custom.server.name"
)
# Check that the server name override was added to options
call_args = mock_secure.call_args
options = call_args[1]['options']
assert any(
opt[0] == "grpc.ssl_target_name_override" and opt[1] == "custom.server.name"
for opt in options
)
| TestGrpcHandlerSecureConnection |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/source_shopify/streams/base_streams.py | {
"start": 5137,
"end": 7389
} | class ____(ShopifyStream):
data_field = "events"
primary_key = "id"
cursor_field = "deleted_at"
def __init__(self, config: Dict, deleted_events_api_name: str) -> None:
self.deleted_events_api_name = deleted_events_api_name
super().__init__(config)
@property
def availability_strategy(self) -> None:
"""
No need to apply the `availability strategy` for this service stream.
"""
return None
def get_json_schema(self) -> None:
"""
No need to apply the `schema` for this service stream.
Return `{}` to satisfy the `self._transformer.transform(record)` logic.
"""
return {}
def produce_deleted_records_from_events(self, delete_events: Iterable[Mapping[str, Any]] = []) -> Iterable[Mapping[str, Any]]:
for event in delete_events:
yield {
"id": event["subject_id"],
self.cursor_field: event["created_at"],
"deleted_message": event.get("message", None),
"deleted_description": event.get("description", None),
"shop_url": event["shop_url"],
}
def read_records(self, stream_state: Optional[Mapping[str, Any]] = None, **kwargs) -> Iterable[Mapping[str, Any]]:
delete_events = super().read_records(stream_state=stream_state, **kwargs)
yield from self.produce_deleted_records_from_events(delete_events)
def request_params(
self,
stream_state: Optional[Mapping[str, Any]] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
**kwargs,
) -> MutableMapping[str, Any]:
params: Mapping[str, Any] = {}
if next_page_token:
# `filter` and `verb` cannot be passed, when `page_info` is present.
# See https://shopify.dev/api/usage/pagination-rest
params.update(**next_page_token)
else:
params.update(**{"filter": self.deleted_events_api_name, "verb": "destroy"})
if stream_state:
state = stream_state.get("deleted", {}).get(self.cursor_field)
if state:
params["created_at_min"] = state
return params
| ShopifyDeletedEventsStream |
python | PyCQA__pylint | tests/functional/n/none_dunder_protocols.py | {
"start": 507,
"end": 607
} | class ____(metaclass=MetaContainer):
__len__, __iter__ = [None, None]
| MultipleAssignmentNonesClass |
python | eventlet__eventlet | tests/asyncio_test.py | {
"start": 552,
"end": 7777
} | class ____(_TestBase):
"""
High-level tests for using ``asyncio``-based code inside greenlets.
For this functionality to be useful, users need to be able to use 3rd party
libraries that use sockets etc.. Merely hooking up futures to greenlets
doesn't help if you can't use the asyncio library ecosystem. So this set
of tests does more integration-y tests showing that functionality works.
"""
def set_site(self):
self.site = Site()
def test_aiohttp_client(self):
"""
The ``aiohttp`` HTTP client works correctly on top of eventlet.
"""
import aiohttp
async def request():
host, port = self.server_addr
async with aiohttp.ClientSession() as session:
url = "http://{}:{}/".format(host, port)
async with session.get(url) as response:
html = await response.text()
return html
gthread = spawn_for_awaitable(request())
assert gthread.wait() == "hello world"
def test_result():
"""
The result of the coroutine is returned by the ``GreenThread`` created by
``spawn_for_awaitable``.
"""
async def go():
await asyncio.sleep(0.0001)
return 13
assert spawn_for_awaitable(go()).wait() == 13
def test_exception():
"""
An exception raised by the coroutine is raised by ``GreenThread.wait()``
for the green thread created by ``spawn_for_awaitable()``.
"""
async def go():
await asyncio.sleep(0.0001)
raise ZeroDivisionError()
with pytest.raises(ZeroDivisionError):
assert spawn_for_awaitable(go()).wait()
def test_future_and_task():
"""
``spawn_for_awaitable()`` can take an ``asyncio.Future`` or an
``asyncio.Task``.
"""
async def go(value):
return value * 2
assert spawn_for_awaitable(asyncio.ensure_future(go(8))).wait() == 16
assert spawn_for_awaitable(asyncio.create_task(go(6))).wait() == 12
def test_asyncio_sleep():
"""
``asyncio`` scheduled events work on eventlet.
"""
async def go():
start = time()
await asyncio.sleep(0.07)
return time() - start
elapsed = spawn_for_awaitable(go()).wait()
assert 0.05 < elapsed < 0.09
def test_kill_greenthread():
"""
If a ``GreenThread`` wrapping an ``asyncio.Future``/coroutine is killed,
the ``asyncio.Future`` is cancelled.
"""
the_greenthread = []
progress = []
async def go():
await asyncio.sleep(0.1)
progress.append(1)
while not the_greenthread:
await asyncio.sleep(0.001)
# Kill the green thread.
progress.append(2)
the_greenthread[0].kill()
progress.append(3)
await asyncio.sleep(1)
# This should never be reached:
progress.append(4)
future = asyncio.ensure_future(go())
the_greenthread.append(spawn_for_awaitable(future))
with pytest.raises(GreenletExit):
the_greenthread[0].wait()
assert progress == [1, 2, 3]
# Cancellation may not be immediate.
eventlet.sleep(0.01)
assert future.cancelled()
assert progress == [1, 2, 3]
def test_await_greenthread_success():
"""
``await`` on a ``GreenThread`` returns its eventual result.
"""
def greenlet():
eventlet.sleep(0.001)
return 23
async def go():
result = await eventlet.spawn(greenlet)
return result
assert spawn_for_awaitable(go()).wait() == 23
def test_await_greenthread_exception():
"""
``await`` on a ``GreenThread`` raises its eventual exception.
"""
def greenlet():
eventlet.sleep(0.001)
return 1 / 0
async def go():
try:
await eventlet.spawn(greenlet)
except ZeroDivisionError as e:
return e
result = spawn_for_awaitable(go()).wait()
assert isinstance(result, ZeroDivisionError)
def test_await_greenthread_success_immediate():
"""
``await`` on a ``GreenThread`` returns its immediate result.
"""
def greenlet():
return 23
async def go():
result = await eventlet.spawn(greenlet)
return result
assert spawn_for_awaitable(go()).wait() == 23
def test_await_greenthread_exception_immediate():
"""
``await`` on a ``GreenThread`` raises its immediate exception.
"""
def greenlet():
return 1 / 0
async def go():
try:
await eventlet.spawn(greenlet)
except ZeroDivisionError as e:
return e
result = spawn_for_awaitable(go()).wait()
assert isinstance(result, ZeroDivisionError)
def test_ensure_future():
"""
``asyncio.ensure_future()`` works correctly on a ``GreenThread``.
"""
def greenlet():
eventlet.sleep(0.001)
return 27
async def go():
future = asyncio.ensure_future(eventlet.spawn(greenlet))
result = await future
return result
assert spawn_for_awaitable(go()).wait() == 27
def test_cancelling_future_kills_greenthread():
"""
If the ``Future`` created by ``asyncio.ensure_future(a_green_thread)`` is
cancelled, the ``a_green_thread`` ``GreenThread`` is killed.
"""
phases = []
def green():
phases.append(1)
future.cancel()
eventlet.sleep(1)
# This should never be reached:
phases.append(2)
gthread = eventlet.spawn(green)
async def go():
try:
await gthread
except asyncio.CancelledError:
return "good"
else:
return "bad"
future = asyncio.ensure_future(go())
assert spawn_for_awaitable(future).wait() == "good"
with pytest.raises(GreenletExit):
gthread.wait()
assert phases == [1]
def test_greenthread_killed_while_awaited():
"""
If a ``GreenThread`` is killed, the ``async`` function ``await``ing it sees
it as cancellation.
"""
phases = []
def green():
phases.append(1)
eventlet.sleep(0.001)
phases.append(2)
getcurrent().kill()
eventlet.sleep(1)
# Should never be reached:
phases.append(3)
gthread = eventlet.spawn(green)
async def go():
try:
await gthread
return "where is my cancellation?"
except asyncio.CancelledError:
return "canceled!"
assert spawn_for_awaitable(go()).wait() == "canceled!"
assert phases == [1, 2]
@pytest.mark.skipif(
sys.version_info[:2] < (3, 9), reason="to_thread() is new Python 3.9"
)
def test_asyncio_to_thread():
"""
``asyncio.to_thread()`` works with Eventlet.
"""
tests.run_isolated("asyncio_to_thread.py")
def test_asyncio_does_not_use_greendns():
"""
``asyncio`` loops' ``getaddrinfo()`` and ``getnameinfo()`` do not use green
DNS.
"""
tests.run_isolated("asyncio_dns.py")
def test_make_sure_monkey_patching_asyncio_is_restricted():
"""
``asyncio`` continues to have original, unpatched ``socket`` etc classes.
"""
tests.run_isolated("asyncio_correct_patching.py")
| CallingAsyncFunctionsFromGreenletsHighLevelTests |
python | scipy__scipy | scipy/stats/tests/test_continuous.py | {
"start": 82121,
"end": 85361
} | class ____:
# Adds tests just to get to 100% test coverage; this way it's more obvious
# if new lines are untested.
def test_Domain(self):
with pytest.raises(NotImplementedError):
_Domain.contains(None, 1.)
with pytest.raises(NotImplementedError):
_Domain.get_numerical_endpoints(None, 1.)
with pytest.raises(NotImplementedError):
_Domain.__str__(None)
def test_Parameter(self):
with pytest.raises(NotImplementedError):
_Parameter.validate(None, 1.)
@pytest.mark.parametrize(("dtype_in", "dtype_out"),
[(np.float16, np.float16),
(np.int16, np.float64)])
def test_RealParameter_uncommon_dtypes(self, dtype_in, dtype_out):
domain = _RealInterval((-1, 1))
parameter = _RealParameter('x', domain=domain)
x = np.asarray([0.5, 2.5], dtype=dtype_in)
arr, dtype, valid = parameter.validate(x, parameter_values={})
assert_equal(arr, x)
assert dtype == dtype_out
assert_equal(valid, [True, False])
def test_ContinuousDistribution_set_invalid_nan(self):
# Exercise code paths when formula returns wrong shape and dtype
# We could consider making this raise an error to force authors
# to return the right shape and dytpe, but this would need to be
# configurable.
class TestDist(ContinuousDistribution):
_variable = _RealParameter('x', domain=_RealInterval(endpoints=(0., 1.)))
def _logpdf_formula(self, x, *args, **kwargs):
return 0
X = TestDist()
dtype = np.float32
X._dtype = dtype
x = np.asarray([0.5], dtype=dtype)
assert X.logpdf(x).dtype == dtype
def test_fiinfo(self):
assert _fiinfo(np.float64(1.)).max == np.finfo(np.float64).max
assert _fiinfo(np.int64(1)).max == np.iinfo(np.int64).max
def test_generate_domain_support(self):
msg = _generate_domain_support(StandardNormal)
assert "accepts no distribution parameters" in msg
msg = _generate_domain_support(Normal)
assert "accepts one parameterization" in msg
msg = _generate_domain_support(_LogUniform)
assert "accepts two parameterizations" in msg
def test_ContinuousDistribution__repr__(self):
X = Uniform(a=0, b=1)
if np.__version__ < "2":
assert repr(X) == "Uniform(a=0.0, b=1.0)"
else:
assert repr(X) == "Uniform(a=np.float64(0.0), b=np.float64(1.0))"
if np.__version__ < "2":
assert repr(X*3 + 2) == "3.0*Uniform(a=0.0, b=1.0) + 2.0"
else:
assert repr(X*3 + 2) == (
"np.float64(3.0)*Uniform(a=np.float64(0.0), b=np.float64(1.0))"
" + np.float64(2.0)"
)
X = Uniform(a=np.zeros(4), b=1)
assert repr(X) == "Uniform(a=array([0., 0., 0., 0.]), b=1)"
X = Uniform(a=np.zeros(4, dtype=np.float32), b=np.ones(4, dtype=np.float32))
assert repr(X) == (
"Uniform(a=array([0., 0., 0., 0.], dtype=float32),"
" b=array([1., 1., 1., 1.], dtype=float32))"
)
| TestFullCoverage |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/types.py | {
"start": 1939,
"end": 2049
} | class ____(_NetworkAddressTypeMixin, sqltypes.TypeEngine[str]):
__visit_name__ = "INET"
PGInet = INET
| INET |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/quality/test_poisoned_lists.py | {
"start": 771,
"end": 1121
} | class ____(SearchStrategy):
def __init__(self, poison_chance):
super().__init__()
self.__poison_chance = poison_chance
self.__ints = st.integers(0, 10)
def do_draw(self, data):
if data.draw_boolean(self.__poison_chance):
return POISON
else:
return data.draw(self.__ints)
| Poisoned |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDict13.py | {
"start": 298,
"end": 429
} | class ____(ParentA):
# This should generate an error because the type of "age" is redefined.
age: float
name: str
| ChildA |
python | h5py__h5py | h5py/tests/test_vds/test_highlevel_vds.py | {
"start": 6527,
"end": 9094
} | class ____(ut.TestCase):
def setUp(self):
self.working_dir = tempfile.mkdtemp()
self.fname = ['raw_file_1.h5','raw_file_2.h5','raw_file_3.h5']
k = 0
for outfile in self.fname:
filename = osp.join(self.working_dir, outfile)
f = h5.File(filename,'w')
f['data'] = np.ones((20,200,200))*k
k +=1
f.close()
f = h5.File(osp.join(self.working_dir, 'raw_file_4.h5'), 'w')
f['data'] = np.ones((19,200,200))*3
self.fname.append('raw_file_4.h5')
self.fname = [osp.join(self.working_dir, ix) for ix in self.fname]
f.close()
def test_percival_high_level(self):
outfile = osp.join(self.working_dir, make_name('percival{}.h5'))
# Virtual layout is a representation of the output dataset
layout = h5.VirtualLayout(shape=(79, 200, 200), dtype=np.float64)
for k, filename in enumerate(self.fname):
dim1 = 19 if k == 3 else 20
vsource = h5.VirtualSource(filename, 'data',shape=(dim1, 200, 200))
layout[k:79:4, :, :] = vsource[:, :, :]
# Create the virtual dataset file
with h5.File(outfile, 'w', libver='latest') as f:
f.create_virtual_dataset('data', layout, fillvalue=-5)
foo = np.array(2 * list(range(4)))
with h5.File(outfile,'r') as f:
ds = f['data']
line = ds[:8,100,100]
self.assertEqual(ds.shape, (79,200,200),)
assert_array_equal(line, foo)
def test_percival_source_from_dataset(self):
outfile = osp.join(self.working_dir, make_name('percival{}.h5'))
# Virtual layout is a representation of the output dataset
layout = h5.VirtualLayout(shape=(79, 200, 200), dtype=np.float64)
for k, filename in enumerate(self.fname):
with h5.File(filename, 'r') as f:
vsource = h5.VirtualSource(f['data'])
layout[k:79:4, :, :] = vsource
# Create the virtual dataset file
with h5.File(outfile, 'w', libver='latest') as f:
f.create_virtual_dataset('data', layout, fillvalue=-5)
foo = np.array(2 * list(range(4)))
with h5.File(outfile,'r') as f:
ds = f['data']
line = ds[:8,100,100]
self.assertEqual(ds.shape, (79,200,200),)
assert_array_equal(line, foo)
def tearDown(self):
shutil.rmtree(self.working_dir)
@ut.skipUnless(vds_support,
'VDS requires HDF5 >= 1.9.233')
| TestPercivalHighLevel |
python | PrefectHQ__prefect | tests/server/utilities/test_server.py | {
"start": 175,
"end": 765
} | class ____:
@pytest.fixture
def client(self):
app = FastAPI()
router = PrefectRouter()
@router.get("/{x}")
def echo(x: str):
return x
app.include_router(router)
client = TestClient(app)
return client
def test_url_encoded_variables(self, client):
"""FastAPI automatically handles url-encoded variables"""
x = "| ; 👍"
response = client.get(f"/{x}")
quoted_response = client.get(urllib.parse.quote(f"/{x}"))
assert x == response.json() == quoted_response.json()
| TestParsing |
python | walkccc__LeetCode | solutions/1584. Min Cost to Connect All Points/1584.py | {
"start": 0,
"end": 739
} | class ____:
def minCostConnectPoints(self, points: list[int]) -> int:
# dist[i] := the minimum distance to connect the points[i]
dist = [math.inf] * len(points)
ans = 0
for i in range(len(points) - 1):
for j in range(i + 1, len(points)):
# Try to connect the points[i] with the points[j].
dist[j] = min(dist[j], abs(points[i][0] - points[j][0]) +
abs(points[i][1] - points[j][1]))
# Swap the points[j] (the point with the mnimum distance) with the
# points[i + 1].
if dist[j] < dist[i + 1]:
points[j], points[i + 1] = points[i + 1], points[j]
dist[j], dist[i + 1] = dist[i + 1], dist[j]
ans += dist[i + 1]
return ans
| Solution |
python | huggingface__transformers | conftest.py | {
"start": 4657,
"end": 5688
} | class ____(OutputChecker):
def check_output(self, want, got, optionflags):
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self, want, got, optionflags)
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
if is_torch_available():
import torch
# The flag below controls whether to allow TF32 on cuDNN. This flag defaults to True.
# We set it to `False` for CI. See https://github.com/pytorch/pytorch/issues/157274#issuecomment-3090791615
torch.backends.cudnn.allow_tf32 = False
# patch `torch.compile`: if `TORCH_COMPILE_FORCE_FULLGRAPH=1` (or values considered as true, e.g. yes, y, etc.),
# the patched version will always run with `fullgraph=True`.
patch_torch_compile_force_graph()
if os.environ.get("PATCH_TESTING_METHODS_TO_COLLECT_OUTPUTS", "").lower() in ("yes", "true", "on", "y", "1"):
patch_testing_methods_to_collect_info()
| CustomOutputChecker |
python | pytransitions__transitions | transitions/core.py | {
"start": 7666,
"end": 13081
} | class ____(object):
"""Representation of a transition managed by a ``Machine`` instance.
Attributes:
source (str): Source state of the transition.
dest (str): Destination state of the transition.
prepare (list): Callbacks executed before conditions checks.
conditions (list): Callbacks evaluated to determine if
the transition should be executed.
before (list): Callbacks executed before the transition is executed
but only if condition checks have been successful.
after (list): Callbacks executed after the transition is executed
but only if condition checks have been successful.
"""
dynamic_methods = ['before', 'after', 'prepare']
""" A list of dynamic methods which can be resolved by a ``Machine`` instance for convenience functions. """
condition_cls = Condition
""" The class used to wrap condition checks. Can be replaced to alter condition resolution behaviour
(e.g. OR instead of AND for 'conditions' or AND instead of OR for 'unless') """
def __init__(self, source, dest, conditions=None, unless=None, before=None,
after=None, prepare=None):
"""
Args:
source (str): The name of the source State.
dest (str): The name of the destination State.
conditions (optional[str, callable or list]): Condition(s) that must pass in order for
the transition to take place. Either a string providing the
name of a callable, or a list of callables. For the transition
to occur, ALL callables must return True.
unless (optional[str, callable or list]): Condition(s) that must return False in order
for the transition to occur. Behaves just like conditions arg
otherwise.
before (optional[str, callable or list]): callbacks to trigger before the
transition.
after (optional[str, callable or list]): callbacks to trigger after the transition.
prepare (optional[str, callable or list]): callbacks to trigger before conditions are checked
"""
self.source = source
self.dest = dest
self.prepare = [] if prepare is None else listify(prepare)
self.before = [] if before is None else listify(before)
self.after = [] if after is None else listify(after)
self.conditions = []
if conditions is not None:
for cond in listify(conditions):
self.conditions.append(self.condition_cls(cond))
if unless is not None:
for cond in listify(unless):
self.conditions.append(self.condition_cls(cond, target=False))
def _eval_conditions(self, event_data):
for cond in self.conditions:
if not cond.check(event_data):
_LOGGER.debug("%sTransition condition failed: %s() does not return %s. Transition halted.",
event_data.machine.name, cond.func, cond.target)
return False
return True
def execute(self, event_data):
"""Execute the transition.
Args:
event_data: An instance of class EventData.
Returns: boolean indicating whether the transition was
successfully executed (True if successful, False if not).
"""
_LOGGER.debug("%sInitiating transition from state %s to state %s...",
event_data.machine.name, self.source, self.dest)
event_data.machine.callbacks(self.prepare, event_data)
_LOGGER.debug("%sExecuted callbacks before conditions.", event_data.machine.name)
if not self._eval_conditions(event_data):
return False
event_data.machine.callbacks(itertools.chain(event_data.machine.before_state_change, self.before), event_data)
_LOGGER.debug("%sExecuted callback before transition.", event_data.machine.name)
if self.dest is not None: # if self.dest is None this is an internal transition with no actual state change
self._change_state(event_data)
event_data.machine.callbacks(itertools.chain(self.after, event_data.machine.after_state_change), event_data)
_LOGGER.debug("%sExecuted callback after transition.", event_data.machine.name)
return True
def _change_state(self, event_data):
event_data.machine.get_state(self.source).exit(event_data)
event_data.machine.set_state(self.dest, event_data.model)
event_data.update(getattr(event_data.model, event_data.machine.model_attribute))
dest = event_data.machine.get_state(self.dest)
dest.enter(event_data)
if dest.final:
event_data.machine.callbacks(event_data.machine.on_final, event_data)
def add_callback(self, trigger, func):
"""Add a new before, after, or prepare callback.
Args:
trigger (str): The type of triggering event. Must be one of
'before', 'after' or 'prepare'.
func (str or callable): The name of the callback function or a callable.
"""
callback_list = getattr(self, trigger)
callback_list.append(func)
def __repr__(self):
return "<%s('%s', '%s')@%s>" % (type(self).__name__,
self.source, self.dest, id(self))
| Transition |
python | python-attrs__attrs | tests/test_functional.py | {
"start": 1317,
"end": 1380
} | class ____(metaclass=Meta):
pass
@attr.s(slots=True)
| WithMeta |
python | python-excel__xlwt | tests/test_biff_records.py | {
"start": 506,
"end": 872
} | class ____(unittest.TestCase):
def test_intersheets_ref(self):
book = xlwt.Workbook()
sheet_a = book.add_sheet('A')
sheet_a.write(0, 0, 'A1')
sheet_a.write(0, 1, 'A2')
sheet_b = book.add_sheet('B')
sheet_b.write(0, 0, xlwt.Formula("'A'!$A$1&'A'!$A$2"))
out = BytesIO()
book.save(out)
| TestIntersheetsRef |
python | encode__django-rest-framework | rest_framework/fields.py | {
"start": 27521,
"end": 27862
} | class ____(CharField):
default_error_messages = {
'invalid': _('This value does not match the required pattern.')
}
def __init__(self, regex, **kwargs):
super().__init__(**kwargs)
validator = RegexValidator(regex, message=self.error_messages['invalid'])
self.validators.append(validator)
| RegexField |
python | lepture__authlib | authlib/integrations/sqla_oauth2/tokens_mixins.py | {
"start": 1218,
"end": 2261
} | class ____(TokenMixin):
client_id = Column(String(48))
token_type = Column(String(40))
access_token = Column(String(255), unique=True, nullable=False)
refresh_token = Column(String(255), index=True)
scope = Column(Text, default="")
issued_at = Column(Integer, nullable=False, default=lambda: int(time.time()))
access_token_revoked_at = Column(Integer, nullable=False, default=0)
refresh_token_revoked_at = Column(Integer, nullable=False, default=0)
expires_in = Column(Integer, nullable=False, default=0)
def check_client(self, client):
return self.client_id == client.get_client_id()
def get_scope(self):
return self.scope
def get_expires_in(self):
return self.expires_in
def is_revoked(self):
return self.access_token_revoked_at or self.refresh_token_revoked_at
def is_expired(self):
if not self.expires_in:
return False
expires_at = self.issued_at + self.expires_in
return expires_at < time.time()
| OAuth2TokenMixin |
python | PrefectHQ__prefect | src/prefect/server/schemas/filters.py | {
"start": 53388,
"end": 54866
} | class ____(PrefectOperatorFilterBaseModel):
"""Filter logs. Only logs matching all criteria will be returned"""
level: Optional[LogFilterLevel] = Field(
default=None, description="Filter criteria for `Log.level`"
)
timestamp: Optional[LogFilterTimestamp] = Field(
default=None, description="Filter criteria for `Log.timestamp`"
)
flow_run_id: Optional[LogFilterFlowRunId] = Field(
default=None, description="Filter criteria for `Log.flow_run_id`"
)
task_run_id: Optional[LogFilterTaskRunId] = Field(
default=None, description="Filter criteria for `Log.task_run_id`"
)
text: Optional[LogFilterTextSearch] = Field(
default=None, description="Filter criteria for text search across log content"
)
def _get_filter_list(
self, db: "PrefectDBInterface"
) -> Iterable[sa.ColumnExpressionArgument[bool]]:
filters: list[sa.ColumnExpressionArgument[bool]] = []
if self.level is not None:
filters.append(self.level.as_sql_filter())
if self.timestamp is not None:
filters.append(self.timestamp.as_sql_filter())
if self.flow_run_id is not None:
filters.append(self.flow_run_id.as_sql_filter())
if self.task_run_id is not None:
filters.append(self.task_run_id.as_sql_filter())
if self.text is not None:
filters.extend(self.text._get_filter_list(db))
return filters
| LogFilter |
python | huggingface__transformers | tests/models/speecht5/test_modeling_speecht5.py | {
"start": 6922,
"end": 12426
} | class ____:
def __init__(
self,
parent,
batch_size=13,
encoder_seq_length=1024, # speech is longer
decoder_seq_length=7,
is_training=False,
hidden_size=24,
num_hidden_layers=2,
num_attention_heads=2,
intermediate_size=4,
conv_dim=(32, 32, 32),
conv_stride=(4, 4, 4),
conv_kernel=(8, 8, 8),
conv_bias=False,
num_conv_pos_embeddings=16,
num_conv_pos_embedding_groups=2,
vocab_size=81,
):
self.parent = parent
self.batch_size = batch_size
self.encoder_seq_length = encoder_seq_length
self.decoder_seq_length = decoder_seq_length
self.is_training = is_training
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.conv_dim = conv_dim
self.conv_stride = conv_stride
self.conv_kernel = conv_kernel
self.conv_bias = conv_bias
self.num_conv_pos_embeddings = num_conv_pos_embeddings
self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
self.vocab_size = vocab_size
def prepare_config_and_inputs(self):
input_values = floats_tensor([self.batch_size, self.encoder_seq_length], scale=1.0)
attention_mask = random_attention_mask([self.batch_size, self.encoder_seq_length])
decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size).clamp(2)
decoder_attention_mask = random_attention_mask([self.batch_size, self.decoder_seq_length])
config = self.get_config()
inputs_dict = prepare_inputs_dict(
config,
input_values=input_values,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
return config, inputs_dict
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def get_config(self):
return SpeechT5Config(
hidden_size=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
conv_dim=self.conv_dim,
conv_stride=self.conv_stride,
conv_kernel=self.conv_kernel,
conv_bias=self.conv_bias,
num_conv_pos_embeddings=self.num_conv_pos_embeddings,
num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups,
vocab_size=self.vocab_size,
)
def get_subsampled_output_lengths(self, input_lengths):
"""
Computes the output length of the convolutional layers
"""
for stride in self.conv_stride:
input_lengths = (input_lengths // stride) - 1
return input_lengths
def create_and_check_model_forward(self, config, inputs_dict):
model = SpeechT5ForSpeechToText(config=config).to(torch_device).eval()
input_values = inputs_dict["input_values"]
attention_mask = inputs_dict["attention_mask"]
decoder_input_ids = inputs_dict["decoder_input_ids"]
result = model(input_values, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.decoder_seq_length, self.vocab_size))
def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = SpeechT5ForSpeechToText(config=config).get_decoder().to(torch_device).eval()
input_ids = inputs_dict["decoder_input_ids"]
attention_mask = inputs_dict["decoder_attention_mask"]
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size).clamp(2)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
output_from_past = model(
next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, use_cache=True
)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))
@require_torch
| SpeechT5ForSpeechToTextTester |
python | numba__numba | numba/tests/test_dictobject.py | {
"start": 35488,
"end": 36339
} | class ____(TestCase, DictIterableCtor):
def setUp(self):
self.jit_enabled = False
def test_exception_nargs(self):
msg = 'Dict expect at most 1 argument, got 2'
with self.assertRaisesRegex(TypingError, msg):
Dict(1, 2)
def test_exception_mapping_ctor(self):
msg = r'.*dict\(mapping\) is not supported.*' # noqa: W605
with self.assertRaisesRegex(TypingError, msg):
Dict({1: 2})
def test_exception_non_iterable_arg(self):
msg = '.*object is not iterable.*'
with self.assertRaisesRegex(TypingError, msg):
Dict(3)
def test_exception_setitem(self):
msg = ".*dictionary update sequence element #1 has length 3.*"
with self.assertRaisesRegex(ValueError, msg):
Dict(((1, 'a'), (2, 'b', 3)))
| TestDictIterableCtorNoJit |
python | Textualize__textual | src/textual/app.py | {
"start": 6812,
"end": 6915
} | class ____(ModeError):
"""Raised when attempting to use a mode that is not known."""
| UnknownModeError |
python | django__django | django/contrib/gis/geos/prototypes/geom.py | {
"start": 1249,
"end": 3400
} | class ____(GEOSFuncFactory):
"Argument is a Geometry, return type is a string."
argtypes = [GEOM_PTR]
restype = geos_char_p
errcheck = staticmethod(check_string)
# ### ctypes prototypes ###
# The GEOS geometry type, typeid, num_coordinates and number of geometries
geos_makevalid = GeomOutput("GEOSMakeValid", argtypes=[GEOM_PTR])
geos_normalize = IntFromGeom("GEOSNormalize")
geos_type = StringFromGeom("GEOSGeomType")
geos_typeid = IntFromGeom("GEOSGeomTypeId")
get_dims = GEOSFuncFactory("GEOSGeom_getDimensions", argtypes=[GEOM_PTR], restype=c_int)
get_num_coords = IntFromGeom("GEOSGetNumCoordinates")
get_num_geoms = IntFromGeom("GEOSGetNumGeometries")
# Geometry creation factories
create_point = GeomOutput("GEOSGeom_createPoint", argtypes=[CS_PTR])
create_linestring = GeomOutput("GEOSGeom_createLineString", argtypes=[CS_PTR])
create_linearring = GeomOutput("GEOSGeom_createLinearRing", argtypes=[CS_PTR])
# Polygon and collection creation routines need argument types defined
# for compatibility with some platforms, e.g. macOS ARM64. With argtypes
# defined, arrays are automatically cast and byref() calls are not needed.
create_polygon = GeomOutput(
"GEOSGeom_createPolygon",
argtypes=[GEOM_PTR, POINTER(GEOM_PTR), c_uint],
)
create_empty_polygon = GeomOutput("GEOSGeom_createEmptyPolygon", argtypes=[])
create_collection = GeomOutput(
"GEOSGeom_createCollection",
argtypes=[c_int, POINTER(GEOM_PTR), c_uint],
)
# Ring routines
get_extring = GeomOutput("GEOSGetExteriorRing", argtypes=[GEOM_PTR])
get_intring = GeomOutput("GEOSGetInteriorRingN", argtypes=[GEOM_PTR, c_int])
get_nrings = IntFromGeom("GEOSGetNumInteriorRings")
# Collection Routines
get_geomn = GeomOutput("GEOSGetGeometryN", argtypes=[GEOM_PTR, c_int])
# Cloning
geom_clone = GEOSFuncFactory("GEOSGeom_clone", argtypes=[GEOM_PTR], restype=GEOM_PTR)
# Destruction routine.
destroy_geom = GEOSFuncFactory("GEOSGeom_destroy", argtypes=[GEOM_PTR])
# SRID routines
geos_get_srid = GEOSFuncFactory("GEOSGetSRID", argtypes=[GEOM_PTR], restype=c_int)
geos_set_srid = GEOSFuncFactory("GEOSSetSRID", argtypes=[GEOM_PTR, c_int])
| StringFromGeom |
python | pypa__warehouse | warehouse/organizations/models.py | {
"start": 7843,
"end": 9240
} | class ____:
def __init__(self, request):
self.request = request
def __getitem__(self, organization):
# Try returning organization with matching name.
try:
return (
self.request.db.query(Organization)
.filter(
Organization.normalized_name
== func.normalize_pep426_name(organization)
)
.one()
)
except NoResultFound:
pass
# Try redirecting to a renamed organization.
try:
organization = (
self.request.db.query(Organization)
.join(
OrganizationNameCatalog,
OrganizationNameCatalog.organization_id == Organization.id,
)
.filter(
OrganizationNameCatalog.normalized_name
== func.normalize_pep426_name(organization)
)
.one()
)
raise HTTPPermanentRedirect(
self.request.matched_route.generate(
{
**self.request.matchdict,
"organization_name": organization.normalized_name,
}
)
)
except NoResultFound:
raise KeyError from None
| OrganizationFactory |
python | getsentry__sentry | src/sentry/monitors/system_incidents.py | {
"start": 14090,
"end": 21997
} | class ____(StrEnum):
"""
A metric is similar to a tick decision, however it represents a decision
made on the volume metric. The metric we current consider is percent mean
deviation from historic volumes.
"""
NORMAL = "normal"
"""
The metric is below the abnormal threshold.
"""
ABNORMAL = "abnormal"
"""
The metric has surpassed the normal threshold but is still below the
incident threshold.
"""
INCIDENT = "incident"
"""
The metric has surpassed the incident threshold
"""
@staticmethod
def from_value(value: float | str | None) -> Metric:
"""
Determine an individual decision for the percentage deviation metric of a
clock tick. This only considers metrics that are negative, indicating
there's been a drop in check-in volume.
"""
# examples: -5% anomaly and -25% incident
anomaly_threshold = options.get("crons.system_incidents.pct_deviation_anomaly_threshold")
incident_threshold = options.get("crons.system_incidents.pct_deviation_incident_threshold")
# If we do not have a metric for this tick we must assume things are
# operating normally
if value is None:
return Metric.NORMAL
pct_deviation = float(value)
if pct_deviation <= incident_threshold:
return Metric.INCIDENT
if pct_deviation <= anomaly_threshold:
return Metric.ABNORMAL
return Metric.NORMAL
def make_clock_tick_decision(tick: datetime) -> DecisionResult:
"""
Given a clock tick timestamp determine based on the historic tick volume
metrics, and historic tick anomaly decisions, a DecisionResult.
This function will update previous decisions for earlier ticks detected as
ABNORMAL or RECOVERING to either NORMAL or INCIDENT.
The state transitions for tick decisions are as follows
┌───D────────────────────────────┐
┌────▼─┐ ┌────────┐ ┌────────┐ ┌┴─────────┐
│NORMAL├─A─►ABNORMAL├┬F─►INCIDENT├─C─►RECOVERING│
│ ◄─B─│ ││ │ ◄─E─┤ │
└────┬─┘ └────────┘│ └────────┘ └──────────┘
└───────────────┘
A: ABNORMALITY_STARTED
B: ABNORMALITY_RECOVERED
C: INCIDENT_RECOVERING
D: INCIDENT_RECOVERED
E: INCIDENT_RECOVERY_FAILED
F: INCIDENT_STARTED
"""
# Alias TickAnomalyDecision to improve code readability
Decision = TickAnomalyDecision
if not options.get("crons.system_incidents.collect_metrics"):
return DecisionResult(tick, Decision.NORMAL)
redis_client = redis.redis_clusters.get(settings.SENTRY_MONITORS_REDIS_CLUSTER)
tick_decision_window = options.get("crons.system_incidents.tick_decision_window")
# The clock has just ticked to the next minute. Look at the previous tick
# and decision metrics.
past_ts = tick - timedelta(minutes=1)
past_window_ts_keys = [
_make_reference_ts(past_ts - timedelta(minutes=delta))
for delta in range(0, tick_decision_window)
]
# Fetch histories for metrics and the last decision together. Window
# timestamps are reversed so the oldest metric is last.
pipeline = redis_client.pipeline()
for key in chain(
(MONITOR_TICK_METRIC.format(ts=ts) for ts in reversed(past_window_ts_keys)),
(MONITOR_TICK_DECISION.format(ts=ts) for ts in [past_window_ts_keys[0]]),
):
pipeline.get(key)
values = pipeline.execute()
# Tick metrics are the first tick_decision_window values
tick_metrics = [Metric.from_value(value) for value in values[:-1]]
last_metric = tick_metrics[-1]
# The last decision is the last value fetched
if values[-1] is not None:
last_decision = Decision.from_str(values[-1])
else:
# By default the previous decision is used. If there was no previous
# decision we can only assume things are operating normally
last_decision = Decision.NORMAL
def make_decision(
decision: TickAnomalyDecision,
transition: AnomalyTransition | None = None,
ts: datetime | None = None,
) -> DecisionResult:
decision_key = MONITOR_TICK_DECISION.format(ts=_make_reference_ts(tick))
pipeline = redis_client.pipeline()
pipeline.set(decision_key, decision)
pipeline.expire(decision_key, MONITOR_VOLUME_RETENTION)
pipeline.execute()
logger.info(
"clock_tick_decision",
extra={
"reference_datetime": str(tick),
"decision": decision,
"transition": transition,
},
)
return DecisionResult(ts or tick, decision, transition)
def metrics_match(metric: Metric) -> Generator[bool]:
return (d == metric for d in tick_metrics)
# A: NORMAL -> ABNORMAL
#
# If we've detected an anomaly and we're not already in an incident,
# anomalous state, or recovering, mark this tick as anomalous.
if last_decision == Decision.NORMAL and last_metric == Metric.ABNORMAL:
return make_decision(Decision.ABNORMAL, AnomalyTransition.ABNORMALITY_STARTED)
# B: ABNORMAL -> NORMAL
#
# If the previous result was anomalous check and if we have recovered and can
# backfill these decisions as normal
if last_decision == Decision.ABNORMAL and all(metrics_match(Metric.NORMAL)):
_backfill_decisions(past_ts, Decision.NORMAL, Decision.ABNORMAL)
return make_decision(Decision.NORMAL, AnomalyTransition.ABNORMALITY_RECOVERED)
# C: INCIDENT -> RECOVERING
#
# If we are actively in an incident and the most recent metric value has
# recovered to normal we can de-escalate the incident to abnormal.
if last_decision == Decision.INCIDENT and last_metric == Metric.NORMAL:
return make_decision(Decision.RECOVERING, AnomalyTransition.INCIDENT_RECOVERING)
# D: RECOVERING -> NORMAL
#
# If the previous result was recovering, check if we have recovered and can
# backfill these decisions as normal.
if last_decision == Decision.RECOVERING and all(metrics_match(Metric.NORMAL)):
ts = _backfill_decisions(past_ts, Decision.NORMAL, Decision.RECOVERING)
return make_decision(Decision.NORMAL, AnomalyTransition.INCIDENT_RECOVERED, ts)
# E: RECOVERING -> INCIDENT
#
# If an incident had begun recovering but we've detected a non-normal
# metric, backfill all recovery decisions to an incident decision.
if last_decision == Decision.RECOVERING and last_metric != Metric.NORMAL:
_backfill_decisions(past_ts, Decision.INCIDENT, Decision.RECOVERING)
return make_decision(Decision.INCIDENT, AnomalyTransition.INCIDENT_RECOVERY_FAILED)
# F: [NORMAL, ABNORMAL] -> INCIDENT
#
# If we're not already in an incident and the most recent metric value is
# an incident, mark this tick as an incident and backfill all abnormal
# decisions to an incident decision.
if last_decision != Decision.INCIDENT and last_metric == Metric.INCIDENT:
ts = _backfill_decisions(past_ts, Decision.INCIDENT, Decision.ABNORMAL)
return make_decision(Decision.INCIDENT, AnomalyTransition.INCIDENT_STARTED, ts)
# NORMAL -> NORMAL
# ABNORMAL -> ABNORMAL
# INCIDENT -> INCIDENT
# RECOVERING -> RECOVERING
#
# No decision transition. Use the previous decision
return make_decision(last_decision)
def get_clock_tick_decision(tick: datetime) -> TickAnomalyDecision | None:
"""
Retrieve the TickAnomalyDecision for a specific clock tick.
"""
redis_client = redis.redis_clusters.get(settings.SENTRY_MONITORS_REDIS_CLUSTER)
if value := redis_client.get(MONITOR_TICK_DECISION.format(ts=_make_reference_ts(tick))):
return TickAnomalyDecision.from_str(value)
else:
return None
@dataclass
| Metric |
python | aimacode__aima-python | utils.py | {
"start": 10045,
"end": 13241
} | class ____:
"""Dependency injection of temporary values for global functions/classes/etc.
E.g., `with injection(DataBase=MockDataBase): ...`"""
def __init__(self, **kwds):
self.new = kwds
def __enter__(self):
self.old = {v: globals()[v] for v in self.new}
globals().update(self.new)
def __exit__(self, type, value, traceback):
globals().update(self.old)
def memoize(fn, slot=None, maxsize=32):
"""Memoize fn: make it remember the computed value for any argument list.
If slot is specified, store result in that slot of first argument.
If slot is false, use lru_cache for caching the values."""
if slot:
def memoized_fn(obj, *args):
if hasattr(obj, slot):
return getattr(obj, slot)
else:
val = fn(obj, *args)
setattr(obj, slot, val)
return val
else:
@functools.lru_cache(maxsize=maxsize)
def memoized_fn(*args):
return fn(*args)
return memoized_fn
def name(obj):
"""Try to find some reasonable name for the object."""
return (getattr(obj, 'name', 0) or getattr(obj, '__name__', 0) or
getattr(getattr(obj, '__class__', 0), '__name__', 0) or
str(obj))
def isnumber(x):
"""Is x a number?"""
return hasattr(x, '__int__')
def issequence(x):
"""Is x a sequence?"""
return isinstance(x, collections.abc.Sequence)
def print_table(table, header=None, sep=' ', numfmt='{}'):
"""Print a list of lists as a table, so that columns line up nicely.
header, if specified, will be printed as the first row.
numfmt is the format for all numbers; you might want e.g. '{:.2f}'.
(If you want different formats in different columns,
don't use print_table.) sep is the separator between columns."""
justs = ['rjust' if isnumber(x) else 'ljust' for x in table[0]]
if header:
table.insert(0, header)
table = [[numfmt.format(x) if isnumber(x) else x for x in row]
for row in table]
sizes = list(map(lambda seq: max(map(len, seq)), list(zip(*[map(str, row) for row in table]))))
for row in table:
print(sep.join(getattr(str(x), j)(size) for (j, size, x) in zip(justs, sizes, row)))
def open_data(name, mode='r'):
aima_root = os.path.dirname(__file__)
aima_file = os.path.join(aima_root, *['aima-data', name])
return open(aima_file, mode=mode)
def failure_test(algorithm, tests):
"""Grades the given algorithm based on how many tests it passes.
Most algorithms have arbitrary output on correct execution, which is difficult
to check for correctness. On the other hand, a lot of algorithms output something
particular on fail (for example, False, or None).
tests is a list with each element in the form: (values, failure_output)."""
return mean(int(algorithm(x) != y) for x, y in tests)
# ______________________________________________________________________________
# Expressions
# See https://docs.python.org/3/reference/expressions.html#operator-precedence
# See https://docs.python.org/3/reference/datamodel.html#special-method-names
| injection |
python | bokeh__bokeh | tests/unit/bokeh/core/property/test_struct.py | {
"start": 1431,
"end": 6701
} | class ____:
def test_init(self) -> None:
with pytest.raises(ValueError):
bcps.Struct()
def test_valid(self) -> None:
prop0 = bcps.Struct(a=Int, b=List(Int), c=Dict(Instance(_TestModel), String))
assert prop0.is_valid(dict(a=0, b=[1], c={_TestModel(): "x"}))
assert prop0.is_valid(bcps.struct(a=0, b=[1], c={_TestModel(): "x"}))
assert prop0.is_valid(SimpleNamespace(a=0, b=[1], c={_TestModel(): "x"}))
prop1 = bcps.Struct(a=Int, b=List(Int), c=bcps.Optional(Dict(Instance(_TestModel), String)))
assert prop1.is_valid(dict(a=0, b=[1]))
assert prop1.is_valid(bcps.struct(a=0, b=[1]))
assert prop1.is_valid(SimpleNamespace(a=0, b=[1]))
assert prop1.is_valid(dict(a=0, b=[1], c={_TestModel(): "x"}))
assert prop1.is_valid(bcps.struct(a=0, b=[1], c={_TestModel(): "x"}))
assert prop1.is_valid(SimpleNamespace(a=0, b=[1], c={_TestModel(): "x"}))
def test_invalid(self) -> None:
prop0 = bcps.Struct(a=Int, b=List(Int), c=Dict(Instance(_TestModel), String))
assert not prop0.is_valid(0)
assert not prop0.is_valid("")
assert not prop0.is_valid(None)
assert not prop0.is_valid([])
assert not prop0.is_valid({})
assert not prop0.is_valid(bcps.struct())
assert not prop0.is_valid(SimpleNamespace())
assert not prop0.is_valid({"a": 0})
assert not prop0.is_valid(bcps.struct(a=0))
assert not prop0.is_valid(SimpleNamespace(a=0))
assert not prop0.is_valid({"a": 0, "b": [1]})
assert not prop0.is_valid(bcps.struct(a=0, b=[1]))
assert not prop0.is_valid(SimpleNamespace(a=0, b=[1]))
assert not prop0.is_valid({"a": 0, "b": [1], "c": {_TestModel(): 0}})
assert not prop0.is_valid(bcps.struct(a=0, b=[1], c={_TestModel(): 0}))
assert not prop0.is_valid(SimpleNamespace(a=0, b=[1], c={_TestModel(): 0}))
assert not prop0.is_valid({"a": 0, "b": [1], "d": {_TestModel(): "x"}})
assert not prop0.is_valid(bcps.struct(a=0, b=[1], d={_TestModel(): "x"}))
assert not prop0.is_valid(SimpleNamespace(a=0, b=[1], d={_TestModel(): "x"}))
assert not prop0.is_valid({"a": 0, "b": [1], "c": {_TestModel(): "x"}, "d": "y"})
assert not prop0.is_valid(bcps.struct(a=0, b=[1], c={_TestModel(): "x"}, d="y"))
assert not prop0.is_valid(SimpleNamespace(a=0, b=[1], c={_TestModel(): "x"}, d="y"))
prop1 = bcps.Struct(a=Int, b=List(Int), c=Dict(Instance(_TestModel), String))
assert not prop1.is_valid(0)
assert not prop1.is_valid("")
assert not prop1.is_valid(None)
assert not prop1.is_valid([])
assert not prop1.is_valid({})
assert not prop1.is_valid(bcps.struct())
assert not prop1.is_valid(SimpleNamespace())
assert not prop1.is_valid({"a": 0})
assert not prop1.is_valid(bcps.struct(a=0))
assert not prop1.is_valid(SimpleNamespace(a=0))
assert not prop0.is_valid({"a": 0, "b": [1], "c": {_TestModel(): 0}})
assert not prop0.is_valid(bcps.struct(a=0, b=[1], c={_TestModel(): 0}))
assert not prop0.is_valid(SimpleNamespace(a=0, b=[1], c={_TestModel(): 0}))
assert not prop0.is_valid({"a": 0, "b": [1], "d": {_TestModel(): "x"}})
assert not prop0.is_valid(bcps.struct(a=0, b=[1], d={_TestModel(): "x"}))
assert not prop0.is_valid(SimpleNamespace(a=0, b=[1], d={_TestModel(): "x"}))
assert not prop0.is_valid({"a": 0, "b": [1], "c": {_TestModel(): "x"}, "d": "y"})
assert not prop0.is_valid(bcps.struct(a=0, b=[1], c={_TestModel(): "x"}, d="y"))
assert not prop0.is_valid(SimpleNamespace(a=0, b=[1], c={_TestModel(): "x"}, d="y"))
def test_has_ref(self) -> None:
prop0 = bcps.Struct(a=Int)
assert not prop0.has_ref
prop1 = bcps.Struct(a=Instance(_TestModel))
assert prop1.has_ref
def test_str(self) -> None:
prop = bcps.Struct(a=Int, b=List(Int), c=Dict(Instance(_TestModel), String))
assert str(prop) == "Struct(a=Int, b=List(Int), c=Dict(Instance(_TestModel), String))"
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
def test_struct() -> None:
obj = _TestModel()
s = bcps.struct(a=1, b=[1, 2, 3], c={obj: "x"})
assert s.a == 1
assert s["a"] == 1
assert s.b == [1, 2, 3]
assert s["b"] == [1, 2, 3]
assert s.c == {obj: "x"}
assert s["c"] == {obj: "x"}
s.a = 2
assert s.a == 2
assert s["a"] == 2
s["a"] = 3
assert s.a == 3
assert s["a"] == 3
s["d"] = {1, 2, 3}
assert s.d == {1, 2, 3}
assert s["d"] == {1, 2, 3}
assert s.__dict__ == dict(a=3, b=[1, 2, 3], c={obj: "x"}, d={1, 2, 3})
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
Test___all__ = verify_all(bcps, ALL)
| Test_Struct |
python | python-poetry__poetry | src/poetry/config/source.py | {
"start": 249,
"end": 1154
} | class ____:
name: str
url: str = ""
priority: Priority = (
Priority.PRIMARY
) # cheating in annotation: str will be converted to Priority in __post_init__
def __post_init__(self) -> None:
if isinstance(self.priority, str):
self.priority = Priority[self.priority.upper()]
def to_dict(self) -> dict[str, str | bool]:
return dataclasses.asdict(
self,
dict_factory=lambda x: {
k: v if not isinstance(v, Priority) else v.name.lower()
for (k, v) in x
if v
},
)
def to_toml_table(self) -> Table:
from tomlkit import nl
from tomlkit import table
source_table: Table = table()
for key, value in self.to_dict().items():
source_table.add(key, value)
source_table.add(nl())
return source_table
| Source |
python | ray-project__ray | python/ray/data/context.py | {
"start": 8436,
"end": 11505
} | class ____:
"""Configuration for autoscaling of Ray Data.
Args:
actor_pool_util_upscaling_threshold: Actor Pool utilization threshold for upscaling.
Once Actor Pool exceeds this utilization threshold it will start adding new actors.
Actor Pool utilization is defined as ratio of number of submitted tasks to the
number of available concurrency-slots to run them in the current set of actors.
This utilization value could exceed 100%, when the number of submitted tasks
exceed available concurrency-slots to run them in the current set of actors.
This is possible when `max_tasks_in_flight_per_actor`
(defaults to 2 x of `max_concurrency`) > Actor's `max_concurrency`
and allows to overlap task execution with the fetching of the blocks
for the next task providing for ability to negotiate a trade-off
between autoscaling speed and resource efficiency (i.e.,
making tasks wait instead of immediately triggering execution).
actor_pool_util_downscaling_threshold: Actor Pool utilization threshold for downscaling.
actor_pool_max_upscaling_delta: Maximum number of actors to scale up in a single scaling decision.
This limits how many actors can be added at once to prevent resource contention
and scheduling pressure. Defaults to 1 for conservative scaling.
"""
actor_pool_util_upscaling_threshold: float = (
DEFAULT_ACTOR_POOL_UTIL_UPSCALING_THRESHOLD
)
# Actor Pool utilization threshold for downscaling
actor_pool_util_downscaling_threshold: float = (
DEFAULT_ACTOR_POOL_UTIL_DOWNSCALING_THRESHOLD
)
# Maximum number of actors to scale up in a single scaling decision
actor_pool_max_upscaling_delta: int = DEFAULT_ACTOR_POOL_MAX_UPSCALING_DELTA
def _execution_options_factory() -> "ExecutionOptions":
# Lazily import to avoid circular dependencies.
from ray.data._internal.execution.interfaces import ExecutionOptions
return ExecutionOptions()
def _deduce_default_shuffle_algorithm() -> ShuffleStrategy:
if DEFAULT_USE_PUSH_BASED_SHUFFLE:
logger.warning(
"RAY_DATA_PUSH_BASED_SHUFFLE is deprecated, please use "
"RAY_DATA_DEFAULT_SHUFFLE_STRATEGY to set shuffling strategy"
)
return ShuffleStrategy.SORT_SHUFFLE_PUSH_BASED
else:
vs = [s for s in ShuffleStrategy] # noqa: C416
assert DEFAULT_SHUFFLE_STRATEGY in vs, (
f"RAY_DATA_DEFAULT_SHUFFLE_STRATEGY has to be one of the [{','.join(vs)}] "
f"(got {DEFAULT_SHUFFLE_STRATEGY})"
)
return DEFAULT_SHUFFLE_STRATEGY
def _issue_detectors_config_factory() -> "IssueDetectorsConfiguration":
# Lazily import to avoid circular dependencies.
from ray.data._internal.issue_detection.issue_detector_configuration import (
IssueDetectorsConfiguration,
)
return IssueDetectorsConfiguration()
@DeveloperAPI
@dataclass
| AutoscalingConfig |
python | ray-project__ray | rllib/env/tests/test_multi_agent_env.py | {
"start": 11106,
"end": 13762
} | class ____(MultiAgentEnv):
"""Env of N independent agents, each of which exits after 5 steps.
On each step() of the env, only one agent takes an action."""
def __init__(self, num, increment_obs=False):
super().__init__()
if increment_obs:
# Observations are 0, 1, 2, 3... etc. as time advances
self.envs = [MockEnv2(5) for _ in range(num)]
else:
# Observations are all zeros
self.envs = [MockEnv(5) for _ in range(num)]
self._agent_ids = set(range(num))
self.terminateds = set()
self.truncateds = set()
self.last_obs = {}
self.last_rew = {}
self.last_terminated = {}
self.last_truncated = {}
self.last_info = {}
self.i = 0
self.num = num
self.observation_space = gym.spaces.Discrete(10)
self.action_space = gym.spaces.Discrete(2)
def reset(self, *, seed=None, options=None):
self.terminateds = set()
self.truncateds = set()
self.last_obs = {}
self.last_rew = {}
self.last_terminated = {}
self.last_truncated = {}
self.last_info = {}
self.i = 0
for i, a in enumerate(self.envs):
self.last_obs[i], self.last_info[i] = a.reset()
self.last_rew[i] = 0
self.last_terminated[i] = False
self.last_truncated[i] = False
obs_dict = {self.i: self.last_obs[self.i]}
info_dict = {self.i: self.last_info[self.i]}
self.i = (self.i + 1) % self.num
return obs_dict, info_dict
def step(self, action_dict):
assert len(self.terminateds) != len(self.envs)
for i, action in action_dict.items():
(
self.last_obs[i],
self.last_rew[i],
self.last_terminated[i],
self.last_truncated[i],
self.last_info[i],
) = self.envs[i].step(action)
obs = {self.i: self.last_obs[self.i]}
rew = {self.i: self.last_rew[self.i]}
terminated = {self.i: self.last_terminated[self.i]}
truncated = {self.i: self.last_truncated[self.i]}
info = {self.i: self.last_info[self.i]}
if terminated[self.i]:
rew[self.i] = 0
self.terminateds.add(self.i)
if truncated[self.i]:
self.truncateds.add(self.i)
self.i = (self.i + 1) % self.num
terminated["__all__"] = len(self.terminateds) == len(self.envs)
truncated["__all__"] = len(self.truncateds) == len(self.envs)
return obs, rew, terminated, truncated, info
| RoundRobinMultiAgent |
python | MongoEngine__mongoengine | tests/utils.py | {
"start": 413,
"end": 2955
} | class ____(unittest.TestCase):
"""Base class for tests that need a mongodb connection
It ensures that the db is clean at the beginning and dropped at the end automatically
"""
@classmethod
def setUpClass(cls):
disconnect_all()
cls._connection = connect(db=MONGO_TEST_DB)
cls._connection.drop_database(MONGO_TEST_DB)
cls.db = get_db()
@classmethod
def tearDownClass(cls):
cls._connection.drop_database(MONGO_TEST_DB)
disconnect_all()
def get_as_pymongo(doc):
"""Fetch the pymongo version of a certain Document"""
return doc.__class__.objects.as_pymongo().get(id=doc.id)
def requires_mongodb_lt_42(func):
return _decorated_with_ver_requirement(func, (4, 2), oper=operator.lt)
def requires_mongodb_gte_40(func):
return _decorated_with_ver_requirement(func, (4, 0), oper=operator.ge)
def requires_mongodb_gte_42(func):
return _decorated_with_ver_requirement(func, (4, 2), oper=operator.ge)
def requires_mongodb_gte_44(func):
return _decorated_with_ver_requirement(func, (4, 4), oper=operator.ge)
def requires_mongodb_gte_50(func):
return _decorated_with_ver_requirement(func, (5, 0), oper=operator.ge)
def requires_mongodb_gte_60(func):
return _decorated_with_ver_requirement(func, (6, 0), oper=operator.ge)
def requires_mongodb_gte_70(func):
return _decorated_with_ver_requirement(func, (7, 0), oper=operator.ge)
def _decorated_with_ver_requirement(func, mongo_version_req, oper):
"""Return a MongoDB version requirement decorator.
The resulting decorator will skip the test if the current
MongoDB version doesn't match the provided version/operator.
For example, if you define a decorator like so:
def requires_mongodb_gte_36(func):
return _decorated_with_ver_requirement(
func, (3.6), oper=operator.ge
)
Then tests decorated with @requires_mongodb_gte_36 will be skipped if
ran against MongoDB < v3.6.
:param mongo_version_req: The mongodb version requirement (tuple(int, int))
:param oper: The operator to apply (e.g. operator.ge)
"""
@functools.wraps(func)
def _inner(*args, **kwargs):
mongodb_v = get_mongodb_version()
if oper(mongodb_v, mongo_version_req):
return func(*args, **kwargs)
else:
pretty_version = ".".join(str(n) for n in mongo_version_req)
pytest.skip(f"Needs MongoDB {oper.__name__} v{pretty_version}")
return _inner
| MongoDBTestCase |
python | getsentry__sentry | src/sentry/models/transaction_threshold.py | {
"start": 1873,
"end": 3069
} | class ____(DefaultFieldsModelExisting):
__relocation_scope__ = RelocationScope.Excluded
# max_length here is based on the maximum for transactions in relay
transaction = models.CharField(max_length=200)
project = FlexibleForeignKey("sentry.Project", db_constraint=False)
organization = FlexibleForeignKey("sentry.Organization")
threshold = models.IntegerField()
metric = models.PositiveSmallIntegerField(default=TransactionMetric.DURATION.value)
edited_by_id = HybridCloudForeignKey("sentry.User", null=True, on_delete="SET_NULL")
class Meta:
app_label = "sentry"
db_table = "sentry_projecttransactionthresholdoverride"
unique_together = (("project", "transaction"),)
@classmethod
def filter(cls, project_ids, organization_id, order_by, value_list):
cache_key = get_project_threshold_cache_key(
"sentry_projecttransactionthresholdoverride",
project_ids,
organization_id,
order_by,
value_list,
)
return _filter_and_cache(cls, cache_key, project_ids, organization_id, order_by, value_list)
@region_silo_model
| ProjectTransactionThresholdOverride |
python | getsentry__sentry | src/sentry/explore/models.py | {
"start": 4970,
"end": 10352
} | class ____(BaseManager["ExploreSavedQueryStarred"]):
def get_last_position(self, organization: Organization, user_id: int) -> int:
"""
Returns the last position of a user's starred queries in an organization.
"""
last_starred_query = (
self.filter(
organization=organization, user_id=user_id, position__isnull=False, starred=True
)
.order_by("-position")
.first()
)
if last_starred_query:
return last_starred_query.position # type: ignore[return-value]
return 0
def get_starred_query(
self, organization: Organization, user_id: int, query: ExploreSavedQuery
) -> ExploreSavedQueryStarred | None:
"""
Returns the starred query if it exists, otherwise None.
"""
return self.filter(
organization=organization, user_id=user_id, explore_saved_query=query
).first()
def reorder_starred_queries(
self, organization: Organization, user_id: int, new_query_positions: list[int]
):
"""
Reorders the positions of starred queries for a user in an organization.
Does NOT add or remove starred queries.
Args:
organization: The organization the queries belong to
user_id: The ID of the user whose starred queries are being reordered
new_query_positions: List of query IDs in their new order
Raises:
ValueError: If there's a mismatch between existing starred queries and the provided list
"""
existing_starred_queries = self.filter(
organization=organization,
user_id=user_id,
position__isnull=False,
starred=True,
)
existing_query_ids = {query.explore_saved_query.id for query in existing_starred_queries}
new_query_ids = set(new_query_positions)
if existing_query_ids != new_query_ids:
raise ValueError("Mismatch between existing and provided starred queries.")
position_map = {query_id: idx for idx, query_id in enumerate(new_query_positions)}
queries_to_update = list(existing_starred_queries)
for query in queries_to_update:
query.position = position_map[query.explore_saved_query.id]
if queries_to_update:
self.bulk_update(queries_to_update, ["position"])
def insert_starred_query(
self,
organization: Organization,
user_id: int,
query: ExploreSavedQuery,
starred: bool = True,
) -> bool:
"""
Inserts a new starred query at the end of the list.
Args:
organization: The organization the queries belong to
user_id: The ID of the user whose starred queries are being updated
explore_saved_query: The query to insert
Returns:
True if the query was starred, False if the query was already starred
"""
with transaction.atomic(using=router.db_for_write(ExploreSavedQueryStarred)):
if self.get_starred_query(organization, user_id, query):
return False
position = self.get_last_position(organization, user_id) + 1
self.create(
organization=organization,
user_id=user_id,
explore_saved_query=query,
position=position,
starred=starred,
)
return True
def delete_starred_query(
self, organization: Organization, user_id: int, query: ExploreSavedQuery
) -> bool:
"""
Deletes a starred query from the list.
Decrements the position of all queries after the deletion point.
Args:
organization: The organization the queries belong to
user_id: The ID of the user whose starred queries are being updated
explore_saved_query: The query to delete
Returns:
True if the query was unstarred, False if the query was already unstarred
"""
with transaction.atomic(using=router.db_for_write(ExploreSavedQueryStarred)):
if not (starred_query := self.get_starred_query(organization, user_id, query)):
return False
deleted_position = starred_query.position
starred_query.delete()
self.filter(
organization=organization, user_id=user_id, position__gt=deleted_position
).update(position=models.F("position") - 1)
return True
def updated_starred_query(
self,
organization: Organization,
user_id: int,
query: ExploreSavedQuery,
starred: bool,
) -> bool:
"""
Updates the starred status of a query.
"""
with transaction.atomic(using=router.db_for_write(ExploreSavedQueryStarred)):
if not (starred_query := self.get_starred_query(organization, user_id, query)):
return False
starred_query.starred = starred
if starred:
starred_query.position = self.get_last_position(organization, user_id) + 1
else:
starred_query.position = None
starred_query.save()
return True
@region_silo_model
| ExploreSavedQueryStarredManager |
python | tensorflow__tensorflow | tensorflow/python/keras/metrics.py | {
"start": 95612,
"end": 96508
} | class ____(MeanMetricWrapper):
"""Computes the hinge metric between `y_true` and `y_pred`.
`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
provided we will convert them to -1 or 1.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.Hinge()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result().numpy()
1.3
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result().numpy()
1.1
Usage with `compile()` API:
```python
model.compile(optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.Hinge()])
```
"""
def __init__(self, name='hinge', dtype=None):
super(Hinge, self).__init__(hinge, name, dtype=dtype)
| Hinge |
python | walkccc__LeetCode | solutions/1730. Shortest Path to Get Food/1730.py | {
"start": 0,
"end": 843
} | class ____:
def getFood(self, grid: list[list[str]]) -> int:
DIRS = ((0, 1), (1, 0), (0, -1), (-1, 0))
m = len(grid)
n = len(grid[0])
ans = 0
q = collections.deque([self._getStartLocation(grid)])
while q:
for _ in range(len(q)):
i, j = q.popleft()
for dx, dy in DIRS:
x = i + dx
y = j + dy
if x < 0 or x == m or y < 0 or y == n:
continue
if grid[x][y] == 'X':
continue
if grid[x][y] == '#':
return ans + 1
q.append((x, y))
grid[x][y] = 'X' # Mark as visited.
ans += 1
return -1
def _getStartLocation(self, grid: list[list[str]]) -> tuple[int, int]:
for i, row in enumerate(grid):
for j, cell in enumerate(row):
if cell == '*':
return (i, j)
| Solution |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.